| funasr/datasets/large_datasets/utils/tokenize.py | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 |
funasr/datasets/large_datasets/utils/tokenize.py
@@ -58,7 +58,7 @@
     # enable preset hotword detect in sampling
     pre_index = None
     for hw in hw_config['pre_hwlist']:
-        hw = " ".join(seg_tokenize(hw))
+        hw = " ".join(seg_tokenize(hw, seg_dict))
         _find = text.find(hw)
         if _find != -1:
             _find = text[:_find].count(" ")  # bpe sometimes