haoneng.lhn
2023-04-25 a88a1d9938eec790aa7333fe6c3d33e5e2067256
update text tokenize
2 files changed
17 ■■■■■ Changed files
funasr/datasets/large_datasets/utils/tokenize.py 8 ●●●●●
funasr/datasets/preprocessor.py 9 ●●●●
funasr/datasets/large_datasets/utils/tokenize.py
@@ -17,12 +17,20 @@
    return word_list
def seg_tokenize(txt, seg_dict):
    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
    out_txt = ""
    for word in txt:
        word = word.lower()
        if word in seg_dict:
            out_txt += seg_dict[word] + " "
        else:
            if pattern.match(word):
                for char in word:
                    if char in seg_dict:
                        out_txt += seg_dict[char] + " "
                    else:
                        out_txt += "<unk>" + " "
            else:
                out_txt += "<unk>" + " "
    return out_txt.strip().split()
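
For context, a minimal sketch of the patched fallback behavior, using seg_tokenize as defined above. The seg_dict entries below are hypothetical stand-ins for a real BPE segmentation dictionary, which in FunASR is loaded from a seg_dict file:

import re  # required by seg_tokenize's compiled pattern

# Hypothetical segmentation dictionary; real entries map a word to its
# space-joined subword pieces.
seg_dict = {"hello": "he@@ llo", "你": "你", "好": "好", "2": "2"}

# txt is an iterable of whitespace-split words, as produced upstream.
print(seg_tokenize(["hello", "你好2", "world"], seg_dict))
# Before this patch, "你好2" fell through to a single <unk>; with the new
# per-character fallback the output is: ['he@@', 'llo', '你', '好', '2', '<unk>']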
funasr/datasets/preprocessor.py
@@ -44,14 +44,21 @@
        i += len(longest_word)
    return word_list
def seg_tokenize(txt, seg_dict):
    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
    out_txt = ""
    for word in txt:
        word = word.lower()
        if word in seg_dict:
            out_txt += seg_dict[word] + " "
        else:
            if pattern.match(word):
                for char in word:
                    if char in seg_dict:
                        out_txt += seg_dict[char] + " "
                    else:
                        out_txt += "<unk>" + " "
            else:
                out_txt += "<unk>" + " "
    return out_txt.strip().split()
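
The second hunk duplicates the same function in funasr/datasets/preprocessor.py, so the guard regex is identical in both copies. A quick sketch of what it admits (the test strings are illustrative, not from the repository):

import re

# Matches only strings composed entirely of common CJK ideographs
# (U+4E00..U+9FA5) and ASCII digits; anything else keeps the single <unk>.
pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
assert pattern.match("你好2")        # pure CJK + digit: eligible for per-char lookup
assert not pattern.match("hello")    # Latin word: stays a single <unk>
assert not pattern.match("你好!")    # punctuation anywhere breaks the full match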