志浩
2023-04-07 4137f5cf26e7c4b40853959cd2574edfde03aa60
funasr/datasets/preprocessor.py
@@ -47,15 +47,11 @@
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
-    pattern = re.compile(r"([\u4E00-\u9FA5A-Za-z0-9])")
     for word in txt:
-        if pattern.match(word):
-            if word in seg_dict:
-                out_txt += seg_dict[word] + " "
-            else:
-                out_txt += "<unk>" + " "
-        else:
-            continue
+        if word in seg_dict:
+            out_txt += seg_dict[word] + " "
+        else:
+            out_txt += "<unk>" + " "
     return out_txt.strip().split()
 
 
 def seg_tokenize_wo_pattern(txt, seg_dict):
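
With the pattern filter removed, seg_tokenize reduces to a plain dictionary expansion: each in-vocabulary token is replaced by its space-separated pieces from seg_dict, and any out-of-vocabulary token becomes <unk>. A minimal sketch of the resulting behavior; the seg_dict entries below are invented for illustration:

    def seg_tokenize(txt, seg_dict):
        out_txt = ""
        for word in txt:
            if word in seg_dict:
                out_txt += seg_dict[word] + " "
            else:
                out_txt += "<unk>" + " "
        return out_txt.strip().split()

    # Invented entries: each token maps to its space-separated sub-pieces.
    seg_dict = {"你好": "你 好", "hello": "he@@ llo"}
    print(seg_tokenize(["你好", "hello", "world"], seg_dict))
    # ['你', '好', 'he@@', 'llo', '<unk>']  ("world" is out of vocabulary)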
@@ -804,3 +800,17 @@
                     data[self.vad_name] = np.array([vad], dtype=np.int64)
                 text_ints = self.token_id_converter[i].tokens2ids(tokens)
                 data[text_name] = np.array(text_ints, dtype=np.int64)
+
+
+def split_to_mini_sentence(words: list, word_limit: int = 20):
+    assert word_limit > 1
+    if len(words) <= word_limit:
+        return [words]
+    sentences = []
+    length = len(words)
+    sentence_len = length // word_limit
+    for i in range(sentence_len):
+        sentences.append(words[i * word_limit:(i + 1) * word_limit])
+    if length % word_limit > 0:
+        sentences.append(words[sentence_len * word_limit:])
+    return sentences
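
The new split_to_mini_sentence helper slices a word list into chunks of at most word_limit items, emitting any remainder as a shorter final chunk. A quick usage sketch with invented data, assuming the function added above is in scope:

    words = ["w%d" % i for i in range(45)]
    chunks = split_to_mini_sentence(words, word_limit=20)
    assert [len(c) for c in chunks] == [20, 20, 5]  # two full chunks plus the remainder
    assert split_to_mini_sentence(["a", "b"]) == [["a", "b"]]  # short inputs are returned whole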