From 9817785c66a13caa681a8e9e272f2ae949233542 Mon Sep 17 00:00:00 2001
From: yhliang <68215459+yhliang-aslp@users.noreply.github.com>
Date: Tue, 18 Apr 2023 19:28:39 +0800
Subject: [PATCH] Merge pull request #380 from alibaba-damo-academy/main
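
The keys of seg_dict are presumably stored lower-cased, so words containing
upper-case letters missed every lookup in seg_tokenize. This patch lowers
each word before the lookup, drops the preceding forward_segment call so the
space-split tokens go straight to seg_tokenize, and adds the missing return
at the end of the multi-tokenizer _text_process, which previously fell off
the end and returned None.

A minimal sketch of the patched lookup, not the full FunASR function; the
"<unk>" fallback below is an assumption for illustration:

    def seg_tokenize(txt, seg_dict):
        out_txt = ""
        for word in txt:
            word = word.lower()  # the fix: match the lower-cased dict keys
            if word in seg_dict:
                out_txt += seg_dict[word] + " "
            else:
                out_txt += "<unk> "  # assumed fallback for unknown words
        return out_txt.strip().split(" ")

    # With seg_dict = {"hello": "he llo"}, the input ["HELLO"] previously
    # hit the fallback; after lowering it maps to ["he", "llo"].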
---
funasr/datasets/preprocessor.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/funasr/datasets/preprocessor.py b/funasr/datasets/preprocessor.py
index afeff4e..20a3831 100644
--- a/funasr/datasets/preprocessor.py
+++ b/funasr/datasets/preprocessor.py
@@ -48,6 +48,7 @@
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -359,7 +360,6 @@
             if self.split_with_space:
                 tokens = text.strip().split(" ")
                 if self.seg_dict is not None:
-                    tokens = forward_segment("".join(tokens), self.seg_dict)
                     tokens = seg_tokenize(tokens, self.seg_dict)
             else:
                 tokens = self.tokenizer.text2tokens(text)
@@ -786,6 +786,7 @@
     ) -> Dict[str, np.ndarray]:
         for i in range(self.num_tokenizer):
             text_name = self.text_name[i]
+            #import pdb; pdb.set_trace()
             if text_name in data and self.tokenizer[i] is not None:
                 text = data[text_name]
                 text = self.text_cleaner(text)
@@ -800,7 +801,7 @@
                     data[self.vad_name] = np.array([vad], dtype=np.int64)
                 text_ints = self.token_id_converter[i].tokens2ids(tokens)
                 data[text_name] = np.array(text_ints, dtype=np.int64)
-
+        return data
 
 def split_to_mini_sentence(words: list, word_limit: int = 20):
     assert word_limit > 1
@@ -813,4 +814,4 @@
         sentences.append(words[i * word_limit:(i + 1) * word_limit])
     if length % word_limit > 0:
         sentences.append(words[sentence_len * word_limit:])
-    return sentences
\ No newline at end of file
+    return sentences
--
Gitblit v1.9.1