From cfc8c117bd0faea95cf979830cccc7e1d904ea5c Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 17 Apr 2023 19:13:59 +0800
Subject: [PATCH] Merge pull request #370 from alibaba-damo-academy/dev_lhn2
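
Lowercase each token inside seg_tokenize and drop the forward_segment
pre-pass, so both the large-dataset tokenizer and the dataset
preprocessor hand their token lists to seg_tokenize directly.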
---
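Notes:
    After this change seg_tokenize lowercases each token itself, so the
    forward_segment("".join(...)).lower() pre-pass is no longer needed.
    A minimal, self-contained sketch of the resulting behavior (the
    seg_dict contents, the <unk> fallback, and the return form here are
    illustrative assumptions, not the exact FunASR code):

        def seg_tokenize(txt, seg_dict):
            # Map each token to its segmentation, matching case-insensitively.
            out_txt = ""
            for word in txt:
                word = word.lower()  # new in this patch: per-token lowercasing
                if word in seg_dict:
                    out_txt += seg_dict[word] + " "
                else:
                    out_txt += "<unk> "  # fallback token: assumed for this sketch
            return out_txt.strip().split()  # return form: assumed for this sketch

        seg_dict = {"hello": "he llo", "world": "wor ld"}
        print(seg_tokenize(["Hello", "WORLD"], seg_dict))
        # ['he', 'llo', 'wor', 'ld'] -- mixed-case tokens now match seg_dict keys
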
funasr/datasets/preprocessor.py | 2 +-
funasr/datasets/large_datasets/utils/tokenize.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/funasr/datasets/large_datasets/utils/tokenize.py b/funasr/datasets/large_datasets/utils/tokenize.py
index 5a2f921..022d321 100644
--- a/funasr/datasets/large_datasets/utils/tokenize.py
+++ b/funasr/datasets/large_datasets/utils/tokenize.py
@@ -19,6 +19,7 @@
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -41,8 +42,7 @@
     if seg_dict is not None:
         assert isinstance(seg_dict, dict)
-        txt = forward_segment("".join(text).lower(), seg_dict)
-        text = seg_tokenize(txt, seg_dict)
+        text = seg_tokenize(text, seg_dict)
     length = len(text)
     for i in range(length):
diff --git a/funasr/datasets/preprocessor.py b/funasr/datasets/preprocessor.py
index 1adca05..20a3831 100644
--- a/funasr/datasets/preprocessor.py
+++ b/funasr/datasets/preprocessor.py
@@ -48,6 +48,7 @@
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -359,7 +360,6 @@
         if self.split_with_space:
             tokens = text.strip().split(" ")
             if self.seg_dict is not None:
-                tokens = forward_segment("".join(tokens), self.seg_dict)
                 tokens = seg_tokenize(tokens, self.seg_dict)
         else:
             tokens = self.tokenizer.text2tokens(text)
--
Gitblit v1.9.1