From 0a8740f3cf3f3511193b5bb941dd4ec20c69098a Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 25 Apr 2023 14:49:24 +0800
Subject: [PATCH] docs
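
Lowercase each word inside seg_tokenize() and drop the
forward_segment() pre-pass in tokenize(), so lookup against
seg_dict is case-insensitive per word.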
---
funasr/datasets/large_datasets/utils/tokenize.py | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/funasr/datasets/large_datasets/utils/tokenize.py b/funasr/datasets/large_datasets/utils/tokenize.py
index 5a2f921..022d321 100644
--- a/funasr/datasets/large_datasets/utils/tokenize.py
+++ b/funasr/datasets/large_datasets/utils/tokenize.py
@@ -19,6 +19,7 @@
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -41,8 +42,7 @@
     if seg_dict is not None:
         assert isinstance(seg_dict, dict)
-        txt = forward_segment("".join(text).lower(), seg_dict)
-        text = seg_tokenize(txt, seg_dict)
+        text = seg_tokenize(text, seg_dict)
     length = len(text)
     for i in range(length):
--
Gitblit v1.9.1
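
For reference, a minimal sketch of seg_tokenize as it reads after this
patch. The else branch body and the return statement are not shown in
the hunk above, so the "<unk>" fallback and the final strip()/split()
are assumptions; seg_dict is assumed to map a lowercased word to its
space-separated segmentation.

def seg_tokenize(txt, seg_dict):
    # Lowercasing now happens per word here, replacing the old
    # forward_segment("".join(text).lower(), seg_dict) pre-pass.
    out_txt = ""
    for word in txt:
        word = word.lower()
        if word in seg_dict:
            out_txt += seg_dict[word] + " "
        else:
            out_txt += "<unk> "  # assumed OOV fallback, not shown in the hunk
    return out_txt.strip().split()  # assumed return, not shown in the hunk

# Usage: lookups against seg_dict are now case-insensitive per word.
seg_dict = {"hello": "he llo", "world": "wor ld"}
print(seg_tokenize(["Hello", "WORLD", "foo"], seg_dict))
# -> ['he', 'llo', 'wor', 'ld', '<unk>']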