From f01a646b71dc757347beab9a783cfed59c612434 Mon Sep 17 00:00:00 2001
From: Lizerui9926 <110582652+Lizerui9926@users.noreply.github.com>
Date: Tue, 25 Apr 2023 16:54:59 +0800
Subject: [PATCH] Merge pull request #417 from alibaba-damo-academy/dev_lhn
---
 funasr/datasets/large_datasets/utils/tokenize.py | 10 +++++++++-
 funasr/datasets/preprocessor.py                  | 11 +++++++++--
 2 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/funasr/datasets/large_datasets/utils/tokenize.py b/funasr/datasets/large_datasets/utils/tokenize.py
index 022d321..0d2fd84 100644
--- a/funasr/datasets/large_datasets/utils/tokenize.py
+++ b/funasr/datasets/large_datasets/utils/tokenize.py
@@ -17,13 +17,21 @@
     return word_list
 
 def seg_tokenize(txt, seg_dict):
+    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
     out_txt = ""
     for word in txt:
         word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
-            out_txt += "<unk>" + " "
+            if pattern.match(word):
+                for char in word:
+                    if char in seg_dict:
+                        out_txt += seg_dict[char] + " "
+                    else:
+                        out_txt += "<unk>" + " "
+            else:
+                out_txt += "<unk>" + " "
     return out_txt.strip().split()
 
 def tokenize(data,
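
For reference, a minimal standalone check of the character class the new pattern uses. This snippet is illustrative and not part of the patch; the test strings are made up:

import re

# Same character class as in the patch: CJK Unified Ideographs
# (U+4E00-U+9FA5) plus ASCII digits 0-9, anchored to the whole word.
pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')

assert pattern.match("阿里巴巴")      # all-CJK word: eligible for the per-character fallback
assert pattern.match("2023")          # all-digit word: also eligible
assert pattern.match("阿里2023")      # mixed CJK characters and digits still match
assert not pattern.match("hello")     # Latin words fail and still map to <unk>
assert not pattern.match("你好!")     # any other character breaks the match
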
diff --git a/funasr/datasets/preprocessor.py b/funasr/datasets/preprocessor.py
index 20a3831..758c750 100644
--- a/funasr/datasets/preprocessor.py
+++ b/funasr/datasets/preprocessor.py
@@ -44,15 +44,22 @@
         i += len(longest_word)
     return word_list
 
-
 def seg_tokenize(txt, seg_dict):
+    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
     out_txt = ""
     for word in txt:
         word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
-            out_txt += "<unk>" + " "
+            if pattern.match(word):
+                for char in word:
+                    if char in seg_dict:
+                        out_txt += seg_dict[char] + " "
+                    else:
+                        out_txt += "<unk>" + " "
+            else:
+                out_txt += "<unk>" + " "
     return out_txt.strip().split()
 
 def seg_tokenize_wo_pattern(txt, seg_dict):
--
Gitblit v1.9.1
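
Taken together, the change makes seg_tokenize fall back to character-level dictionary lookup for out-of-vocabulary words made up only of CJK characters and/or digits, instead of emitting a single <unk>. Below is a minimal runnable sketch of the patched behavior; the toy seg_dict contents are made up for illustration (the real dictionary is loaded elsewhere in FunASR, and the patch assumes re is already imported in both files):

import re

def seg_tokenize(txt, seg_dict):
    # Patched version: unknown words consisting of CJK characters and/or
    # digits are retried character by character before emitting <unk>.
    pattern = re.compile(r'^[\u4E00-\u9FA50-9]+$')
    out_txt = ""
    for word in txt:
        word = word.lower()
        if word in seg_dict:
            out_txt += seg_dict[word] + " "
        else:
            if pattern.match(word):
                for char in word:
                    if char in seg_dict:
                        out_txt += seg_dict[char] + " "
                    else:
                        out_txt += "<unk>" + " "
            else:
                out_txt += "<unk>" + " "
    return out_txt.strip().split()

# Toy dictionary (illustrative only): maps surface forms to model units.
seg_dict = {"你": "ni3", "好": "hao3", "hello": "hel lo"}

# "你好" is absent as a whole word, but both characters are known, so the
# fallback yields "ni3 hao3" instead of a single <unk>; "world" is Latin,
# fails the pattern, and still maps to <unk>.
print(seg_tokenize(["你好", "hello", "world"], seg_dict))
# -> ['ni3', 'hao3', 'hel', 'lo', '<unk>']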