From 6427c834dfd97b1f05c6659cdc7ccf010bf82fe1 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Mon, 24 Apr 2023 19:50:07 +0800
Subject: [PATCH] tokenize: add punc labels, vad index and BPE tokenizer support

---
 funasr/datasets/large_datasets/utils/tokenize.py |   43 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 32 insertions(+), 11 deletions(-)

diff --git a/funasr/datasets/large_datasets/utils/tokenize.py b/funasr/datasets/large_datasets/utils/tokenize.py
index 0c01885..d8ceff2 100644
--- a/funasr/datasets/large_datasets/utils/tokenize.py
+++ b/funasr/datasets/large_datasets/utils/tokenize.py
@@ -18,35 +18,56 @@
 
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
-    pattern = re.compile(r"([\u4E00-\u9FA5A-Za-z0-9])")
     for word in txt:
-        if pattern.match(word):
-            if word in seg_dict:
-                out_txt += seg_dict[word] + " "
-            else:
-                out_txt += "<unk>" + " "
+        if word in seg_dict:
+            out_txt += seg_dict[word] + " "
         else:
-            continue
+            out_txt += "<unk>" + " "
     return out_txt.strip().split()
 
 def tokenize(data,
              vocab=None,
-             seg_dict=None):
+             seg_dict=None,
+             punc_dict=None,
+             bpe_tokenizer=None):
     assert "text" in data
     assert isinstance(vocab, dict)
     text = data["text"]
     token = []
+    vad = -2
+
+    if bpe_tokenizer is not None:
+        text = bpe_tokenizer.text2tokens("".join(text))
 
     if seg_dict is not None:
         assert isinstance(seg_dict, dict)
         txt = forward_segment("".join(text).lower(), seg_dict)
         text = seg_tokenize(txt, seg_dict)
-    
-    for x in text:
-        if x in vocab:
+
+    length = len(text)
+    for i in range(length):
+        x = text[i]
+        if i == length-1 and "punc" in data and text[i].startswith("vad:"):
+            vad = x[4:]
+            if len(vad) == 0:
+                vad = -1
+            else:
+                vad = int(vad)
+        elif x in vocab:
             token.append(vocab[x])
         else:
             token.append(vocab['<unk>'])
 
+    if "punc" in data and punc_dict is not None:
+        punc_token = []
+        for punc in data["punc"]:
+            if punc in punc_dict:
+                punc_token.append(punc_dict[punc])
+            else:
+                punc_token.append(punc_dict["_"])
+        data["punc"] =  np.array(punc_token)
+
     data["text"] = np.array(token)
+    if vad != -2:
+        data["vad_indexes"] = np.array([vad], dtype=np.int64)
     return data

--
Gitblit v1.9.1
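
For reference, a minimal usage sketch of the patched tokenize(). It assumes the
funasr package is importable; the toy vocab, punc_dict and sample record below are
hypothetical placeholders for illustration, not values shipped with FunASR:

    from funasr.datasets.large_datasets.utils.tokenize import tokenize

    # illustrative toy mappings only
    vocab = {"<unk>": 0, "hello": 1, "world": 2}
    punc_dict = {"_": 0, ",": 1, ".": 2}

    # when "punc" labels are present, the last "text" entry may carry a "vad:<index>" marker
    data = {"text": ["hello", "world", "vad:3"], "punc": [",", "."]}

    out = tokenize(data, vocab=vocab, punc_dict=punc_dict)
    # out["text"]        -> array([1, 2])   token ids ("vad:3" is consumed, not tokenized)
    # out["punc"]        -> array([1, 2])   punctuation label ids
    # out["vad_indexes"] -> array([3])      parsed from the trailing "vad:" marker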