From 1cdb3cc28d4d89a576cc06e5cd8eb80da1f3a3aa Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Fri, 26 Apr 2024 11:27:39 +0800
Subject: [PATCH] Dev gzf exp (#1665)

---
 funasr/datasets/large_datasets/utils/tokenize.py |   59 +++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 43 insertions(+), 16 deletions(-)

diff --git a/funasr/datasets/large_datasets/utils/tokenize.py b/funasr/datasets/large_datasets/utils/tokenize.py
index d8ceff2..5a1ddd2 100644
--- a/funasr/datasets/large_datasets/utils/tokenize.py
+++ b/funasr/datasets/large_datasets/utils/tokenize.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 import re
 import numpy as np
+from funasr.datasets.large_datasets.utils.hotword_utils import sample_hotword
+
 
 def forward_segment(text, seg_dict):
     word_list = []
@@ -16,39 +18,64 @@
         i += len(longest_word)
     return word_list
 
+
 def seg_tokenize(txt, seg_dict):
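+    # matches words made up entirely of CJK characters (\u4E00-\u9FA5) and/or digits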
+    pattern = re.compile(r"^[\u4E00-\u9FA50-9]+$")
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
-            out_txt += "<unk>" + " "
+            if pattern.match(word):
+                for char in word:
+                    if char in seg_dict:
+                        out_txt += seg_dict[char] + " "
+                    else:
+                        out_txt += "<unk>" + " "
+            else:
+                out_txt += "<unk>" + " "
     return out_txt.strip().split()
 
-def tokenize(data,
-             vocab=None,
-             seg_dict=None,
-             punc_dict=None,
-             bpe_tokenizer=None):
+
+def tokenize(data, vocab=None, seg_dict=None, punc_dict=None, bpe_tokenizer=None, hw_config=None):
     assert "text" in data
     assert isinstance(vocab, dict)
     text = data["text"]
     token = []
     vad = -2
-
     if bpe_tokenizer is not None:
-        text = bpe_tokenizer.text2tokens("".join(text))
-
+        text = bpe_tokenizer.text2tokens(" ".join(text))
     if seg_dict is not None:
         assert isinstance(seg_dict, dict)
-        txt = forward_segment("".join(text).lower(), seg_dict)
-        text = seg_tokenize(txt, seg_dict)
+        text = seg_tokenize(text, seg_dict)
 
     length = len(text)
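+    # samples tagged with "hw_tag" get hotword index spans sampled below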
+    if "hw_tag" in data:
+        pre_index = None
+        if hw_config["pre_hwlist"] is not None and hw_config["pre_prob"] > 0:
+            # preset hotword list enabled: pin the first preset hotword found in the text
+            for hw in hw_config["pre_hwlist"]:
+                hw = " ".join(seg_tokenize(hw, seg_dict))
+                _find = " ".join(text).find(hw)
+                if _find != -1:
+                    # _find = text[:_find].count(" ")  # bpe sometimes
+                    pre_index = [_find, _find + max(hw.count(" "), 1)]
+                    break
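+        # draw hotword spans for this utterance, keeping the pinned preset span if any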
+        hotword_indxs = sample_hotword(length, **hw_config, pre_index=pre_index)
+        data["hotword_indxs"] = hotword_indxs
+        del data["hw_tag"]
     for i in range(length):
         x = text[i]
-        if i == length-1 and "punc" in data and text[i].startswith("vad:"):
-            vad = x[-1][4:]
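+        # the last token may be a "vad:<index>" marker rather than a real word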
+        if i == length - 1 and "punc" in data and x.startswith("vad:"):
+            vad = x[4:]
             if len(vad) == 0:
                 vad = -1
             else:
@@ -56,7 +83,7 @@
         elif x in vocab:
             token.append(vocab[x])
         else:
-            token.append(vocab['<unk>'])
+            token.append(vocab["<unk>"])
 
     if "punc" in data and punc_dict is not None:
         punc_token = []
@@ -65,9 +92,9 @@
                 punc_token.append(punc_dict[punc])
             else:
                 punc_token.append(punc_dict["_"])
-        data["punc"] =  np.array(punc_token)
+        data["punc"] = np.array(punc_token)
 
     data["text"] = np.array(token)
-    if vad is not -2:
-        data["vad_indexes"]=np.array([vad], dtype=np.int64)
+    if vad != -2:
+        data["vad_indexes"] = np.array([vad], dtype=np.int64)
     return data

--
Gitblit v1.9.1