From d20c030e5b75306dd67e8fe9924d5d94eac1bf30 Mon Sep 17 00:00:00 2001
From: wusong <63332221+wusong1128@users.noreply.github.com>
Date: Wed, 25 Sep 2024 15:11:50 +0800
Subject: [PATCH] Fix missing end flag from the Python ws service for trailing non-speech audio (#2102)
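
Within AutoModel, the tokenizer setup now accepts a comma-separated
string or a list of tokenizer names: one tokenizer is built per entry,
a single tokenizer_conf is broadcast across all of them, and the
resulting tokenizers, token lists, and vocab sizes are stored in kwargs
instead of vocab_size being passed to the model constructor. In
addition, the startup version check can be skipped via disable_update,
any stale streaming "cache" entry is dropped from kwargs before
inference (the apparent fix for the missing end flag), and merge_vad is
now read from the per-call cfg rather than the persistent kwargs.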

---
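Notes: below is a minimal standalone sketch of the tokenizer broadcast
logic this patch introduces; the tokenizer names and conf value are
illustrative assumptions, and the omegaconf ListConfig check is omitted
for brevity.

    # A comma-separated string is split into a list of names; a single
    # conf dict is then replicated so that each name gets its own conf.
    tokenizer = "SentencepiecesTokenizer,CharTokenizer"  # assumed names
    tokenizers_conf = {"unk_symbol": "<unk>"}            # assumed conf

    tokenizers = tokenizer.split(",") if isinstance(tokenizer, str) else tokenizer
    if not isinstance(tokenizers_conf, (list, tuple)):
        tokenizers_conf = [tokenizers_conf] * len(tokenizers)

    assert len(tokenizers_conf) == len(tokenizers)  # one conf per name
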
 funasr/auto/auto_model.py |   75 +++++++++++++++++++++++++++++--------
 1 file changed, 58 insertions(+), 17 deletions(-)

diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 75324dc..9f5f4fb 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -14,6 +14,7 @@
 import numpy as np
 from tqdm import tqdm
 
+from omegaconf import DictConfig, ListConfig
 from funasr.utils.misc import deep_update
 from funasr.register import tables
 from funasr.utils.load_utils import load_bytes
@@ -114,7 +115,7 @@
         try:
             from funasr.utils.version_checker import check_for_update
 
-            check_for_update()
+            check_for_update(disable=kwargs.get("disable_update", False))
         except:
             pass
 
@@ -187,21 +188,59 @@
 
         # build tokenizer
         tokenizer = kwargs.get("tokenizer", None)
-        if tokenizer is not None:
-            tokenizer_class = tables.tokenizer_classes.get(tokenizer)
-            tokenizer = tokenizer_class(**kwargs.get("tokenizer_conf", {}))
-            kwargs["token_list"] = (
-                tokenizer.token_list if hasattr(tokenizer, "token_list") else None
-            )
-            kwargs["token_list"] = (
-                tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
-            )
-            vocab_size = len(kwargs["token_list"]) if kwargs["token_list"] is not None else -1
-            if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
-                vocab_size = tokenizer.get_vocab_size()
-        else:
-            vocab_size = -1
         kwargs["tokenizer"] = tokenizer
+        kwargs["vocab_size"] = -1
+
+        if tokenizer is not None:
+            tokenizers = (
+                tokenizer.split(",") if isinstance(tokenizer, str) else tokenizer
+            )  # tokenizers is always a list from here on
+            tokenizers_conf = kwargs.get("tokenizer_conf", {})
+            tokenizers_build = []
+            vocab_sizes = []
+            token_lists = []
+            ### === only for kws ===
+            token_list_files = kwargs.get("token_lists", [])
+            seg_dicts = kwargs.get("seg_dicts", [])
+            ### === only for kws ===
+
+            if not isinstance(tokenizers_conf, (list, tuple, ListConfig)):
+                tokenizers_conf = [tokenizers_conf] * len(tokenizers)
+
+            for i, tokenizer in enumerate(tokenizers):
+                tokenizer_class = tables.tokenizer_classes.get(tokenizer)
+                tokenizer_conf = tokenizers_conf[i]
+
+                ### === only for kws ===
+                if len(token_list_files) > 1:
+                    tokenizer_conf["token_list"] = token_list_files[i]
+                if len(seg_dicts) > 1:
+                    tokenizer_conf["seg_dict"] = seg_dicts[i]
+                ### === only for kws ===
+
+                tokenizer = tokenizer_class(**tokenizer_conf)
+                tokenizers_build.append(tokenizer)
+                token_list = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
+                token_list = (
+                    tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else token_list
+                )
+                vocab_size = -1
+                if token_list is not None:
+                    vocab_size = len(token_list)
+
+                if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
+                    vocab_size = tokenizer.get_vocab_size()
+                token_lists.append(token_list)
+                vocab_sizes.append(vocab_size)
+
+            if len(tokenizers_build) == 1:
+                tokenizers_build = tokenizers_build[0]
+                token_lists = token_lists[0]
+                vocab_sizes = vocab_sizes[0]
+
+            kwargs["tokenizer"] = tokenizers_build
+            kwargs["vocab_size"] = vocab_sizes
+            kwargs["token_list"] = token_lists
 
         # build frontend
         frontend = kwargs.get("frontend", None)
@@ -219,7 +258,7 @@
         model_conf = {}
         deep_update(model_conf, kwargs.get("model_conf", {}))
         deep_update(model_conf, kwargs)
-        model = model_class(**model_conf, vocab_size=vocab_size)
+        model = model_class(**model_conf)
 
         # init_param
         init_param = kwargs.get("init_param", None)
@@ -264,6 +303,8 @@
 
     def inference(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
         kwargs = self.kwargs if kwargs is None else kwargs
+        # drop any stale streaming cache left over from a previous session
+        kwargs.pop("cache", None)
         deep_update(kwargs, cfg)
         model = self.model if model is None else model
         model.eval()
@@ -337,7 +378,7 @@
         end_vad = time.time()
 
         #  FIX(gcf): concatenate the VAD clips for the SenseVoice model for better AED
-        if kwargs.get("merge_vad", False):
+        if cfg.get("merge_vad", False):
             for i in range(len(res)):
                 res[i]["value"] = merge_vad(
                     res[i]["value"], kwargs.get("merge_length_s", 15) * 1000

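Post-diff note: with cfg.get("merge_vad", ...) above, VAD-segment
merging only happens when merge_vad is passed on the call itself. A
hedged usage sketch, assuming AutoModel.generate forwards extra keyword
arguments as cfg and that the model ids below exist on the model hub:

    from funasr import AutoModel

    # assumed model ids; an ASR model paired with a VAD model
    model = AutoModel(model="iic/SenseVoiceSmall", vad_model="fsmn-vad")
    res = model.generate(input="audio.wav", merge_vad=True, merge_length_s=15)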
--
Gitblit v1.9.1