From e9d2cfc3a134b00f4e98271fbee3838d1ccecbcc Mon Sep 17 00:00:00 2001
From: VirtuosoQ <2416050435@qq.com>
Date: Fri, 26 Apr 2024 14:59:30 +0800
Subject: [PATCH] FunASR Java HTTP client

---
 funasr/auto/auto_model.py |   26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index bba44e7..ba8881a 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -21,15 +21,17 @@
 from funasr.utils.timestamp_tools import timestamp_sentence
 from funasr.download.download_from_hub import download_model
 from funasr.utils.vad_utils import slice_padding_audio_samples
+from funasr.utils.vad_utils import merge_vad
 from funasr.utils.load_utils import load_audio_text_image_video
 from funasr.train_utils.set_all_random_seed import set_all_random_seed
 from funasr.train_utils.load_pretrained_model import load_pretrained_model
 from funasr.utils import export_utils
+
 try:
     from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
     from funasr.models.campplus.cluster_backend import ClusterBackend
 except:
-    print("Notice: If you want to use the speaker diarization, please `pip install hdbscan`")
+    pass
 
 
 def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
@@ -110,7 +112,7 @@
         if vad_model is not None:
             logging.info("Building VAD model.")
             vad_kwargs["model"] = vad_model
-            vad_kwargs["model_revision"] = kwargs.get("vad_model_revision", None)
+            vad_kwargs["model_revision"] = kwargs.get("vad_model_revision", "master")
             vad_kwargs["device"] = kwargs["device"]
             vad_model, vad_kwargs = self.build_model(**vad_kwargs)
 
@@ -120,7 +122,7 @@
         if punc_model is not None:
             logging.info("Building punc model.")
             punc_kwargs["model"] = punc_model
-            punc_kwargs["model_revision"] = kwargs.get("punc_model_revision", None)
+            punc_kwargs["model_revision"] = kwargs.get("punc_model_revision", "master")
             punc_kwargs["device"] = kwargs["device"]
             punc_model, punc_kwargs = self.build_model(**punc_kwargs)
 
@@ -130,7 +132,7 @@
         if spk_model is not None:
             logging.info("Building SPK model.")
             spk_kwargs["model"] = spk_model
-            spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", None)
+            spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", "master")
             spk_kwargs["device"] = kwargs["device"]
             spk_model, spk_kwargs = self.build_model(**spk_kwargs)
             self.cb_model = ClusterBackend().to(kwargs["device"])
@@ -173,6 +175,8 @@
             kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
             kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
             vocab_size = len(kwargs["token_list"]) if kwargs["token_list"] is not None else -1
+            if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
+                vocab_size = tokenizer.get_vocab_size()
         else:
             vocab_size = -1
         kwargs["tokenizer"] = tokenizer
@@ -201,7 +205,7 @@
                 load_pretrained_model(
                     model=model,
                     path=init_param,
-                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
+                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
                     oss_bucket=kwargs.get("oss_bucket", None),
                     scope_map=kwargs.get("scope_map", []),
                     excludes=kwargs.get("excludes", None),
@@ -209,6 +213,9 @@
             else:
                 print(f"error, init_param does not exist!: {init_param}")
         
+        # fp16
+        if kwargs.get("fp16", False):
+            model.to(torch.float16)
         return model, kwargs
     
     def __call__(self, *args, **cfg):
@@ -294,6 +301,10 @@
         res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
         end_vad = time.time()
 
+        #  FIX(gcf): concat the vad clips for SenseVoice model for better aed
+        if kwargs.get("merge_vad", False):
+            for i in range(len(res)):
+                res[i]['value'] = merge_vad(res[i]['value'], kwargs.get("merge_length", 15000))
 
         # step.2 compute asr model
         model = self.model
@@ -312,7 +323,8 @@
             key = res[i]["key"]
             vadsegments = res[i]["value"]
             input_i = data_list[i]
-            speech = load_audio_text_image_video(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
+            fs = kwargs["frontend"].fs if hasattr(kwargs["frontend"], "fs") else 16000
+            speech = load_audio_text_image_video(input_i, fs=fs, audio_fs=kwargs.get("fs", 16000))
             speech_lengths = len(speech)
             n = len(vadsegments)
             data_with_index = [(vadsegments[i], i) for i in range(n)]
@@ -405,7 +417,7 @@
             return_raw_text = kwargs.get('return_raw_text', False)
             # step.3 compute punc model
             if self.punc_model is not None:
-                if not len(result["text"]):
+                if not len(result["text"].strip()):
                     if return_raw_text:
                         result['raw_text'] = ''
                 else:

--
Gitblit v1.9.1