From e9d2cfc3a134b00f4e98271fbee3838d1ccecbcc Mon Sep 17 00:00:00 2001
From: VirtuosoQ <2416050435@qq.com>
Date: Fri, 26 Apr 2024 14:59:30 +0800
Subject: [PATCH] FunASR Java HTTP client

---
 funasr/auto/auto_model.py |   14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index d8ac5ca..ba8881a 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -21,6 +21,7 @@
 from funasr.utils.timestamp_tools import timestamp_sentence
 from funasr.download.download_from_hub import download_model
 from funasr.utils.vad_utils import slice_padding_audio_samples
+from funasr.utils.vad_utils import merge_vad
 from funasr.utils.load_utils import load_audio_text_image_video
 from funasr.train_utils.set_all_random_seed import set_all_random_seed
 from funasr.train_utils.load_pretrained_model import load_pretrained_model
@@ -174,6 +175,8 @@
             kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
             kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
             vocab_size = len(kwargs["token_list"]) if kwargs["token_list"] is not None else -1
+            if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
+                vocab_size = tokenizer.get_vocab_size()
         else:
             vocab_size = -1
         kwargs["tokenizer"] = tokenizer
@@ -202,7 +205,7 @@
                 load_pretrained_model(
                     model=model,
                     path=init_param,
-                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
+                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
                     oss_bucket=kwargs.get("oss_bucket", None),
                     scope_map=kwargs.get("scope_map", []),
                     excludes=kwargs.get("excludes", None),
@@ -210,6 +213,9 @@
             else:
                 print(f"error, init_param does not exist!: {init_param}")
         
+        # fp16
+        if kwargs.get("fp16", False):
+            model.to(torch.float16)
         return model, kwargs
     
     def __call__(self, *args, **cfg):
@@ -295,6 +301,10 @@
         res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
         end_vad = time.time()
 
+        # FIX(gcf): concat the VAD clips for the SenseVoice model for better AED
+        if kwargs.get("merge_vad", False):
+            for i in range(len(res)):
+                res[i]['value'] = merge_vad(res[i]['value'], kwargs.get("merge_length", 15000))
 
         # step.2 compute asr model
         model = self.model
@@ -407,7 +417,7 @@
             return_raw_text = kwargs.get('return_raw_text', False)
             # step.3 compute punc model
             if self.punc_model is not None:
-                if not len(result["text"]):
+                if not len(result["text"].strip()):
                     if return_raw_text:
                         result['raw_text'] = ''
                 else:

--
Gitblit v1.9.1