From 98c94ab3ab0266482117343a064beeb6bd6bcedc Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 28 Feb 2024 20:45:07 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR

---
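Notes: the main behavioral changes in this merge are that disable_log now
defaults to True (the model-table dump becomes opt-in), the tokenizer vocab
falls back from token_list to get_vocab(), init_param is checked for existence
before loading pretrained weights, the progress bars honor disable_pbar from
the AutoModel kwargs, and the punctuation model is skipped when the ASR text
is empty.

A minimal usage sketch of the affected code path follows; the model names and
kwargs are taken from the FunASR examples and are illustrative here, not part
of this patch:

    from funasr import AutoModel

    model = AutoModel(
        model="paraformer-zh",   # ASR model from the FunASR model zoo
        vad_model="fsmn-vad",    # enables inference_with_vad
        punc_model="ct-punc",    # punctuation model, now skipped on empty text
        disable_log=True,        # the new default after this patch
        disable_pbar=False,      # now read from AutoModel kwargs, not per call
    )
    # batch_size_s controls how many seconds of audio go into one ASR batch
    res = model.generate(input="example.wav", batch_size_s=300)
    print(res[0]["text"])
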
 funasr/auto/auto_model.py |   96 +++++++++++++++++++++++++++---------------------
 1 file changed, 54 insertions(+), 42 deletions(-)

diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index e5faa2a..a6be691 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -1,3 +1,8 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 import json
 import time
 import copy
@@ -12,12 +17,12 @@
 from funasr.register import tables
 from funasr.utils.load_utils import load_bytes
 from funasr.download.file import download_from_url
+from funasr.utils.timestamp_tools import timestamp_sentence
 from funasr.download.download_from_hub import download_model
 from funasr.utils.vad_utils import slice_padding_audio_samples
+from funasr.utils.load_utils import load_audio_text_image_video
 from funasr.train_utils.set_all_random_seed import set_all_random_seed
 from funasr.train_utils.load_pretrained_model import load_pretrained_model
-from funasr.utils.load_utils import load_audio_text_image_video
-from funasr.utils.timestamp_tools import timestamp_sentence
 from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
 try:
     from funasr.models.campplus.cluster_backend import ClusterBackend
@@ -90,7 +95,7 @@
 class AutoModel:
     
     def __init__(self, **kwargs):
-        if not kwargs.get("disable_log", False):
+        if not kwargs.get("disable_log", True):
             tables.print()
         
         model, kwargs = self.build_model(**kwargs)
@@ -157,8 +162,10 @@
             tokenizer_class = tables.tokenizer_classes.get(tokenizer)
             tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
             kwargs["tokenizer"] = tokenizer
-            kwargs["token_list"] = tokenizer.token_list
-            vocab_size = len(tokenizer.token_list)
+
+            kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
+            kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
+            vocab_size = len(kwargs["token_list"])
         else:
             vocab_size = -1
         
@@ -179,15 +186,18 @@
         # init_param
         init_param = kwargs.get("init_param", None)
         if init_param is not None:
-            logging.info(f"Loading pretrained params from {init_param}")
-            load_pretrained_model(
-                model=model,
-                path=init_param,
-                ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
-                oss_bucket=kwargs.get("oss_bucket", None),
-                scope_map=kwargs.get("scope_map", None),
-                excludes=kwargs.get("excludes", None),
-            )
+            if os.path.exists(init_param):
+                logging.info(f"Loading pretrained params from {init_param}")
+                load_pretrained_model(
+                    model=model,
+                    path=init_param,
+                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
+                    oss_bucket=kwargs.get("oss_bucket", None),
+                    scope_map=kwargs.get("scope_map", []),
+                    excludes=kwargs.get("excludes", None),
+                )
+            else:
+                print(f"error, init_param does not exist!: {init_param}")
         
         return model, kwargs
     
@@ -219,7 +229,7 @@
         speed_stats = {}
         asr_result_list = []
         num_samples = len(data_list)
-        disable_pbar = kwargs.get("disable_pbar", False)
+        disable_pbar = self.kwargs.get("disable_pbar", False)
         pbar = tqdm(colour="blue", total=num_samples, dynamic_ncols=True) if not disable_pbar else None
         time_speech_total = 0.0
         time_escape_total = 0.0
@@ -231,12 +241,12 @@
             if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
                 batch["data_in"] = data_batch[0]
                 batch["data_lengths"] = input_len
-        
+
             time1 = time.perf_counter()
             with torch.no_grad():
                 results, meta_data = model.inference(**batch, **kwargs)
             time2 = time.perf_counter()
-            
+
             asr_result_list.extend(results)
 
             # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
@@ -261,31 +271,29 @@
             pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
         torch.cuda.empty_cache()
         return asr_result_list
-    
+
     def inference_with_vad(self, input, input_len=None, **cfg):
-        
+        kwargs = self.kwargs
         # step.1: compute the vad model
         self.vad_kwargs.update(cfg)
         beg_vad = time.time()
         res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
         end_vad = time.time()
-        print(f"time cost vad: {end_vad - beg_vad:0.3f}")
 
 
         # step.2 compute asr model
         model = self.model
-        kwargs = self.kwargs
         kwargs.update(cfg)
         batch_size = int(kwargs.get("batch_size_s", 300))*1000
         batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60))*1000
         kwargs["batch_size"] = batch_size
-        
+
         key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None))
         results_ret_list = []
         time_speech_total_all_samples = 1e-6
 
         beg_total = time.time()
-        pbar_total = tqdm(colour="red", total=len(res), dynamic_ncols=True)
+        pbar_total = tqdm(colour="red", total=len(res), dynamic_ncols=True) if not kwargs.get("disable_pbar", False) else None
         for i in range(len(res)):
             key = res[i]["key"]
             vadsegments = res[i]["value"]
@@ -296,14 +304,14 @@
             data_with_index = [(vadsegments[i], i) for i in range(n)]
             sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
             results_sorted = []
-            
+
             if not len(sorted_data):
                 logging.info("decoding, utt: {}, empty speech".format(key))
                 continue
 
             if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
                 batch_size = max(batch_size, sorted_data[0][0][1] - sorted_data[0][0][0])
-            
+
             batch_size_ms_cum = 0
             beg_idx = 0
             beg_asr_total = time.time()
@@ -322,8 +330,8 @@
                     continue
                 batch_size_ms_cum = 0
                 end_idx = j + 1
-                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])       
-                results = self.inference(speech_j, input_len=None, model=model, kwargs=kwargs, disable_pbar=True, **cfg)
+                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])
+                results = self.inference(speech_j, input_len=None, model=model, kwargs=kwargs, **cfg)
                 if self.spk_model is not None:
                     # compose vad segments: [[start_time_sec, end_time_sec, speech], [...]]
                     for _b in range(len(speech_j)):
@@ -333,26 +341,26 @@
                         segments = sv_chunk(vad_segments)
                         all_segments.extend(segments)
                         speech_b = [i[2] for i in segments]
-                        spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, disable_pbar=True, **cfg)
+                        spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, **cfg)
                         results[_b]['spk_embedding'] = spk_res[0]['spk_embedding']
                 beg_idx = end_idx
                 if len(results) < 1:
                     continue
                 results_sorted.extend(results)
-            
+
             # end_asr_total = time.time()
             # time_escape_total_per_sample = end_asr_total - beg_asr_total
             # pbar_sample.update(1)
             # pbar_sample.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
             #                      f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
             #                      f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")
-            
+
             restored_data = [0] * n
             for j in range(n):
                 index = sorted_data[j][1]
                 restored_data[index] = results_sorted[j]
             result = {}
-            
+
             # results combine for texts, timestamps, speaker embeddings and others
             # TODO: rewrite for clean code
             for j in range(n):
@@ -379,18 +387,21 @@
                             result[k] = restored_data[j][k]
                         else:
                             result[k] += restored_data[j][k]
-            
-            return_raw_text = kwargs.get('return_raw_text', False)            
+
+            return_raw_text = kwargs.get('return_raw_text', False)
             # step.3 compute punc model
             if self.punc_model is not None:
-                self.punc_kwargs.update(cfg)
-                punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, disable_pbar=True, **cfg)
-                raw_text = copy.copy(result["text"])
-                if return_raw_text: result['raw_text'] = raw_text
-                result["text"] = punc_res[0]["text"]
+                if not len(result["text"]):
+                    result['raw_text'] = ''
+                else:
+                    self.punc_kwargs.update(cfg)
+                    punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)
+                    raw_text = copy.copy(result["text"])
+                    if return_raw_text: result['raw_text'] = raw_text
+                    result["text"] = punc_res[0]["text"]
             else:
                 raw_text = None
-                
+
             # speaker embedding cluster after resorted
             if self.spk_model is not None and kwargs.get('return_spk_res', True):
                 if raw_text is None:
@@ -429,13 +440,14 @@
                                                    return_raw_text=return_raw_text)
                 result['sentence_info'] = sentence_list
             if "spk_embedding" in result: del result['spk_embedding']
-                    
+
             result["key"] = key
             results_ret_list.append(result)
             end_asr_total = time.time()
             time_escape_total_per_sample = end_asr_total - beg_asr_total
-            pbar_total.update(1)
-            pbar_total.set_description(f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
+            if pbar_total:
+                pbar_total.update(1)
+                pbar_total.set_description(f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
                                  f"time_speech: {time_speech_total_per_sample: 0.3f}, "
                                  f"time_escape: {time_escape_total_per_sample:0.3f}")
 

--
Gitblit v1.9.1