From a7d7a0f3a2e7cd44a337ced34e3536b12ccb534e Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 11 Mar 2024 19:24:44 +0800
Subject: [PATCH] Dev gzf (#1467)

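Refactor AutoModel: merge per-call options into the stored config with
deep_update instead of dict.update, make the speaker-diarization imports
optional, guard init_param loading and the progress bars, add a
return_raw_text option around the punctuation step, and add an export()
entry point for ONNX/TorchScript via funasr.utils.export_utils.

A minimal usage sketch (the model names below are illustrative, not part
of this patch):

    from funasr.auto.auto_model import AutoModel

    model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad",
                      punc_model="ct-punc")
    res = model.inference_with_vad("example.wav", return_raw_text=True)
    export_dir = model.export(input="example.wav", type="onnx")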
---
 funasr/auto/auto_model.py |  216 ++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 145 insertions(+), 71 deletions(-)

diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 1345157..28b9e94 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -1,26 +1,35 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
 import json
 import time
+import copy
 import torch
-import hydra
 import random
 import string
 import logging
 import os.path
 import numpy as np
 from tqdm import tqdm
-from omegaconf import DictConfig, OmegaConf, ListConfig
 
+from funasr.utils.misc import deep_update
 from funasr.register import tables
 from funasr.utils.load_utils import load_bytes
 from funasr.download.file import download_from_url
+from funasr.utils.timestamp_tools import timestamp_sentence
 from funasr.download.download_from_hub import download_model
 from funasr.utils.vad_utils import slice_padding_audio_samples
+from funasr.utils.load_utils import load_audio_text_image_video
 from funasr.train_utils.set_all_random_seed import set_all_random_seed
 from funasr.train_utils.load_pretrained_model import load_pretrained_model
-from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
-from funasr.utils.timestamp_tools import timestamp_sentence
-from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
-from funasr.models.campplus.cluster_backend import ClusterBackend
+from funasr.utils import export_utils
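+# Speaker-diarization extras (e.g. hdbscan) are optional; degrade gracefully if absent.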
+try:
+    from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
+    from funasr.models.campplus.cluster_backend import ClusterBackend
+except ImportError:
+    print("If you want to use speaker diarization, please `pip install hdbscan`")
 
 
 def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
@@ -34,11 +43,12 @@
     """
     data_list = []
     key_list = []
-    filelist = [".scp", ".txt", ".json", ".jsonl"]
+    filelist = [".scp", ".txt", ".json", ".jsonl", ".text"]
     
     chars = string.ascii_letters + string.digits
     if isinstance(data_in, str) and data_in.startswith('http'): # url
         data_in = download_from_url(data_in)
+
-    if isinstance(data_in, str) and os.path.exists(data_in): # wav_path; filelist: wav.scp, file.jsonl;text.txt;
+    if isinstance(data_in, str) and os.path.exists(data_in):  # wav path, or a file list (wav.scp, file.jsonl, text.txt)
         _, file_extension = os.path.splitext(data_in)
         file_extension = file_extension.lower()
@@ -88,9 +98,9 @@
 class AutoModel:
     
     def __init__(self, **kwargs):
-        if not kwargs.get("disable_log", False):
+        if not kwargs.get("disable_log", True):
             tables.print()
-        
+
         model, kwargs = self.build_model(**kwargs)
         
         # if vad_model is not None, build vad model else None
@@ -135,18 +145,18 @@
     def build_model(self, **kwargs):
         assert "model" in kwargs
         if "model_conf" not in kwargs:
-            logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
+            logging.info("download models from model hub: {}".format(kwargs.get("hub", "ms")))
             kwargs = download_model(**kwargs)
         
         set_all_random_seed(kwargs.get("seed", 0))
-        
+
         device = kwargs.get("device", "cuda")
         if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
             device = "cpu"
             kwargs["batch_size"] = 1
         kwargs["device"] = device
         
-        if kwargs.get("ncpu", None):
-            torch.set_num_threads(kwargs.get("ncpu"))
+        ncpu = kwargs.get("ncpu", 4)
+        if ncpu:
+            torch.set_num_threads(ncpu)
         
         # build tokenizer
@@ -155,43 +165,47 @@
             tokenizer_class = tables.tokenizer_classes.get(tokenizer)
             tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
             kwargs["tokenizer"] = tokenizer
-            kwargs["token_list"] = tokenizer.token_list
-            vocab_size = len(tokenizer.token_list)
+
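+            # Prefer tokenizer.get_vocab() when available; otherwise fall back to token_list (may be None).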
+            kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
+            kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
+            vocab_size = len(kwargs["token_list"]) if kwargs["token_list"] is not None else -1
         else:
             vocab_size = -1
-        
         # build frontend
         frontend = kwargs.get("frontend", None)
+        kwargs["input_size"] = None
         if frontend is not None:
             frontend_class = tables.frontend_classes.get(frontend)
             frontend = frontend_class(**kwargs["frontend_conf"])
             kwargs["frontend"] = frontend
-            kwargs["input_size"] = frontend.output_size()
+            kwargs["input_size"] = frontend.output_size() if hasattr(frontend, "output_size") else None
         
         # build model
         model_class = tables.model_classes.get(kwargs["model"])
-        model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
-        
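+        # model_conf may be absent in some configs; fall back to an empty dict.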
+        model = model_class(**kwargs, **kwargs.get("model_conf", {}), vocab_size=vocab_size)
         model.to(device)
         
         # init_param
         init_param = kwargs.get("init_param", None)
         if init_param is not None:
-            logging.info(f"Loading pretrained params from {init_param}")
-            load_pretrained_model(
-                model=model,
-                path=init_param,
-                ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
-                oss_bucket=kwargs.get("oss_bucket", None),
-                scope_map=kwargs.get("scope_map", None),
-                excludes=kwargs.get("excludes", None),
-            )
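+            # Skip loading (with an error) when the checkpoint path does not exist.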
+            if os.path.exists(init_param):
+                logging.info(f"Loading pretrained params from {init_param}")
+                load_pretrained_model(
+                    model=model,
+                    path=init_param,
+                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
+                    oss_bucket=kwargs.get("oss_bucket", None),
+                    scope_map=kwargs.get("scope_map", []),
+                    excludes=kwargs.get("excludes", None),
+                )
+            else:
+                logging.error(f"init_param does not exist: {init_param}")
         
         return model, kwargs
     
     def __call__(self, *args, **cfg):
         kwargs = self.kwargs
-        kwargs.update(cfg)
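+        # deep_update merges nested dicts recursively, unlike dict.update.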
+        deep_update(kwargs, cfg)
-        res = self.model(*args, kwargs)
+        res = self.model(*args, **kwargs)
         return res
 
@@ -204,20 +218,20 @@
         
     def inference(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
         kwargs = self.kwargs if kwargs is None else kwargs
-        kwargs.update(cfg)
+        deep_update(kwargs, cfg)
         model = self.model if model is None else model
         model.eval()
 
         batch_size = kwargs.get("batch_size", 1)
         # if kwargs.get("device", "cpu") == "cpu":
         #     batch_size = 1
-        
+
         key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key)
-        
+
         speed_stats = {}
         asr_result_list = []
         num_samples = len(data_list)
-        disable_pbar = kwargs.get("disable_pbar", False)
+        disable_pbar = self.kwargs.get("disable_pbar", False)
         pbar = tqdm(colour="blue", total=num_samples, dynamic_ncols=True) if not disable_pbar else None
         time_speech_total = 0.0
         time_escape_total = 0.0
@@ -226,15 +240,19 @@
             data_batch = data_list[beg_idx:end_idx]
             key_batch = key_list[beg_idx:end_idx]
             batch = {"data_in": data_batch, "key": key_batch}
+
             if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
                 batch["data_in"] = data_batch[0]
                 batch["data_lengths"] = input_len
-        
+
             time1 = time.perf_counter()
             with torch.no_grad():
-                results, meta_data = model.inference(**batch, **kwargs)
+                res = model.inference(**batch, **kwargs)
+                if isinstance(res, (list, tuple)):
+                    results = res[0]
+                    meta_data = res[1] if len(res) > 1 else {}
+                else:
+                    results, meta_data = res, {}
             time2 = time.perf_counter()
-            
+
             asr_result_list.extend(results)
 
             # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
@@ -259,31 +277,29 @@
             pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
         torch.cuda.empty_cache()
         return asr_result_list
-    
+
     def inference_with_vad(self, input, input_len=None, **cfg):
-        
+        kwargs = self.kwargs
         # step.1: compute the vad model
-        self.vad_kwargs.update(cfg)
+        deep_update(self.vad_kwargs, cfg)
         beg_vad = time.time()
         res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
         end_vad = time.time()
-        print(f"time cost vad: {end_vad - beg_vad:0.3f}")
 
 
         # step.2 compute asr model
         model = self.model
-        kwargs = self.kwargs
-        kwargs.update(cfg)
+        deep_update(kwargs, cfg)
         batch_size = int(kwargs.get("batch_size_s", 300))*1000
         batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60))*1000
         kwargs["batch_size"] = batch_size
-        
+
         key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None))
         results_ret_list = []
         time_speech_total_all_samples = 1e-6
 
         beg_total = time.time()
-        pbar_total = tqdm(colour="red", total=len(res), dynamic_ncols=True)
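+            # Honor disable_pbar for the outer progress bar as well.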
+        pbar_total = tqdm(colour="red", total=len(res), dynamic_ncols=True) if not kwargs.get("disable_pbar", False) else None
         for i in range(len(res)):
             key = res[i]["key"]
             vadsegments = res[i]["value"]
@@ -294,14 +310,14 @@
             data_with_index = [(vadsegments[i], i) for i in range(n)]
             sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
             results_sorted = []
-            
+
             if not len(sorted_data):
                 logging.info("decoding, utt: {}, empty speech".format(key))
                 continue
 
             if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
                 batch_size = max(batch_size, sorted_data[0][0][1] - sorted_data[0][0][0])
-            
+
             batch_size_ms_cum = 0
             beg_idx = 0
             beg_asr_total = time.time()
@@ -320,8 +336,8 @@
                     continue
                 batch_size_ms_cum = 0
                 end_idx = j + 1
-                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])       
-                results = self.inference(speech_j, input_len=None, model=model, kwargs=kwargs, disable_pbar=True, **cfg)
+                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])
+                results = self.inference(speech_j, input_len=None, model=model, kwargs=kwargs, **cfg)
                 if self.spk_model is not None:
                     # compose vad segments: [[start_time_sec, end_time_sec, speech], [...]]
                     for _b in range(len(speech_j)):
@@ -331,26 +347,26 @@
                         segments = sv_chunk(vad_segments)
                         all_segments.extend(segments)
                         speech_b = [i[2] for i in segments]
-                        spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, disable_pbar=True, **cfg)
+                        spk_res = self.inference(speech_b, input_len=None, model=self.spk_model, kwargs=kwargs, **cfg)
                         results[_b]['spk_embedding'] = spk_res[0]['spk_embedding']
                 beg_idx = end_idx
                 if len(results) < 1:
                     continue
                 results_sorted.extend(results)
-            
+
             # end_asr_total = time.time()
             # time_escape_total_per_sample = end_asr_total - beg_asr_total
             # pbar_sample.update(1)
             # pbar_sample.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
             #                      f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
             #                      f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")
-            
+
             restored_data = [0] * n
             for j in range(n):
                 index = sorted_data[j][1]
                 restored_data[index] = results_sorted[j]
             result = {}
-            
+
             # results combine for texts, timestamps, speaker embeddings and others
             # TODO: rewrite for clean code
             for j in range(n):
@@ -377,16 +393,26 @@
                             result[k] = restored_data[j][k]
                         else:
                             result[k] += restored_data[j][k]
-                            
+
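+            # Optionally keep the unpunctuated ASR text under result['raw_text'].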
+            return_raw_text = kwargs.get('return_raw_text', False)
             # step.3 compute punc model
             if self.punc_model is not None:
-                self.punc_kwargs.update(cfg)
-                punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, disable_pbar=True, **cfg)
-                import copy; raw_text = copy.copy(result["text"])
-                result["text"] = punc_res[0]["text"]
-                
+                if not len(result["text"]):
+                    raw_text = None
+                    if return_raw_text:
+                        result['raw_text'] = ''
+                else:
+                    deep_update(self.punc_kwargs, cfg)
+                    punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)
+                    raw_text = copy.copy(result["text"])
+                    if return_raw_text:
+                        result['raw_text'] = raw_text
+                    result["text"] = punc_res[0]["text"]
+            else:
+                raw_text = None
+
             # speaker embedding cluster after resorted
             if self.spk_model is not None and kwargs.get('return_spk_res', True):
+                if raw_text is None:
+                    logging.error("Missing punc_model, which is required by spk_model.")
                 all_segments = sorted(all_segments, key=lambda x: x[0])
                 spk_embedding = result['spk_embedding']
                 labels = self.cb_model(spk_embedding.cpu(), oracle_num=kwargs.get('preset_spk_num', None))
@@ -395,29 +421,43 @@
                 if self.spk_mode == 'vad_segment':  # recover sentence_list
                     sentence_list = []
                     for res, vadsegment in zip(restored_data, vadsegments):
-                        sentence_list.append({"start": vadsegment[0],\
-                                                "end": vadsegment[1],
-                                                "sentence": res['raw_text'],
-                                                "timestamp": res['timestamp']})
+                        if 'timestamp' not in res:
+                            logging.error(
+                                "Only 'iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
+                                "and 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
+                                "can predict timestamps, and speaker diarization relies on timestamps.")
+                        sentence_list.append({"start": vadsegment[0],
+                                              "end": vadsegment[1],
+                                              "sentence": res['text'],
+                                              "timestamp": res['timestamp']})
                 elif self.spk_mode == 'punc_segment':
-                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'], \
-                                                        result['timestamp'], \
-                                                        result['raw_text'])
+                    if 'timestamp' not in result:
+                        logging.error(
+                            "Only 'iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
+                            "and 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "
+                            "can predict timestamps, and speaker diarization relies on timestamps.")
+                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'],
+                                                       result['timestamp'],
+                                                       raw_text,
+                                                       return_raw_text=return_raw_text)
                 distribute_spk(sentence_list, sv_output)
                 result['sentence_info'] = sentence_list
             elif kwargs.get("sentence_timestamp", False):
-                sentence_list = timestamp_sentence(punc_res[0]['punc_array'], \
-                                                        result['timestamp'], \
-                                                        result['raw_text'])
+                if not len(result['text']):
+                    sentence_list = []
+                else:
+                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'],
+                                                       result['timestamp'],
+                                                       raw_text,
+                                                       return_raw_text=return_raw_text)
                 result['sentence_info'] = sentence_list
-            del result['spk_embedding']
-                    
+            if "spk_embedding" in result:
+                del result["spk_embedding"]
+
             result["key"] = key
             results_ret_list.append(result)
             end_asr_total = time.time()
             time_escape_total_per_sample = end_asr_total - beg_asr_total
-            pbar_total.update(1)
-            pbar_total.set_description(f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
-                                 f"time_speech: {time_speech_total_per_sample: 0.3f}, "
-                                 f"time_escape: {time_escape_total_per_sample:0.3f}")
+            if pbar_total:
+                pbar_total.update(1)
+                pbar_total.set_description(f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
+                                           f"time_speech: {time_speech_total_per_sample: 0.3f}, "
+                                           f"time_escape: {time_escape_total_per_sample:0.3f}")
 
@@ -429,3 +469,37 @@
         #                      f"time_escape_all: {time_escape_total_all_samples:0.3f}")
         return results_ret_list
 
+    def export(self,
+               input=None,
+               type: str = "onnx",
+               quantize: bool = False,
+               fallback_num: int = 5,
+               calib_num: int = 100,
+               opset_version: int = 14,
+               **cfg):
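+        """Export the model via funasr.utils.export_utils.
+
+        With type="onnx" (default) this calls export_utils.export_onnx;
+        any other value falls back to export_utils.export_torchscripts.
+        Returns the exporter's output directory (export_dir).
+        """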
+        device = cfg.get("device", "cpu")
+        model = self.model.to(device=device)
+        kwargs = self.kwargs
+        deep_update(kwargs, cfg)
+        kwargs["device"] = device
+        del kwargs["model"]
+        model.eval()
+
+        batch_size = 1
+
+        key_list, data_list = prepare_data_iterator(input, input_len=None, data_type=kwargs.get("data_type", None), key=None)
+
+        with torch.no_grad():
+            if type == "onnx":
+                export_dir = export_utils.export_onnx(model=model, data_in=data_list, **kwargs)
+            else:
+                export_dir = export_utils.export_torchscripts(model=model, data_in=data_list, **kwargs)
+
+        return export_dir
\ No newline at end of file

--
Gitblit v1.9.1