游雁
2024-02-29 429bc77c036e85e1891fd216ecbcd4704e3558ba
funasr/auto/auto_model.py
@@ -1,25 +1,33 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
#  MIT License  (https://opensource.org/licenses/MIT)
import json
import time
import copy
import torch
import hydra
import random
import string
import logging
import os.path
import numpy as np
from tqdm import tqdm
from omegaconf import DictConfig, OmegaConf, ListConfig
from funasr.register import tables
from funasr.utils.load_utils import load_bytes
from funasr.download.file import download_from_url
from funasr.utils.timestamp_tools import timestamp_sentence
from funasr.download.download_from_hub import download_model
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.utils.load_utils import load_audio_text_image_video
from funasr.train_utils.set_all_random_seed import set_all_random_seed
from funasr.train_utils.load_pretrained_model import load_pretrained_model
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.utils.timestamp_tools import timestamp_sentence
from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
from funasr.models.campplus.cluster_backend import ClusterBackend
try:
    from funasr.models.campplus.cluster_backend import ClusterBackend
except ImportError:
    print("If you want to use speaker diarization, please `pip install hdbscan`")
def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
@@ -87,7 +95,8 @@
class AutoModel:
    
    def __init__(self, **kwargs):
        tables.print()
        if not kwargs.get("disable_log", True):
            tables.print()
        
        model, kwargs = self.build_model(**kwargs)
        
@@ -96,7 +105,7 @@
        vad_kwargs = kwargs.get("vad_model_revision", None)
        if vad_model is not None:
            logging.info("Building VAD model.")
            vad_kwargs = {"model": vad_model, "model_revision": vad_kwargs}
            vad_kwargs = {"model": vad_model, "model_revision": vad_kwargs, "device": kwargs["device"]}
            vad_model, vad_kwargs = self.build_model(**vad_kwargs)
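The "device" entry is now threaded from the top-level kwargs into each sub-model's build kwargs, so the VAD, punctuation, and speaker models land on the same device as the main model instead of each resolving its own default. The inheritance in isolation (submodel_kwargs is a hypothetical helper):

import torch

def submodel_kwargs(parent_kwargs, model_id, revision=None):
    # Sub-models inherit the parent's already-resolved device string.
    return {
        "model": model_id,
        "model_revision": revision,
        "device": parent_kwargs.get("device", "cuda" if torch.cuda.is_available() else "cpu"),
    }
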
        # if punc_model is not None, build punc model else None
@@ -104,7 +113,7 @@
        punc_kwargs = kwargs.get("punc_model_revision", None)
        if punc_model is not None:
            logging.info("Building punc model.")
            punc_kwargs = {"model": punc_model, "model_revision": punc_kwargs}
            punc_kwargs = {"model": punc_model, "model_revision": punc_kwargs, "device": kwargs["device"]}
            punc_model, punc_kwargs = self.build_model(**punc_kwargs)
        # if spk_model is not None, build spk model else None
@@ -112,17 +121,13 @@
        spk_kwargs = kwargs.get("spk_model_revision", None)
        if spk_model is not None:
            logging.info("Building SPK model.")
            spk_kwargs = {"model": spk_model, "model_revision": spk_kwargs}
            spk_kwargs = {"model": spk_model, "model_revision": spk_kwargs, "device": kwargs["device"]}
            spk_model, spk_kwargs = self.build_model(**spk_kwargs)
            self.cb_model = ClusterBackend()
            self.cb_model = ClusterBackend().to(kwargs["device"])
            spk_mode = kwargs.get("spk_mode", 'punc_segment')
            if spk_mode not in ["default", "vad_segment", "punc_segment"]:
                logging.error("spk_mode should be one of default, vad_segment and punc_segment.")
            self.spk_mode = spk_mode
            self.preset_spk_num = kwargs.get("preset_spk_num", None)
            if self.preset_spk_num:
                logging.warning("Using preset speaker number: {}".format(self.preset_spk_num))
            logging.warning("Many to print when using speaker model...")
            
        self.kwargs = kwargs
        self.model = model
@@ -132,8 +137,7 @@
        self.punc_kwargs = punc_kwargs
        self.spk_model = spk_model
        self.spk_kwargs = spk_kwargs
        self.model_path = kwargs["model_path"]
        self.model_path = kwargs.get("model_path")
        
    def build_model(self, **kwargs):
        assert "model" in kwargs
@@ -144,9 +148,9 @@
        set_all_random_seed(kwargs.get("seed", 0))
        
        device = kwargs.get("device", "cuda")
        if not torch.cuda.is_available() or kwargs.get("ngpu", 0):
        if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
            device = "cpu"
            # kwargs["batch_size"] = 1
            kwargs["batch_size"] = 1
        kwargs["device"] = device
        
        if kwargs.get("ncpu", None):
@@ -158,8 +162,10 @@
            tokenizer_class = tables.tokenizer_classes.get(tokenizer)
            tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
            kwargs["tokenizer"] = tokenizer
            kwargs["token_list"] = tokenizer.token_list
            vocab_size = len(tokenizer.token_list)
            kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
            kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
            vocab_size = len(kwargs["token_list"])
        else:
            vocab_size = -1
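The tokenizer handling now tolerates two interfaces: an explicit token_list attribute, or a HuggingFace-style get_vocab() method, with get_vocab() taking precedence. A defensive version of the same lookup (the None guard on len() is an addition here; the code above would still raise if a tokenizer exposes neither):

def tokenizer_vocab(tokenizer):
    # Prefer get_vocab() when available, else a plain token_list, else None.
    token_list = getattr(tokenizer, "token_list", None)
    if hasattr(tokenizer, "get_vocab"):
        token_list = tokenizer.get_vocab()
    vocab_size = len(token_list) if token_list is not None else -1
    return token_list, vocab_size
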
        
@@ -174,19 +180,24 @@
        # build model
        model_class = tables.model_classes.get(kwargs["model"])
        model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
        model.eval()
        model.to(device)
        
        # init_param
        init_param = kwargs.get("init_param", None)
        if init_param is not None:
            logging.info(f"Loading pretrained params from {init_param}")
            load_pretrained_model(
                model=model,
                init_param=init_param,
                ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
                oss_bucket=kwargs.get("oss_bucket", None),
            )
            if os.path.exists(init_param):
                logging.info(f"Loading pretrained params from {init_param}")
                load_pretrained_model(
                    model=model,
                    path=init_param,
                    ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
                    oss_bucket=kwargs.get("oss_bucket", None),
                    scope_map=kwargs.get("scope_map", []),
                    excludes=kwargs.get("excludes", None),
                )
            else:
                print(f"error, init_param does not exist!: {init_param}")
        
        return model, kwargs
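Guarding the load with os.path.exists turns a bad init_param path into a single readable error instead of a traceback from deep inside the loader, and the new scope_map/excludes arguments let callers remap or skip parameter names. A stripped-down sketch with plain torch.load standing in for load_pretrained_model:

import os
import logging
import torch

def load_init_param(model, path, ignore_mismatch=True):
    # Validate the path up front; the loader's own failure mode is less readable.
    if not os.path.exists(path):
        logging.error(f"init_param does not exist: {path}")
        return False
    state_dict = torch.load(path, map_location="cpu")
    # strict=False roughly plays the role of ignore_init_mismatch above.
    model.load_state_dict(state_dict, strict=not ignore_mismatch)
    return True
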
    
@@ -195,8 +206,6 @@
        kwargs.update(cfg)
        res = self.model(*args, kwargs)
        return res
    def generate(self, input, input_len=None, **cfg):
        if self.vad_model is None:
@@ -209,6 +218,7 @@
        kwargs = self.kwargs if kwargs is None else kwargs
        kwargs.update(cfg)
        model = self.model if model is None else model
        model.eval()
        batch_size = kwargs.get("batch_size", 1)
        # if kwargs.get("device", "cpu") == "cpu":
@@ -219,7 +229,8 @@
        speed_stats = {}
        asr_result_list = []
        num_samples = len(data_list)
        pbar = tqdm(colour="blue", total=num_samples+1, dynamic_ncols=True)
        disable_pbar = self.kwargs.get("disable_pbar", False)
        pbar = tqdm(colour="blue", total=num_samples, dynamic_ncols=True) if not disable_pbar else None
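disable_pbar replaces the progress bar with a None handle, so every later use has to be guarded. The pattern in isolation; tqdm's own disable=True argument would achieve the same without the None checks:

from tqdm import tqdm

def run_batches(items, disable_pbar=False):
    pbar = tqdm(total=len(items), colour="blue", dynamic_ncols=True) if not disable_pbar else None
    out = []
    for item in items:
        out.append(item)  # stand-in for model.inference(...)
        if pbar:
            pbar.update(1)
    if pbar:
        pbar.close()
    return out
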
        time_speech_total = 0.0
        time_escape_total = 0.0
        for beg_idx in range(0, num_samples, batch_size):
@@ -227,18 +238,17 @@
            data_batch = data_list[beg_idx:end_idx]
            key_batch = key_list[beg_idx:end_idx]
            batch = {"data_in": data_batch, "key": key_batch}
            if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
            if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
                batch["data_in"] = data_batch[0]
                batch["data_lengths"] = input_len
            time1 = time.perf_counter()
            with torch.no_grad():
                results, meta_data = model.inference(**batch, **kwargs)
            time2 = time.perf_counter()
            asr_result_list.extend(results)
            pbar.update(1)
            # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
            batch_data_time = meta_data.get("batch_data_time", -1)
            time_escape = time2 - time1
@@ -250,39 +260,40 @@
            description = (
                f"{speed_stats}, "
            )
            pbar.set_description(description)
            if pbar:
                pbar.update(1)
                pbar.set_description(description)
            time_speech_total += batch_data_time
            time_escape_total += time_escape
        pbar.update(1)
        pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
        if pbar:
            # pbar.update(1)
            pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
        torch.cuda.empty_cache()
        return asr_result_list
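The descriptions report RTF (real-time factor): seconds of compute per second of audio, so rtf_avg below 1.0 means faster than real time. The arithmetic, with the same kind of epsilon the VAD path below uses to avoid dividing by zero on empty input:

def real_time_factor(compute_seconds, audio_seconds):
    # e.g. 6 s of compute for 60 s of audio -> RTF 0.1, i.e. 10x real time
    return compute_seconds / max(audio_seconds, 1e-6)

assert abs(real_time_factor(6.0, 60.0) - 0.1) < 1e-9
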
    def inference_with_vad(self, input, input_len=None, **cfg):
        kwargs = self.kwargs
        # step.1: compute the vad model
        self.vad_kwargs.update(cfg)
        beg_vad = time.time()
        res = self.inference(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
        end_vad = time.time()
        print(f"time cost vad: {end_vad - beg_vad:0.3f}")
        # step.2 compute asr model
        model = self.model
        kwargs = self.kwargs
        kwargs.update(cfg)
        batch_size = int(kwargs.get("batch_size_s", 300))*1000
        batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60))*1000
        kwargs["batch_size"] = batch_size
        key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None))
        results_ret_list = []
        time_speech_total_all_samples = 0.0
        time_speech_total_all_samples = 1e-6
        beg_total = time.time()
        pbar_total = tqdm(colour="red", total=len(res) + 1, dynamic_ncols=True)
        pbar_total = tqdm(colour="red", total=len(res), dynamic_ncols=True) if not kwargs.get("disable_pbar", False) else None
        for i in range(len(res)):
            key = res[i]["key"]
            vadsegments = res[i]["value"]
@@ -293,21 +304,25 @@
            data_with_index = [(vadsegments[i], i) for i in range(n)]
            sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
            results_sorted = []
            if not len(sorted_data):
                logging.info("decoding, utt: {}, empty speech".format(key))
                continue
            if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
                batch_size = max(batch_size, sorted_data[0][0][1] - sorted_data[0][0][0])
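VAD segments are sorted by duration before batching so each batch pads to similar lengths, and the millisecond budget (batch_size_s * 1000) is floored at a segment duration so an oversized segment can still form a batch on its own. A self-contained sketch of this greedy duration-budget packing:

def pack_by_duration(segments_ms, budget_ms=300_000):
    # segments_ms: (start_ms, end_ms) pairs; sorting by duration reduces padding waste.
    order = sorted(range(len(segments_ms)), key=lambda i: segments_ms[i][1] - segments_ms[i][0])
    batches, current, used_ms = [], [], 0
    for i in order:
        dur = segments_ms[i][1] - segments_ms[i][0]
        if current and used_ms + dur > budget_ms:
            batches.append(current)
            current, used_ms = [], 0
        current.append(i)  # indices into the original, unsorted segment list
        used_ms += dur
    if current:
        batches.append(current)
    return batches

# pack_by_duration([(0, 2000), (2500, 3000), (4000, 9000)], budget_ms=6000) -> [[1, 0], [2]]
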
            batch_size_ms_cum = 0
            beg_idx = 0
            beg_asr_total = time.time()
            time_speech_total_per_sample = speech_lengths/16000  # assumes 16 kHz audio
            time_speech_total_all_samples += time_speech_total_per_sample
            # pbar_sample = tqdm(colour="blue", total=n, dynamic_ncols=True)
            all_segments = []
            for j, _ in enumerate(range(0, n)):
                # pbar_sample.update(1)
                batch_size_ms_cum += (sorted_data[j][0][1] - sorted_data[j][0][0])
                if j < n - 1 and (
                    batch_size_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size and (
@@ -315,15 +330,14 @@
                    continue
                batch_size_ms_cum = 0
                end_idx = j + 1
                speech_j, speech_lengths_j = slice_padding_audio_samples(speech, speech_lengths, sorted_data[beg_idx:end_idx])
                results = self.inference(speech_j, input_len=None, model=model, kwargs=kwargs, **cfg)
                if self.spk_model is not None:
                    all_segments = []
                    # compose vad segments: [[start_time_sec, end_time_sec, speech], [...]]
                    for _b in range(len(speech_j)):
                        vad_segments = [[sorted_data[beg_idx:end_idx][_b][0][0]/1000.0, \
                                        sorted_data[beg_idx:end_idx][_b][0][1]/1000.0, \
                                        speech_j[_b]]]
                        vad_segments = [[sorted_data[beg_idx:end_idx][_b][0][0]/1000.0,
                                        sorted_data[beg_idx:end_idx][_b][0][1]/1000.0,
                                        np.array(speech_j[_b])]]
                        segments = sv_chunk(vad_segments)
                        all_segments.extend(segments)
                        speech_b = [i[2] for i in segments]
@@ -334,20 +348,19 @@
                    continue
                results_sorted.extend(results)
            pbar_total.update(1)
            end_asr_total = time.time()
            time_escape_total_per_sample = end_asr_total - beg_asr_total
            pbar_total.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
                                 f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
                                 f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")
            # end_asr_total = time.time()
            # time_escape_total_per_sample = end_asr_total - beg_asr_total
            # pbar_sample.update(1)
            # pbar_sample.set_description(f"rtf_avg_per_sample: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
            #                      f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, "
            #                      f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}")
            restored_data = [0] * n
            for j in range(n):
                index = sorted_data[j][1]
                restored_data[index] = results_sorted[j]
            result = {}
            # results combine for texts, timestamps, speaker embeddings and others
            # TODO: rewrite for clean code
            for j in range(n):
@@ -364,7 +377,7 @@
                            result[k] = restored_data[j][k]
                        else:
                            result[k] = torch.cat([result[k], restored_data[j][k]], dim=0)
                    elif k == 'text':
                    elif 'text' in k:
                        if k not in result:
                            result[k] = restored_data[j][k]
                        else:
@@ -374,43 +387,79 @@
                            result[k] = restored_data[j][k]
                        else:
                            result[k] += restored_data[j][k]
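The re-sorted per-segment results are folded into one utterance-level dict by a type dispatch: tensors (spk_embedding) are concatenated along dim 0, while text-like keys and timestamp lists are combined with +, which concatenates both str and list. The merge, condensed:

import torch

def merge_segment_results(segment_results):
    merged = {}
    for seg in segment_results:  # already restored to original segment order
        for k, v in seg.items():
            if k not in merged:
                merged[k] = v
            elif isinstance(v, torch.Tensor):
                merged[k] = torch.cat([merged[k], v], dim=0)  # stack embeddings
            else:
                merged[k] = merged[k] + v  # str and list both concatenate with +
    return merged
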
            return_raw_text = kwargs.get('return_raw_text', False)
            # step.3 compute punc model
            if self.punc_model is not None:
                self.punc_kwargs.update(cfg)
                punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)
                result["text_with_punc"] = punc_res[0]["text"]
                if not len(result["text"]):
                    if return_raw_text:
                        result['raw_text'] = ''
                else:
                    self.punc_kwargs.update(cfg)
                    punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)
                    raw_text = copy.copy(result["text"])
                    if return_raw_text: result['raw_text'] = raw_text
                    result["text"] = punc_res[0]["text"]
            else:
                raw_text = None
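Before punctuation restoration overwrites result["text"], a copy of the raw transcript is kept: the punc_segment diarization path aligns timestamps against the unpunctuated tokens, and return_raw_text exposes them to the caller. The control flow in isolation (restore_punctuation is a hypothetical stand-in for the punc model call):

import copy

def punctuate(result, restore_punctuation, return_raw_text=False):
    if not result["text"]:  # nothing recognized: skip the punc model entirely
        if return_raw_text:
            result["raw_text"] = ""
        return result
    raw_text = copy.copy(result["text"])
    if return_raw_text:
        result["raw_text"] = raw_text
    result["text"] = restore_punctuation(raw_text)
    return result
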
            # speaker embedding cluster after resorted
            if self.spk_model is not None:
            if self.spk_model is not None and kwargs.get('return_spk_res', True):
                if raw_text is None:
                    logging.error("Missing punc_model, which is required by spk_model.")
                all_segments = sorted(all_segments, key=lambda x: x[0])
                spk_embedding = result['spk_embedding']
                labels = self.cb_model(spk_embedding, oracle_num=self.preset_spk_num)
                del result['spk_embedding']
                labels = self.cb_model(spk_embedding.cpu(), oracle_num=kwargs.get('preset_spk_num', None))
                # del result['spk_embedding']
                sv_output = postprocess(all_segments, None, labels, spk_embedding.cpu())
                if self.spk_mode == 'vad_segment':
                if self.spk_mode == 'vad_segment':  # recover sentence_list
                    sentence_list = []
                    for res, vadsegment in zip(restored_data, vadsegments):
                        sentence_list.append({"start": vadsegment[0],\
                                                "end": vadsegment[1],
                                                "sentence": res['text'],
                                                "timestamp": res['timestamp']})
                else: # punc_segment
                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'], \
                                                        result['timestamp'], \
                                                        result['text'])
                        if 'timestamp' not in res:
                            logging.error("Only 'iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch' \
                                           and 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'\
                                           can predict timestamp, and speaker diarization relies on timestamps.")
                        sentence_list.append({"start": vadsegment[0],
                                              "end": vadsegment[1],
                                              "sentence": res['text'],
                                              "timestamp": res['timestamp']})
                elif self.spk_mode == 'punc_segment':
                    if 'timestamp' not in result:
                        logging.error("Only 'iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch' \
                                       and 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'\
                                       can predict timestamp, and speaker diarization relies on timestamps.")
                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'],
                                                       result['timestamp'],
                                                       raw_text,
                                                       return_raw_text=return_raw_text)
                distribute_spk(sentence_list, sv_output)
                result['sentence_info'] = sentence_list
            elif kwargs.get("sentence_timestamp", False):
                if not len(result['text']):
                    sentence_list = []
                else:
                    sentence_list = timestamp_sentence(punc_res[0]['punc_array'],
                                                       result['timestamp'],
                                                       raw_text,
                                                       return_raw_text=return_raw_text)
                result['sentence_info'] = sentence_list
            if "spk_embedding" in result: del result['spk_embedding']
            result["key"] = key
            results_ret_list.append(result)
            pbar_total.update(1)
        pbar_total.update(1)
        end_total = time.time()
        time_escape_total_all_samples = end_total - beg_total
        pbar_total.set_description(f"rtf_avg_all_samples: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
                             f"time_speech_total_all_samples: {time_speech_total_all_samples: 0.3f}, "
                             f"time_escape_total_all_samples: {time_escape_total_all_samples:0.3f}")
            end_asr_total = time.time()
            time_escape_total_per_sample = end_asr_total - beg_asr_total
            if pbar_total:
                pbar_total.update(1)
                pbar_total.set_description(f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, "
                                 f"time_speech: {time_speech_total_per_sample: 0.3f}, "
                                 f"time_escape: {time_escape_total_per_sample:0.3f}")
        # end_total = time.time()
        # time_escape_total_all_samples = end_total - beg_total
        # print(f"rtf_avg_all: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
        #                      f"time_speech_all: {time_speech_total_all_samples: 0.3f}, "
        #                      f"time_escape_all: {time_escape_total_all_samples:0.3f}")
        return results_ret_list
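
End to end, the revised pipeline is driven through AutoModel roughly as below; the model aliases are the usual FunASR examples and stand in for whatever is installed locally, while batch_size_s, disable_pbar, return_raw_text, and return_spk_res are the knobs used or introduced in this change:

from funasr import AutoModel

model = AutoModel(
    model="paraformer-zh",   # ASR backbone
    vad_model="fsmn-vad",    # segments long audio before recognition
    punc_model="ct-punc",    # punctuation restoration
    spk_model="cam++",       # speaker embeddings (clustering needs hdbscan)
    disable_pbar=True,
)
res = model.generate(input="example.wav", batch_size_s=300, return_raw_text=True)
print(res[0]["text"])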