import logging

import numpy as np
import torch
from tqdm import tqdm

from omegaconf import DictConfig, ListConfig
from funasr.utils.misc import deep_update
from funasr.register import tables
from funasr.utils.load_utils import load_bytes
from funasr.train_utils.set_all_random_seed import set_all_random_seed
from funasr.models.campplus.cluster_backend import ClusterBackend

try:
    from funasr.utils.version_checker import check_for_update

    print(
        "Checking for funasr updates; this may take a few moments. "
        "You can disable it by setting `disable_update=True` in AutoModel."
    )
    check_for_update(disable=kwargs.get("disable_update", False))
except Exception:
    pass
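
# Hypothetical usage sketch: the check above can be skipped entirely by
# passing `disable_update=True` when constructing AutoModel (the model name
# below is illustrative):
#
#   from funasr import AutoModel
#   model = AutoModel(model="paraformer-zh", disable_update=True)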

# if spk_model is given, build the speaker model; otherwise leave it as None
spk_model = kwargs.get("spk_model", None)
spk_kwargs = {} if kwargs.get("spk_kwargs", {}) is None else kwargs.get("spk_kwargs", {})
cb_kwargs = {} if spk_kwargs.get("cb_kwargs", {}) is None else spk_kwargs.get("cb_kwargs", {})
if spk_model is not None:
    logging.info("Building SPK model.")
    spk_kwargs["model"] = spk_model
    spk_kwargs["model_revision"] = kwargs.get("spk_model_revision", "master")
    spk_kwargs["device"] = kwargs["device"]
    spk_model, spk_kwargs = self.build_model(**spk_kwargs)
    self.cb_model = ClusterBackend(**cb_kwargs).to(kwargs["device"])
    spk_mode = kwargs.get("spk_mode", "punc_segment")
    if spk_mode not in ["default", "vad_segment", "punc_segment"]:
        logging.error("spk_mode should be one of default, vad_segment and punc_segment.")
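
# A minimal config sketch for this branch, assuming model-zoo style names
# ("cam++" etc. are illustrative) and that `merge_thr` is a valid
# ClusterBackend option (an assumption, not confirmed here):
#
#   model = AutoModel(
#       model="paraformer-zh",
#       spk_model="cam++",                    # enables the SPK branch above
#       spk_mode="punc_segment",              # default, vad_segment, or punc_segment
#       spk_kwargs={"cb_kwargs": {"merge_thr": 0.78}},
#   )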

set_all_random_seed(kwargs.get("seed", 0))

device = kwargs.get("device", "cuda")
if (
    (device == "cuda" and not torch.cuda.is_available())
    or (device == "xpu" and not torch.xpu.is_available())
    or (device == "mps" and not torch.backends.mps.is_available())
    or kwargs.get("ngpu", 1) == 0
):
    device = "cpu"
    kwargs["batch_size"] = 1
kwargs["device"] = device

# build tokenizer
tokenizer = kwargs.get("tokenizer", None)
if tokenizer is not None:
    tokenizers = (
        tokenizer.split(",") if isinstance(tokenizer, str) else tokenizer
    )  # tokenizers is always a list
    tokenizers_conf = kwargs.get("tokenizer_conf", {})
    tokenizers_build = []
    vocab_sizes = []
    token_lists = []

    ### === only for kws ===
    token_list_files = kwargs.get("token_lists", [])
    seg_dicts = kwargs.get("seg_dicts", [])
    ### === only for kws ===

    if not isinstance(tokenizers_conf, (list, tuple, ListConfig)):
        tokenizers_conf = [tokenizers_conf] * len(tokenizers)

    for i, tokenizer in enumerate(tokenizers):
        tokenizer_class = tables.tokenizer_classes.get(tokenizer)
        tokenizer_conf = tokenizers_conf[i]

        ### === only for kws ===
        if len(token_list_files) > 1:
            tokenizer_conf["token_list"] = token_list_files[i]
        if len(seg_dicts) > 1:
            tokenizer_conf["seg_dict"] = seg_dicts[i]
        ### === only for kws ===

        tokenizer = tokenizer_class(**tokenizer_conf)
        tokenizers_build.append(tokenizer)
        token_list = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
        token_list = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else token_list
        vocab_size = -1
        if token_list is not None:
            vocab_size = len(token_list)
        if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"):
            vocab_size = tokenizer.get_vocab_size()
        token_lists.append(token_list)
        vocab_sizes.append(vocab_size)

    if len(tokenizers_build) <= 1:
        tokenizers_build = tokenizers_build[0]
        token_lists = token_lists[0]
        vocab_sizes = vocab_sizes[0]
else:
    # no tokenizer configured: keep the sentinel values used elsewhere
    tokenizers_build = None
    token_lists = None
    vocab_sizes = -1

kwargs["tokenizer"] = tokenizers_build
kwargs["vocab_size"] = vocab_sizes
kwargs["token_list"] = token_lists

# build frontend
frontend = kwargs.get("frontend", None)

model_conf = {}
deep_update(model_conf, kwargs.get("model_conf", {}))
deep_update(model_conf, kwargs)
model = model_class(**model_conf)

# init_param
init_param = kwargs.get("init_param", None)

res = self.model(*args, **kwargs)
return res

def generate(self, input, input_len=None, progress_callback=None, **cfg):
    if self.vad_model is None:
        return self.inference(
            input, input_len=input_len, progress_callback=progress_callback, **cfg
        )
    else:
        return self.inference_with_vad(
            input, input_len=input_len, progress_callback=progress_callback, **cfg
        )
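
# A minimal progress_callback sketch: the inference loop below invokes it with
# the number of processed samples and the total, so any two-argument callable
# works (names here are illustrative):
#
#   def on_progress(done, total):
#       print(f"{done}/{total} samples, {100.0 * done / max(total, 1):.1f}%")
#
#   res = model.generate(input="audio.wav", progress_callback=on_progress)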

def inference(
    self,
    input,
    input_len=None,
    model=None,
    kwargs=None,
    key=None,
    progress_callback=None,
    **cfg,
):
    kwargs = self.kwargs if kwargs is None else kwargs
    if "cache" in kwargs:
        kwargs.pop("cache")

    # (audio loading and the per-batch decoding loop are elided here)
    if pbar:
        pbar.update(end_idx - beg_idx)
        pbar.set_description(description)
    if progress_callback:
        try:
            progress_callback(end_idx, num_samples)
        except Exception as e:
            logging.error(f"progress_callback error: {e}")
    time_speech_total += batch_data_time
    time_escape_total += time_escape

    if pbar:
        # pbar.update(1)
        pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")

    # free cached memory only on the device this model actually occupies
    device = next(model.parameters()).device
    if device.type == "cuda":
        with torch.cuda.device(device):
            torch.cuda.empty_cache()
    return asr_result_list

def inference_with_vad(self, input, input_len=None, **cfg):