try:
    from funasr.models.campplus.cluster_backend import ClusterBackend
except ImportError:
    print("If you want to use the speaker diarization, please `pip install hdbscan`")

def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
    """Normalize `data_in` (e.g. a URL, a local wav path, or a filelist such as
    wav.scp / file.jsonl / text.txt) into parallel key/data lists for batched inference."""

    chars = string.ascii_letters + string.digits
    if isinstance(data_in, str) and data_in.startswith("http"):  # url
        data_in = download_from_url(data_in)

    if isinstance(data_in, str) and os.path.exists(data_in):  # wav_path; filelist: wav.scp, file.jsonl, text.txt
        _, file_extension = os.path.splitext(data_in)
        file_extension = file_extension.lower()
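
# Illustrative note (not in the original file): callers consume the return value as
# parallel lists, e.g.
#     key_list, data_list = prepare_data_iterator(["a.wav", "b.wav"])
# where key_list[i] labels data_list[i]; when no key is supplied, a random
# alphanumeric key drawn from `chars` above is assumed to be generated.
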
def build_model(self, **kwargs):
    assert "model" in kwargs
    if "model_conf" not in kwargs:
        logging.info("download models from model hub: {}".format(kwargs.get("hub", "ms")))
        kwargs = download_model(**kwargs)

    set_all_random_seed(kwargs.get("seed", 0))

    device = kwargs.get("device", "cuda")
    if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
        device = "cpu"

        kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
        kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
        vocab_size = len(kwargs["token_list"]) if kwargs["token_list"] is not None else -1
    else:
        vocab_size = -1  # no tokenizer, so the vocabulary size is unknown

    # build frontend
    frontend = kwargs.get("frontend", None)
    kwargs["input_size"] = None
    if frontend is not None:
        frontend_class = tables.frontend_classes.get(frontend)
        frontend = frontend_class(**kwargs["frontend_conf"])
        kwargs["frontend"] = frontend
        kwargs["input_size"] = frontend.output_size() if hasattr(frontend, "output_size") else None

    # build model
    model_class = tables.model_classes.get(kwargs["model"])
    model = model_class(**kwargs, **kwargs.get("model_conf", {}), vocab_size=vocab_size)
    model.to(device)

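    # Illustrative note (registry usage recalled from FunASR's registration pattern,
    # not shown in this excerpt): tables.model_classes maps a registered name to a
    # model class, typically populated via a decorator such as
    #     @tables.register("model_classes", "Paraformer")
    #     class Paraformer(torch.nn.Module): ...
    # so kwargs["model"] only needs to carry the registered name.
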
    # init_param

            path=init_param,
            ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
            oss_bucket=kwargs.get("oss_bucket", None),
            scope_map=kwargs.get("scope_map", []),
            excludes=kwargs.get("excludes", None),
        )
    else:

    batch_size = kwargs.get("batch_size", 1)
    # if kwargs.get("device", "cpu") == "cpu":
    #     batch_size = 1

    key_list, data_list = prepare_data_iterator(
        input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key
    )

    speed_stats = {}
    asr_result_list = []
    num_samples = len(data_list)

        data_batch = data_list[beg_idx:end_idx]
        key_batch = key_list[beg_idx:end_idx]
        batch = {"data_in": data_batch, "key": key_batch}

        if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank":  # fbank input
            batch["data_in"] = data_batch[0]
            batch["data_lengths"] = input_len

        time1 = time.perf_counter()
        with torch.no_grad():
            res = model.inference(**batch, **kwargs)
            if isinstance(res, (list, tuple)):
                results = res[0]
                meta_data = res[1] if len(res) > 1 else {}
        time2 = time.perf_counter()

        asr_result_list.extend(results)

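    # Illustrative usage sketch (model name and batch_size value are examples, not
    # taken from this excerpt): the batching loop above is what runs when calling, e.g.
    #     from funasr import AutoModel
    #     model = AutoModel(model="paraformer-zh")
    #     res = model.generate(input="asr_example.wav", batch_size=8)
    # inputs are split into batches of `batch_size`, each batch goes through
    # model.inference(**batch, **kwargs), and per-sample results accumulate in
    # asr_result_list.
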
    # step.3 compute punc model
    if self.punc_model is not None:
        if not len(result["text"]):
            if return_raw_text:
                result["raw_text"] = ""
        else:
            self.punc_kwargs.update(cfg)
            punc_res = self.inference(result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg)

        distribute_spk(sentence_list, sv_output)
        result["sentence_info"] = sentence_list
    elif kwargs.get("sentence_timestamp", False):
        if not len(result["text"]):
            sentence_list = []
        else:
            sentence_list = timestamp_sentence(
                punc_res[0]["punc_array"],
                result["timestamp"],
                raw_text,
                return_raw_text=return_raw_text,
            )
        result["sentence_info"] = sentence_list
    if "spk_embedding" in result:
        del result["spk_embedding"]

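    # Illustrative note (field names inferred from the timestamp_sentence /
    # distribute_spk usage above, not guaranteed by this excerpt): each entry of
    # result["sentence_info"] is roughly of the form
    #     {"text": "...", "start": <ms>, "end": <ms>, "timestamp": [[beg, end], ...], "spk": <int>}
    # i.e. punctuation-delimited sentences with word-level timestamps and, when the
    # speaker model is enabled, a speaker index assigned by distribute_spk.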