from funasr.utils.load_utils import load_bytes
from funasr.download.file import download_from_url
from funasr.utils.timestamp_tools import timestamp_sentence
from funasr.utils.timestamp_tools import timestamp_sentence_en
from funasr.download.download_from_hub import download_model
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.utils.vad_utils import merge_vad
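# The helpers above cover model download (hub / URL), loading audio from raw bytes,
# VAD segment slicing and merging, and sentence-level timestamp alignment used by the
# inference pipeline excerpted below.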

        if isinstance(data_i, str) and os.path.exists(data_i):
            key = misc.extract_filename_without_extension(data_i)
        else:
            key = "rand_key_" + "".join(random.choice(chars) for _ in range(13))
        if key is None:
            key = "rand_key_" + "".join(random.choice(chars) for _ in range(13))
        key_list.append(key)
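        # NOTE: each input gets a key that names it in the returned results; file paths
        # reuse the file stem, anything else falls back to a random "rand_key_*" id.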

    else:  # raw text; audio sample point, fbank; bytes

        deep_update(model_conf, kwargs.get("model_conf", {}))
        deep_update(model_conf, kwargs)
        model = model_class(**model_conf, vocab_size=vocab_size)
        model.to(device)

        # init_param: optional checkpoint path(s) used to initialize the model weights
        init_param = kwargs.get("init_param", None)

        # fp16 / bf16: optionally cast the model to half precision
        if kwargs.get("fp16", False):
            model.to(torch.float16)
        elif kwargs.get("bf16", False):
            model.to(torch.bfloat16)
        model.to(device)
        return model, kwargs
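    # A minimal usage sketch of how these kwargs reach build_model through the public
    # AutoModel wrapper (assumption: the model name below is only illustrative):
    #
    #     from funasr import AutoModel
    #     m = AutoModel(model="paraformer-zh", device="cuda:0", fp16=True)
    #     res = m.generate(input="example.wav")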

    def __call__(self, *args, **cfg):

| | | # f"time_speech_total_per_sample: {time_speech_total_per_sample: 0.3f}, " |
| | | # f"time_escape_total_per_sample: {time_escape_total_per_sample:0.3f}") |
| | | |
| | | if len(results_sorted) != n: |
| | | results_ret_list.append({"key": key, "text": "", "timestamp": []}) |
| | | logging.info("decoding, utt: {}, empty result".format(key)) |
| | | continue |
| | | restored_data = [0] * n |
| | | for j in range(n): |
| | | index = sorted_data[j][1] |
| | |
                    else:
                        result[k] += restored_data[j][k]

            if not len(result["text"].strip()):
                continue
            return_raw_text = kwargs.get("return_raw_text", False)
            # step.3 compute punc model
            raw_text = None
            if self.punc_model is not None:
                if not len(result["text"].strip()):
                    if return_raw_text:
                        result["raw_text"] = ""
                else:
                    deep_update(self.punc_kwargs, cfg)
                    punc_res = self.inference(
                        result["text"], model=self.punc_model, kwargs=self.punc_kwargs, **cfg
                    )
                    raw_text = copy.copy(result["text"])
                    if return_raw_text:
                        result["raw_text"] = raw_text
                    result["text"] = punc_res[0]["text"]

            # speaker embedding cluster after resorted
            if self.spk_model is not None and kwargs.get("return_spk_res", True):

                            and 'iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'\
                            can predict timestamp, and speaker diarization relies on timestamps."
                        )
                    if kwargs.get("en_post_proc", False):
                        sentence_list = timestamp_sentence_en(
                            punc_res[0]["punc_array"],
                            result["timestamp"],
                            raw_text,
                            return_raw_text=return_raw_text,
                        )
                    else:
                        sentence_list = timestamp_sentence(
                            punc_res[0]["punc_array"],
                            result["timestamp"],
                            raw_text,
                            return_raw_text=return_raw_text,
                        )
                distribute_spk(sentence_list, sv_output)
                result["sentence_info"] = sentence_list
            elif kwargs.get("sentence_timestamp", False):
                if not len(result["text"].strip()):
                    sentence_list = []
                else:
                    if kwargs.get("en_post_proc", False):
                        sentence_list = timestamp_sentence_en(
                            punc_res[0]["punc_array"],
                            result["timestamp"],
                            raw_text,
                            return_raw_text=return_raw_text,
                        )
                    else:
                        sentence_list = timestamp_sentence(
                            punc_res[0]["punc_array"],
                            result["timestamp"],
                            raw_text,
                            return_raw_text=return_raw_text,
                        )
                result["sentence_info"] = sentence_list
            if "spk_embedding" in result:
                del result["spk_embedding"]
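            # the raw speaker embeddings are removed from the per-utterance result once
            # speaker processing is done, so they do not appear in the returned output.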

        )

        with torch.no_grad():

            if type == "onnx":
                export_dir = export_utils.export_onnx(model=model, data_in=data_list, **kwargs)
            else:
                export_dir = export_utils.export_torchscripts(
                    model=model, data_in=data_list, **kwargs
                )

        return export_dir
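
# A minimal export usage sketch (assumption: the public AutoModel wrapper is used and
# the model name below is only illustrative):
#
#     from funasr import AutoModel
#     m = AutoModel(model="paraformer-zh")
#     export_dir = m.export(type="onnx", quantize=False)
#     print(export_dir)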