Funasr1.0 bugfix, audio sample input for the vad model (#1333)
* funasr1.0.5
* funasr1.0.5 audio samples input
```python
        # spk_model_revision="v2.0.2",
    )


# example1: URL input
res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
                     hotword='达摩院 魔搭',
                     # sentence_timestamp=True,  # return sentence level information when spk_model is not given
                     )
print(res)

# example2: torch.Tensor audio-sample input
import torchaudio
wav_file = os.path.join(model.model_path, "example/asr_example.wav")
input_tensor, sample_rate = torchaudio.load(wav_file)
input_tensor = input_tensor.mean(0)
res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)


# example3: numpy audio-sample input
import soundfile
import os
wav_file = os.path.join(model.model_path, "example/asr_example.wav")
speech, sample_rate = soundfile.read(wav_file)
res = model.generate(input=[speech], batch_size_s=300, is_final=True)
```
```python
data_batch = data_list[beg_idx:end_idx]
key_batch = key_list[beg_idx:end_idx]
batch = {"data_in": data_batch, "key": key_batch}
# Changed by this PR: detect pre-computed fbank features via the explicit
# data_type kwarg instead of isinstance(data_batch[0], torch.Tensor), so that
# raw audio samples passed as tensors are no longer misrouted down the fbank path.
if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank":  # fbank
    batch["data_in"] = data_batch[0]
    batch["data_lengths"] = input_len
```
```python
token = tokenizer.ids2tokens(token_int)
text = tokenizer.tokens2text(token)

# Changed by this PR: apply sentence_postprocess so the returned/written text
# is the postprocessed form rather than the raw detokenized string.
text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
result_i = {"key": key[i], "token": token, "text": text_postprocessed}
results.append(result_i)

if ibest_writer is not None:
    ibest_writer["token"][key[i]] = " ".join(token)
    ibest_writer["text"][key[i]] = text_postprocessed

return results, meta_data
```