zhifu gao
2024-01-31 dec1c875b2fcf0161755b93717d3eac856c6d15d
Funasr1.0 bugfix, audio sample input for the vad model (#1333)

* funasr1.0.5

* funasr1.0.5 audio samples input
3个文件已修改
28 ■■■■ 已修改文件
examples/industrial_data_pretraining/seaco_paraformer/demo.py 20 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/auto/auto_model.py 2 ●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/models/transformer/model.py 6 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -15,8 +15,26 @@
                  # spk_model_revision="v2.0.2",
                  )
# example1
res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
                     hotword='达摩院 魔搭',
                     # sentence_timestamp=True,  # return sentence level information when spk_model is not given
                    )
print(res)
# example2
import torchaudio
wav_file = os.path.join(model.model_path, "example/asr_example.wav")
input_tensor, sample_rate = torchaudio.load(wav_file)
input_tensor = input_tensor.mean(0)
res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)
# example3
import soundfile
import os
wav_file = os.path.join(model.model_path, "example/asr_example.wav")
speech, sample_rate = soundfile.read(wav_file)
res = model.generate(input=[speech], batch_size_s=300, is_final=True)
funasr/auto/auto_model.py
@@ -228,7 +228,7 @@
            data_batch = data_list[beg_idx:end_idx]
            key_batch = key_list[beg_idx:end_idx]
            batch = {"data_in": data_batch, "key": key_batch}
-            if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
+            if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
                batch["data_in"] = data_batch[0]
                batch["data_lengths"] = input_len
        
funasr/models/transformer/model.py
@@ -439,13 +439,13 @@
                token = tokenizer.ids2tokens(token_int)
                text = tokenizer.tokens2text(token)
                
-                # text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
-                result_i = {"key": key[i], "token": token, "text": text}
+                text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
+                result_i = {"key": key[i], "token": token, "text": text_postprocessed}
                results.append(result_i)
                
                if ibest_writer is not None:
                    ibest_writer["token"][key[i]] = " ".join(token)
-                    ibest_writer["text"][key[i]] = text
+                    ibest_writer["text"][key[i]] = text_postprocessed
        
        return results, meta_data