liugz18
2024-07-18 d80ac2fd2df4e7fb8a28acfa512bb11472b5cc99
examples/common_voice/whisper_lid/demo_funasr.py
@@ -12,8 +12,8 @@
    "example_ko.mp3",
]
# Load the Whisper-large language-identification model through FunASR's AutoModel.
# (A single instantiation suffices; loading the model is expensive.)
model = AutoModel(model="iic/speech_whisper-large_lid_multilingual_pytorch")

for wav_id in multilingual_wavs:
    # The example audio files ship inside the downloaded model directory.
    wav_file = f"{model.model_path}/examples/{wav_id}"
    # inference_clip_length caps how much of the clip is used for detection;
    # data_type="sound" tells the pipeline the input is a raw audio file.
    res = model.generate(input=wav_file, data_type="sound", inference_clip_length=250)
    print("detect sample {}: {}".format(wav_id, res))