Shi Xian
2024-04-09 c6574bf4f4390a9c0dfdd5aa2c4f39eb85e1557e
examples/common_voice/whisper_lid/demo_modelscope.py
@@ -15,7 +15,7 @@
# Build a ModelScope ASR pipeline for Whisper-based language identification.
# NOTE(review): the original excerpt was a stripped diff hunk and carried BOTH
# the old line (with model_revision="v2.0.4") and the new one; only the
# post-change line (no explicit revision, i.e. latest) is kept here — confirm
# against the upstream commit if a pinned revision is required.
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_whisper-large_lid_multilingual_pytorch')

# Run language identification over each multilingual sample.
# inference_clip_length=250 limits how much audio the model consumes per clip
# (units not shown in this excerpt — presumably frames; verify in the model docs).
for wav in multilingual_wavs:
    rec_result = inference_pipeline(input=wav, inference_clip_length=250)