examples/common_voice/whisper_lid/demo_modelscope.py
@@ -15,7 +15,7 @@
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
-    model='iic/speech_whisper-large_lid_multilingual_pytorch', model_revision="v2.0.4")
+    model='iic/speech_whisper-large_lid_multilingual_pytorch', model_revision="master")

 for wav in multilingual_wavs:
     rec_result = inference_pipeline(input=wav, inference_clip_length=250)