gaochangfeng
2024-04-11 fce4e1d1b48f23cd8332e60afce3df8d6209a6a7
examples/common_voice/whisper_lid/demo_modelscope.py
@@ -15,7 +15,7 @@
# Build a ModelScope ASR pipeline backed by the Whisper-large language-ID model.
# NOTE(review): the original text contained two conflicting `model=` keyword
# lines (an unmarked old/new pair from a diff), which is a syntax error
# (repeated keyword argument). Keeping the revision-pinned form so the demo is
# reproducible across model updates — confirm "v2.0.4" against the intended
# commit.
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model='iic/speech_whisper-large_lid_multilingual_pytorch',
    model_revision="v2.0.4")

# Run recognition on each sample; inference_clip_length=250 limits how much of
# each utterance is processed per inference pass (presumably frames — TODO
# confirm units in the pipeline docs).
for wav in multilingual_wavs:
    rec_result = inference_pipeline(input=wav, inference_clip_length=250)