wuhongsheng
2024-07-05 3a4281f4959534b1bf5d01acf0085f4f8e6f2ec8
examples/common_voice/whisper_lid/demo_modelscope.py
"""Demo: language identification with the Whisper-large LID model via ModelScope.

Runs the `iic/speech_whisper-large_lid_multilingual_pytorch` model over a few
sample clips in different languages and prints each recognition result.
"""
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Sample audio clips (zh-CN, en, ja) served from the model's ModelScope repo.
multilingual_wavs = [
    "https://www.modelscope.cn/api/v1/models/iic/speech_whisper-large_lid_multilingual_pytorch/repo?Revision=master&FilePath=examples/example_zh-CN.mp3",
    "https://www.modelscope.cn/api/v1/models/iic/speech_whisper-large_lid_multilingual_pytorch/repo?Revision=master&FilePath=examples/example_en.mp3",
    "https://www.modelscope.cn/api/v1/models/iic/speech_whisper-large_lid_multilingual_pytorch/repo?Revision=master&FilePath=examples/example_ja.mp3",
]

# Build the pipeline once and reuse it for every clip (model load is expensive).
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model="iic/speech_whisper-large_lid_multilingual_pytorch",
)

for wav in multilingual_wavs:
    # NOTE(review): inference_clip_length presumably caps how much audio is
    # used for language ID — confirm units against the model's pipeline docs.
    rec_result = inference_pipeline(input=wav, inference_clip_length=250)
    print(rec_result)