游雁
2024-03-27 9b4e9cc8a0311e5243d69b73ed073e7ea441982e
examples/industrial_data_pretraining/whisper/demo_from_openai.py
@@ -3,15 +3,22 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
#  MIT License  (https://opensource.org/licenses/MIT)
# To install requirements: pip3 install -U openai-whisper
from funasr import AutoModel
# Alternative Whisper checkpoints — keep exactly one AutoModel call live.
# model = AutoModel(model="Whisper-small", hub="openai")
# model = AutoModel(model="Whisper-medium", hub="openai")
# model = AutoModel(model="Whisper-large-v2", hub="openai")
# model = AutoModel(model="Whisper-large-v3", hub="openai")

# Whisper-large-v3 paired with an FSMN VAD front-end: audio is first split
# into speech segments of at most 30 s (max_single_segment_time is in
# milliseconds) before being passed to the recognizer.
# NOTE(review): the previous revision also built a Whisper-large-v2 model
# here and immediately overwrote it — that dead (and expensive) load is
# now commented out above.
model = AutoModel(
    model="Whisper-large-v3",
    vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    vad_kwargs={"max_single_segment_time": 30000},
    hub="openai",
)

# language=None lets Whisper auto-detect the spoken language;
# task="transcribe" keeps output in the source language (vs. "translate").
res = model.generate(
    language=None,
    task="transcribe",
    batch_size_s=0,
    input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
)
print(res)