VirtuosoQ
2024-04-26 e9d2cfc3a134b00f4e98271fbee3838d1ccecbcc
examples/industrial_data_pretraining/whisper/demo.py
@@ -3,11 +3,26 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
#  MIT License  (https://opensource.org/licenses/MIT)
# To install requirements: pip3 install -U openai-whisper
from funasr import AutoModel
# Load Whisper-large-v3 with an FSMN VAD front-end: long audio is first
# segmented into speech chunks by the VAD model before transcription.
model = AutoModel(
    model="iic/Whisper-large-v3",
    model_revision="v2.0.4",
    vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    vad_kwargs={"max_single_segment_time": 30000},  # cap each VAD segment at 30 s
)

# Example 1: default decoding with automatic language detection.
res = model.generate(
    input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
    language=None,
)
print(res)  # previously this result was silently overwritten and never shown

# Example 2: pass explicit Whisper decoding options through FunASR.
decoding_options = {
    "task": "transcribe",
    "language": None,        # None => auto-detect language
    "beam_size": None,       # None => greedy decoding
    "fp16": True,
    "without_timestamps": False,
    "prompt": None,
}
res = model.generate(
    DecodingOptions=decoding_options,  # keyword name is fixed by the FunASR API
    batch_size_s=0,
    input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
)
print(res)