From 2779602177ae5374547c7a7e17de0b11a166326d Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期一, 29 四月 2024 15:08:46 +0800
Subject: [PATCH] Merge branch 'dev_gzf_exp' of github.com:alibaba-damo-academy/FunASR into dev_gzf_exp merge
---
examples/industrial_data_pretraining/whisper/demo_from_openai.py | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/examples/industrial_data_pretraining/whisper/demo_from_openai.py b/examples/industrial_data_pretraining/whisper/demo_from_openai.py
index 0b88a95..097e942 100644
--- a/examples/industrial_data_pretraining/whisper/demo_from_openai.py
+++ b/examples/industrial_data_pretraining/whisper/demo_from_openai.py
@@ -3,15 +3,32 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+# To install requirements: pip3 install -U openai-whisper
+
from funasr import AutoModel
# model = AutoModel(model="Whisper-small", hub="openai")
# model = AutoModel(model="Whisper-medium", hub="openai")
-model = AutoModel(model="Whisper-large-v2", hub="openai")
-# model = AutoModel(model="Whisper-large-v3", hub="openai")
+# model = AutoModel(model="Whisper-large-v2", hub="openai")
+model = AutoModel(
+ model="Whisper-large-v3",
+ vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+ vad_kwargs={"max_single_segment_time": 30000},
+ hub="openai",
+)
+DecodingOptions = {
+ "task": "transcribe",
+ "language": None,
+ "beam_size": None,
+ "fp16": True,
+ "without_timestamps": False,
+ "prompt": None,
+}
res = model.generate(
- language=None,
- task="transcribe",
- input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav")
+ DecodingOptions=DecodingOptions,
+ batch_size_s=0,
+ input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
+)
+
print(res)
--
Gitblit v1.9.1