From 6d932da239b3584b5735f4efb2dbb50b84c385db Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期五, 11 十月 2024 14:37:27 +0800
Subject: [PATCH] whisper-large-v3-turbo
---
funasr/version.txt | 2 +-
funasr/models/whisper/model.py | 1 +
examples/industrial_data_pretraining/whisper/demo.py | 2 +-
funasr/download/name_maps_from_hub.py | 2 ++
README_zh.md | 6 ++++--
examples/industrial_data_pretraining/whisper/demo_from_openai.py | 2 +-
README.md | 7 ++++---
7 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index 487cc5e..225dd13 100644
--- a/README.md
+++ b/README.md
@@ -29,6 +29,7 @@
<a name="whats-new"></a>
## What's new:
+- 2024/10/10: Added support for the Whisper-large-v3-turbo model, a multitasking model that can perform multilingual speech recognition, speech translation, and language identification. It can be downloaded from [modelscope](examples/industrial_data_pretraining/whisper/demo.py) or [openai](examples/industrial_data_pretraining/whisper/demo_from_openai.py).
- 2024/09/26: Offline File Transcription Service 4.6, Offline File Transcription Service of English 1.7锛孯eal-time Transcription Service 1.11 released锛宖ix memory leak & Support the SensevoiceSmall onnx model锛汧ile Transcription Service 2.0 GPU released, Fix GPU memory leak; ([docs](runtime/readme.md));
- 2024/09/25锛歬eyword spotting models are new supported. Supports fine-tuning and inference for four models: [fsmn_kws](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online), [fsmn_kws_mt](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online), [sanm_kws](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-offline), [sanm_kws_streaming](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online).
- 2024/07/04锛歔SenseVoice](https://github.com/FunAudioLLM/SenseVoice) is a speech foundation model with multiple speech understanding capabilities, including ASR, LID, SER, and AED.
@@ -95,18 +96,18 @@
| Model Name | Task Details | Training Data | Parameters |
|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:--------------------------------------------------------------------------------:|:--------------------------------:|:----------:|
-| SenseVoiceSmall <br> ([猸怾(https://www.modelscope.cn/models/iic/SenseVoiceSmall) [馃](https://huggingface.co/FunAudioLLM/SenseVoiceSmall) ) | multiple speech understanding capabilities, including ASR, ITN, LID, SER, and AED, support languages such as zh, yue, en, ja, ko | 300000 hours | 234M |
+| SenseVoiceSmall <br> ([猸怾(https://www.modelscope.cn/models/iic/SenseVoiceSmall) [馃](https://huggingface.co/FunAudioLLM/SenseVoiceSmall) ) | multiple speech understanding capabilities, including ASR, ITN, LID, SER, and AED, support languages such as zh, yue, en, ja, ko | 300000 hours | 234M |
| paraformer-zh <br> ([猸怾(https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary) [馃](https://huggingface.co/funasr/paraformer-zh) ) | speech recognition, with timestamps, non-streaming | 60000 hours, Mandarin | 220M |
| <nobr>paraformer-zh-streaming <br> ( [猸怾(https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary) [馃](https://huggingface.co/funasr/paraformer-zh-streaming) )</nobr> | speech recognition, streaming | 60000 hours, Mandarin | 220M |
| paraformer-en <br> ( [猸怾(https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary) [馃](https://huggingface.co/funasr/paraformer-en) ) | speech recognition, without timestamps, non-streaming | 50000 hours, English | 220M |
| conformer-en <br> ( [猸怾(https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary) [馃](https://huggingface.co/funasr/conformer-en) ) | speech recognition, non-streaming | 50000 hours, English | 220M |
| ct-punc <br> ( [猸怾(https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [馃](https://huggingface.co/funasr/ct-punc) ) | punctuation restoration | 100M, Mandarin and English | 290M |
| fsmn-vad <br> ( [猸怾(https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [馃](https://huggingface.co/funasr/fsmn-vad) ) | voice activity detection | 5000 hours, Mandarin and English | 0.4M |
-| fsmn-kws <br> ( [猸怾(https://modelscope.cn/models/iic/speech_charctc_kws_phone-xiaoyun/summary) ) | keyword spotting锛宻treaming | 5000 hours, Mandarin | 0.7M |
+| fsmn-kws <br> ( [猸怾(https://modelscope.cn/models/iic/speech_charctc_kws_phone-xiaoyun/summary) ) | keyword spotting锛宻treaming | 5000 hours, Mandarin | 0.7M |
| fa-zh <br> ( [猸怾(https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [馃](https://huggingface.co/funasr/fa-zh) ) | timestamp prediction | 5000 hours, Mandarin | 38M |
| cam++ <br> ( [猸怾(https://modelscope.cn/models/iic/speech_campplus_sv_zh-cn_16k-common/summary) [馃](https://huggingface.co/funasr/campplus) ) | speaker verification/diarization | 5000 hours | 7.2M |
-| Whisper-large-v2 <br> ([猸怾(https://www.modelscope.cn/models/iic/speech_whisper-large_asr_multilingual/summary) [馃崁](https://github.com/openai/whisper) ) | speech recognition, with timestamps, non-streaming | multilingual | 1550 M |
| Whisper-large-v3 <br> ([猸怾(https://www.modelscope.cn/models/iic/Whisper-large-v3/summary) [馃崁](https://github.com/openai/whisper) ) | speech recognition, with timestamps, non-streaming | multilingual | 1550 M |
+| Whisper-large-v3-turbo <br> ([猸怾(https://www.modelscope.cn/models/iic/Whisper-large-v3-turbo/summary) [馃崁](https://github.com/openai/whisper) ) | speech recognition, with timestamps, non-streaming | multilingual | 809 M |
| Qwen-Audio <br> ([猸怾(examples/industrial_data_pretraining/qwen_audio/demo.py) [馃](https://huggingface.co/Qwen/Qwen-Audio) ) | audio-text multimodal models (pretraining) | multilingual | 8B |
| Qwen-Audio-Chat <br> ([猸怾(examples/industrial_data_pretraining/qwen_audio/demo_chat.py) [馃](https://huggingface.co/Qwen/Qwen-Audio-Chat) ) | audio-text multimodal models (chat) | multilingual | 8B |
| emotion2vec+large <br> ([猸怾(https://modelscope.cn/models/iic/emotion2vec_plus_large/summary) [馃](https://huggingface.co/emotion2vec/emotion2vec_plus_large) ) | speech emotion recongintion | 40000 hours | 300M |
diff --git a/README_zh.md b/README_zh.md
index 1f0eaa7..5ae1169 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -33,6 +33,7 @@
<a name="鏈�鏂板姩鎬�"></a>
## 鏈�鏂板姩鎬�
+- 2024/10/10锛氭柊澧炲姞Whisper-large-v3-turbo妯″瀷鏀寔锛屽璇█璇煶璇嗗埆/缈昏瘧/璇璇嗗埆锛屾敮鎸佷粠 [modelscope](examples/industrial_data_pretraining/whisper/demo.py)浠撳簱涓嬭浇锛屼篃鏀寔浠� [openai](examples/industrial_data_pretraining/whisper/demo_from_openai.py)浠撳簱涓嬭浇妯″瀷銆�
- 2024/09/26: 涓枃绂荤嚎鏂囦欢杞啓鏈嶅姟 4.6銆佽嫳鏂囩绾挎枃浠惰浆鍐欐湇鍔� 1.7銆佷腑鏂囧疄鏃惰闊冲惉鍐欐湇鍔� 1.11 鍙戝竷锛屼慨澶峅NNX鍐呭瓨娉勬紡銆佹敮鎸丼ensevoiceSmall onnx妯″瀷锛涗腑鏂囩绾挎枃浠惰浆鍐欐湇鍔PU 2.0 鍙戝竷锛屼慨澶嶆樉瀛樻硠婕�; 璇︾粏淇℃伅鍙傞槄([閮ㄧ讲鏂囨。](runtime/readme_cn.md))
- 2024/09/25锛氭柊澧炶闊冲敜閱掓ā鍨嬶紝鏀寔[fsmn_kws](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online), [fsmn_kws_mt](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online), [sanm_kws](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-offline), [sanm_kws_streaming](https://modelscope.cn/models/iic/speech_sanm_kws_phone-xiaoyun-commands-online) 4涓ā鍨嬬殑寰皟鍜屾帹鐞嗐��
- 2024/07/04锛歔SenseVoice](https://github.com/FunAudioLLM/SenseVoice) 鏄竴涓熀纭�璇煶鐞嗚В妯″瀷锛屽叿澶囧绉嶈闊崇悊瑙h兘鍔涳紝娑电洊浜嗚嚜鍔ㄨ闊宠瘑鍒紙ASR锛夈�佽瑷�璇嗗埆锛圠ID锛夈�佹儏鎰熻瘑鍒紙SER锛変互鍙婇煶棰戜簨浠舵娴嬶紙AED锛夈��
@@ -102,17 +103,18 @@
| 妯″瀷鍚嶅瓧 | 浠诲姟璇︽儏 | 璁粌鏁版嵁 | 鍙傛暟閲� |
|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------:|:--------------:|:------:|
-| SenseVoiceSmall <br> ([猸怾(https://www.modelscope.cn/models/iic/SenseVoiceSmall) [馃](https://huggingface.co/FunAudioLLM/SenseVoiceSmall) ) | 澶氱璇煶鐞嗚В鑳藉姏锛屾兜鐩栦簡鑷姩璇煶璇嗗埆锛圓SR锛夈�佽瑷�璇嗗埆锛圠ID锛夈�佹儏鎰熻瘑鍒紙SER锛変互鍙婇煶棰戜簨浠舵娴嬶紙AED锛� | 400000灏忔椂锛屼腑鏂� | 330M |
+| SenseVoiceSmall <br> ([猸怾(https://www.modelscope.cn/models/iic/SenseVoiceSmall) [馃](https://huggingface.co/FunAudioLLM/SenseVoiceSmall) ) | 澶氱璇煶鐞嗚В鑳藉姏锛屾兜鐩栦簡鑷姩璇煶璇嗗埆锛圓SR锛夈�佽瑷�璇嗗埆锛圠ID锛夈�佹儏鎰熻瘑鍒紙SER锛変互鍙婇煶棰戜簨浠舵娴嬶紙AED锛� | 400000灏忔椂锛屼腑鏂� | 330M |
| paraformer-zh <br> ([猸怾(https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary) [馃](https://huggingface.co/funasr/paraformer-zh) ) | 璇煶璇嗗埆锛屽甫鏃堕棿鎴宠緭鍑猴紝闈炲疄鏃� | 60000灏忔椂锛屼腑鏂� | 220M |
| paraformer-zh-streaming <br> ( [猸怾(https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary) [馃](https://huggingface.co/funasr/paraformer-zh-streaming) ) | 璇煶璇嗗埆锛屽疄鏃� | 60000灏忔椂锛屼腑鏂� | 220M |
| paraformer-en <br> ( [猸怾(https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary) [馃](https://huggingface.co/funasr/paraformer-en) ) | 璇煶璇嗗埆锛岄潪瀹炴椂 | 50000灏忔椂锛岃嫳鏂� | 220M |
| conformer-en <br> ( [猸怾(https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary) [馃](https://huggingface.co/funasr/conformer-en) ) | 璇煶璇嗗埆锛岄潪瀹炴椂 | 50000灏忔椂锛岃嫳鏂� | 220M |
| ct-punc <br> ( [猸怾(https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [馃](https://huggingface.co/funasr/ct-punc) ) | 鏍囩偣鎭㈠ | 100M锛屼腑鏂囦笌鑻辨枃 | 290M |
| fsmn-vad <br> ( [猸怾(https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [馃](https://huggingface.co/funasr/fsmn-vad) ) | 璇煶绔偣妫�娴嬶紝瀹炴椂 | 5000灏忔椂锛屼腑鏂囦笌鑻辨枃 | 0.4M |
-| fsmn-kws <br> ( [猸怾(https://modelscope.cn/models/iic/speech_charctc_kws_phone-xiaoyun/summary) ) | 璇煶鍞ら啋锛屽疄鏃� | 5000灏忔椂锛屼腑鏂� | 0.7M |
+| fsmn-kws <br> ( [猸怾(https://modelscope.cn/models/iic/speech_charctc_kws_phone-xiaoyun/summary) ) | 璇煶鍞ら啋锛屽疄鏃� | 5000灏忔椂锛屼腑鏂� | 0.7M |
| fa-zh <br> ( [猸怾(https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [馃](https://huggingface.co/funasr/fa-zh) ) | 瀛楃骇鍒椂闂存埑棰勬祴 | 50000灏忔椂锛屼腑鏂� | 38M |
| cam++ <br> ( [猸怾(https://modelscope.cn/models/iic/speech_campplus_sv_zh-cn_16k-common/summary) [馃](https://huggingface.co/funasr/campplus) ) | 璇磋瘽浜虹‘璁�/鍒嗗壊 | 5000灏忔椂 | 7.2M |
| Whisper-large-v3 <br> ([猸怾(https://www.modelscope.cn/models/iic/Whisper-large-v3/summary) [馃崁](https://github.com/openai/whisper) ) | 璇煶璇嗗埆锛屽甫鏃堕棿鎴宠緭鍑猴紝闈炲疄鏃� | 澶氳瑷� | 1550 M |
+| Whisper-large-v3-turbo <br> ([猸怾(https://www.modelscope.cn/models/iic/Whisper-large-v3-turbo/summary) [馃崁](https://github.com/openai/whisper) ) | 璇煶璇嗗埆锛屽甫鏃堕棿鎴宠緭鍑猴紝闈炲疄鏃� | 澶氳瑷� | 809 M |
| Qwen-Audio <br> ([猸怾(examples/industrial_data_pretraining/qwen_audio/demo.py) [馃](https://huggingface.co/Qwen/Qwen-Audio) ) | 闊抽鏂囨湰澶氭ā鎬佸ぇ妯″瀷锛堥璁粌锛� | 澶氳瑷� | 8B |
| Qwen-Audio-Chat <br> ([猸怾(examples/industrial_data_pretraining/qwen_audio/demo_chat.py) [馃](https://huggingface.co/Qwen/Qwen-Audio-Chat) ) | 闊抽鏂囨湰澶氭ā鎬佸ぇ妯″瀷锛坈hat鐗堟湰锛� | 澶氳瑷� | 8B |
| emotion2vec+large <br> ([猸怾(https://modelscope.cn/models/iic/emotion2vec_plus_large/summary) [馃](https://huggingface.co/emotion2vec/emotion2vec_plus_large) ) | 鎯呮劅璇嗗埆妯″瀷 | 40000灏忔椂锛�4绉嶆儏鎰熺被鍒� | 300M |
diff --git a/examples/industrial_data_pretraining/whisper/demo.py b/examples/industrial_data_pretraining/whisper/demo.py
index e9c2a83..5665550 100644
--- a/examples/industrial_data_pretraining/whisper/demo.py
+++ b/examples/industrial_data_pretraining/whisper/demo.py
@@ -8,7 +8,7 @@
from funasr import AutoModel
model = AutoModel(
- model="iic/Whisper-large-v3",
+ model="Whisper-large-v3-turbo",
vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
vad_kwargs={"max_single_segment_time": 30000},
)
diff --git a/examples/industrial_data_pretraining/whisper/demo_from_openai.py b/examples/industrial_data_pretraining/whisper/demo_from_openai.py
index 097e942..2678a5a 100644
--- a/examples/industrial_data_pretraining/whisper/demo_from_openai.py
+++ b/examples/industrial_data_pretraining/whisper/demo_from_openai.py
@@ -11,7 +11,7 @@
# model = AutoModel(model="Whisper-medium", hub="openai")
# model = AutoModel(model="Whisper-large-v2", hub="openai")
model = AutoModel(
- model="Whisper-large-v3",
+ model="Whisper-large-v3-turbo",
vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
vad_kwargs={"max_single_segment_time": 30000},
hub="openai",
diff --git a/funasr/download/name_maps_from_hub.py b/funasr/download/name_maps_from_hub.py
index 54ec61f..42ecf70 100644
--- a/funasr/download/name_maps_from_hub.py
+++ b/funasr/download/name_maps_from_hub.py
@@ -36,6 +36,7 @@
"iic/emotion2vec_plus_base": "emotion2vec/emotion2vec_plus_base",
"emotion2vec_plus_seed": "emotion2vec/emotion2vec_plus_seed",
"iic/emotion2vec_plus_seed": "emotion2vec/emotion2vec_plus_seed",
+ "Whisper-large-v3-turbo": "iic/Whisper-large-v3-turbo",
}
name_maps_openai = {
@@ -51,4 +52,5 @@
"Whisper-large-v2": "large-v2",
"Whisper-large-v3": "large-v3",
"Whisper-large": "large",
+ "Whisper-large-v3-turbo": "turbo",
}
diff --git a/funasr/models/whisper/model.py b/funasr/models/whisper/model.py
index 791fddd..398eea3 100644
--- a/funasr/models/whisper/model.py
+++ b/funasr/models/whisper/model.py
@@ -28,6 +28,7 @@
@tables.register("model_classes", "Whisper-large-v1")
@tables.register("model_classes", "Whisper-large-v2")
@tables.register("model_classes", "Whisper-large-v3")
+@tables.register("model_classes", "Whisper-large-v3-turbo")
@tables.register("model_classes", "WhisperWarp")
class WhisperWarp(nn.Module):
def __init__(self, *args, **kwargs):
diff --git a/funasr/version.txt b/funasr/version.txt
index 5165303..d7f1518 100644
--- a/funasr/version.txt
+++ b/funasr/version.txt
@@ -1 +1 @@
-1.1.11
\ No newline at end of file
+1.1.12
\ No newline at end of file
--
Gitblit v1.9.1