From 369382050bf71c249944545f009a29a8632fdda5 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期四, 25 一月 2024 15:04:47 +0800
Subject: [PATCH] funasr1.0.2

---
 README.md |   41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/README.md b/README.md
index 0094dc4..499bb40 100644
--- a/README.md
+++ b/README.md
@@ -55,16 +55,16 @@
 (Note: 馃 represents the Huggingface model zoo link, 猸� represents the ModelScope model zoo link)
 
 
-|                                                                             Model Name                                                                             |                                Task Details                                 |          Training Data           | Parameters |
-|:------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------:|:--------------------------------:|:----------:|
-|    paraformer-zh <br> ([⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)  [🤗]() )    |             speech recognition, with timestamps, non-streaming              |      60000 hours, Mandarin       |    220M    |
-|                paraformer-zh-spk <br> ( [⭐](https://modelscope.cn/models/damo/speech_paraformer-large-vad-punc-spk_asr_nat-zh-cn/summary)  [🤗]() )                | speech recognition with speaker diarization, with timestamps, non-streaming |      60000 hours, Mandarin       |    220M    |
-| <nobr>paraformer-zh-online <br> ( [⭐](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary) [🤗]() )</nobr> |                        speech recognition, streaming                        |      60000 hours, Mandarin       |    220M    |
-|         paraformer-en <br> ( [⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary) [🤗]() )         |             speech recognition, with timestamps, non-streaming              |       50000 hours, English       |    220M    |
-|                     conformer-en <br> ( [⭐](https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary) [🤗]() )                      |                      speech recognition, non-streaming                      |       50000 hours, English       |    220M    |
-|                     ct-punc <br> ( [⭐](https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [🤗]() )                      |                           punctuation restoration                           |    100M, Mandarin and English    |    1.1G    | 
-|                          fsmn-vad <br> ( [⭐](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [🤗]() )                          |                          voice activity detection                           | 5000 hours, Mandarin and English |    0.4M    | 
-|                          fa-zh <br> ( [⭐](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [🤗]() )                           |                            timestamp prediction                             |       5000 hours, Mandarin       |    38M     | 
+|                                                                             Model Name                                                                             |                    Task Details                    |          Training Data           | Parameters |
+|:------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:--------------------------------------------------:|:--------------------------------:|:----------:|
+|    paraformer-zh <br> ([⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)  [🤗]() )    | speech recognition, with timestamps, non-streaming |      60000 hours, Mandarin       |    220M    |
+| <nobr>paraformer-zh-online <br> ( [⭐](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary) [🤗]() )</nobr> |           speech recognition, streaming            |      60000 hours, Mandarin       |    220M    |
+|         paraformer-en <br> ( [⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary) [🤗]() )         | speech recognition, with timestamps, non-streaming |       50000 hours, English       |    220M    |
+|                     conformer-en <br> ( [⭐](https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary) [🤗]() )                      |         speech recognition, non-streaming          |       50000 hours, English       |    220M    |
+|                     ct-punc <br> ( [⭐](https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [🤗]() )                      |              punctuation restoration               |    100M, Mandarin and English    |    1.1G    | 
+|                          fsmn-vad <br> ( [⭐](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [🤗]() )                          |              voice activity detection              | 5000 hours, Mandarin and English |    0.4M    | 
+|                          fa-zh <br> ( [⭐](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [🤗]() )                           |                timestamp prediction                |       5000 hours, Mandarin       |    38M     | 
+|                cam++ <br> ( [⭐](https://modelscope.cn/models/iic/speech_campplus_sv_zh-cn_16k-common/summary) [🤗]() )                                             |        speaker verification/diarization            |            5000 hours            |    7.2M    | 
 
 
 
@@ -91,12 +91,13 @@
 from funasr import AutoModel
 # paraformer-zh is a multi-functional asr model
 # use vad, punc, spk or not as you need
-model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
-                  vad_model="fsmn-vad", vad_model_revision="v2.0.2", \
-                  punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
-                  spk_model="cam++", spk_model_revision="v2.0.2")
+model = AutoModel(model="paraformer-zh", model_revision="v2.0.4",
+                  vad_model="fsmn-vad", vad_model_revision="v2.0.4",
+                  punc_model="ct-punc-c", punc_model_revision="v2.0.4",
+                  # spk_model="cam++", spk_model_revision="v2.0.2",
+                  )
 res = model.generate(input=f"{model.model_path}/example/asr_example.wav", 
-                     batch_size=64, 
+                     batch_size_s=300, 
                      hotword='魔搭')
 print(res)
 ```
@@ -110,7 +111,7 @@
 encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
 decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
 
-model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.2")
+model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.4")
 
 import soundfile
 import os
@@ -133,7 +134,7 @@
 ```python
 from funasr import AutoModel
 
-model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
+model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")
 wav_file = f"{model.model_path}/example/asr_example.wav"
 res = model.generate(input=wav_file)
 print(res)
@@ -143,7 +144,7 @@
 from funasr import AutoModel
 
 chunk_size = 200 # ms
-model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
+model = AutoModel(model="fsmn-vad", model_revision="v2.0.4")
 
 import soundfile
 
@@ -164,7 +165,7 @@
 ```python
 from funasr import AutoModel
 
-model = AutoModel(model="ct-punc", model_revision="v2.0.2")
+model = AutoModel(model="ct-punc", model_revision="v2.0.4")
 res = model.generate(input="那今天的会就到这里吧 happy new year 明年见")
 print(res)
 ```
@@ -172,7 +173,7 @@
 ```python
 from funasr import AutoModel
 
-model = AutoModel(model="fa-zh", model_revision="v2.0.2")
+model = AutoModel(model="fa-zh", model_revision="v2.0.4")
 wav_file = f"{model.model_path}/example/asr_example.wav"
 text_file = f"{model.model_path}/example/text.txt"
 res = model.generate(input=(wav_file, text_file), data_type=("sound", "text"))

--
Gitblit v1.9.1