From c6361cc2a7e99be802d7d7e81a93e874f0faf5cd Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 15 Jan 2024 14:15:53 +0800
Subject: [PATCH] funasr1.0

---
 README_zh.md |  129 ++++++++++++++++++++++---------
 README.md    |  107 +++++++++++++++++++++-----
 2 files changed, 176 insertions(+), 60 deletions(-)

diff --git a/README.md b/README.md
index 23d197a..50ca183 100644
--- a/README.md
+++ b/README.md
@@ -76,57 +76,120 @@
 
 <a name="quick-start"></a>
 ## Quick Start
-Quick start for new users ([tutorial](https://alibaba-damo-academy.github.io/FunASR/en/funasr/quick_start.html))
-
-FunASR supports inference and fine-tuning of models trained on industrial data for tens of thousands of hours. For more details, please refer to [modelscope_egs](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_pipeline/quick_start.html). It also supports training and fine-tuning of models on academic standard datasets. For more information, please refer to [egs](https://alibaba-damo-academy.github.io/FunASR/en/academic_recipe/asr_recipe.html).
 
 Below is a quick start tutorial. Test audio files ([Mandarin](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav), [English]()).
 
 ### Command-line usage
 
 ```shell
-funasr --model paraformer-zh asr_example_zh.wav
+funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=asr_example_zh.wav
 ```
 
 Notes: Supports recognition of a single audio file, as well as a file list in Kaldi-style wav.scp format: `wav_id wav_path`
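+
+As a sketch of the file-list form (hypothetical paths; the Python API accepting a wav.scp path mirrors the CLI behavior described above and should be treated as an assumption):
+
+```python
+# A minimal sketch: build a Kaldi-style wav.scp, one "wav_id wav_path" per line,
+# then pass the list file as input (paths below are hypothetical).
+from funasr import AutoModel
+
+with open("wav.scp", "w") as f:
+    f.write("sample01 /data/audio/sample01.wav\n")
+    f.write("sample02 /data/audio/sample02.wav\n")
+
+model = AutoModel(model="paraformer-zh")
+res = model(input="wav.scp")  # assumption: AutoModel accepts a wav.scp list like the CLI
+print(res)
+```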
 
 ### Speech Recognition (Non-streaming)
 ```python
-from funasr import infer
+from funasr import AutoModel
 
-p = infer(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc", model_hub="ms")
+model = AutoModel(model="paraformer-zh")
+# for long audio input, you can add the vad and punc models
+# model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")
 
-res = p("asr_example_zh.wav", batch_size_token=5000)
+res = model(input="asr_example_zh.wav", batch_size=64)
 print(res)
 ```
 Note: `model_hub` specifies the model repository; `ms` selects ModelScope download, and `hf` selects Huggingface download.
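+
+A hedged sketch of hub selection (the keyword name `model_hub` is taken from the note above; verify it against the `AutoModel` signature of your installed version):
+
+```python
+from funasr import AutoModel
+
+# "ms" downloads from ModelScope, "hf" from Huggingface.
+# The keyword name follows the note above and is an assumption.
+model = AutoModel(model="paraformer-zh", model_hub="ms")
+res = model(input="asr_example_zh.wav")
+print(res)
+```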
 
 ### Speech Recognition (Streaming)
 ```python
-from funasr import infer
-
-p = infer(model="paraformer-zh-streaming", model_hub="ms")
+from funasr import AutoModel
 
 chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
-param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size, "encoder_chunk_look_back": 4, "decoder_chunk_look_back": 1}
+encoder_chunk_look_back = 4  # number of chunks to look back for encoder self-attention
+decoder_chunk_look_back = 1  # number of encoder chunks to look back for decoder cross-attention
 
-import torchaudio
-speech = torchaudio.load("asr_example_zh.wav")[0][0]
-speech_length = speech.shape[0]
+model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.0")
 
-stride_size = chunk_size[1] * 960
-sample_offset = 0
-for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):
-    param_dict["is_final"] = True if sample_offset + stride_size >= speech_length - 1 else False
-    input = speech[sample_offset: sample_offset + stride_size]
-    rec_result = p(input=input, param_dict=param_dict)
-    print(rec_result)
+import soundfile
+import os
+
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+speech, sample_rate = soundfile.read(wav_file)
+chunk_stride = chunk_size[1] * 960 # 600ms
+
+cache = {}
+total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)  # ceil(len(speech) / chunk_stride)
+for i in range(total_chunk_num):
+    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
+    is_final = i == total_chunk_num - 1
+    res = model(input=speech_chunk,
+                cache=cache,
+                is_final=is_final,
+                chunk_size=chunk_size,
+                encoder_chunk_look_back=encoder_chunk_look_back,
+                decoder_chunk_look_back=decoder_chunk_look_back,
+                )
+    print(res)
 ```
 Note: `chunk_size` is the configuration for streaming latency. `[0,10,5]` indicates that the real-time display granularity is `10*60=600ms`, and the lookahead information is `5*60=300ms`. Each inference call takes `600ms` of input (`16000*0.6=9600` sample points) and outputs the corresponding text. For the last speech segment, `is_final=True` must be set to force output of the last word.
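+
+The latency arithmetic in one place (pure arithmetic from the note above; one chunk unit is 60ms, i.e. 960 samples at 16kHz):
+
+```python
+chunk_size = [0, 10, 5]
+samples_per_unit = 960                           # 60 ms at 16 kHz: 16000 * 0.06
+display_ms = chunk_size[1] * 60                  # 10 * 60 = 600 ms display granularity
+lookahead_ms = chunk_size[2] * 60                # 5 * 60 = 300 ms lookahead
+chunk_stride = chunk_size[1] * samples_per_unit  # 9600 samples fed per inference call
+assert chunk_stride == int(16000 * display_ms / 1000)
+```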
 
-Quick start for new users can be found in [docs](https://alibaba-damo-academy.github.io/FunASR/en/funasr/quick_start_zh.html)
+### Voice Activity Detection (Non-streaming)
+```python
+from funasr import AutoModel
 
+model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
 
+wav_file = f"{model.model_path}/example/asr_example.wav"
+res = model(input=wav_file)
+print(res)
+```
+### Voice Activity Detection (Streaming)
+```python
+from funasr import AutoModel
+
+chunk_size = 200 # ms
+model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
+
+import soundfile
+
+wav_file = f"{model.model_path}/example/vad_example.wav"
+speech, sample_rate = soundfile.read(wav_file)
+chunk_stride = int(chunk_size * sample_rate / 1000)
+
+cache = {}
+total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)  # ceil(len(speech) / chunk_stride)
+for i in range(total_chunk_num):
+    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
+    is_final = i == total_chunk_num - 1
+    res = model(input=speech_chunk,
+                cache=cache,
+                is_final=is_final,
+                chunk_size=chunk_size,
+                )
+    if len(res[0]["value"]):
+        print(res)
+```
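+
+A small sketch of consuming the streaming results above (the `[beg_ms, end_ms]` layout with `-1` marking an undecided boundary follows FunASR's documented VAD output and should be treated as an assumption):
+
+```python
+# Hedged sketch: collect completed segments from streaming VAD results.
+segments = []
+for beg, end in res[0]["value"]:
+    if beg != -1 and end != -1:
+        segments.append((beg, end))          # complete speech segment
+    elif beg != -1:
+        print(f"speech starts at {beg} ms")  # segment opened, end still unknown
+    elif end != -1:
+        print(f"speech ends at {end} ms")    # closes the previously opened segment
+```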
+### Punctuation Restoration
+```python
+from funasr import AutoModel
+
+model = AutoModel(model="ct-punc", model_revision="v2.0.1")
+
+res = model(input="那今天的会就到这里吧 happy new year 明年见")
+print(res)
+```
+### Timestamp Prediction
+```python
+from funasr import AutoModel
+
+model = AutoModel(model="fa-zh", model_revision="v2.0.0")
+
+wav_file = f"{model.model_path}/example/asr_example.wav"
+text_file = f"{model.model_path}/example/text.txt"  # transcript paired with the audio (filename assumed)
+res = model(input=(wav_file, text_file),
+            data_type=("sound", "text"))
+print(res)
+```
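+
+To relate the output to the input pair, a hedged sketch (the `text` and `timestamp` result keys and their token-level alignment are assumptions about the result layout):
+
+```python
+# Hedged sketch: pair each token with its predicted [beg_ms, end_ms] span.
+tokens = res[0]["text"].split()
+for token, (beg, end) in zip(tokens, res[0]["timestamp"]):
+    print(f"{token}: {beg}-{end} ms")
+```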
 [//]: # (FunASR supports inference and fine-tuning of models trained on industrial datasets of tens of thousands of hours. For more details, please refer to &#40;[modelscope_egs]&#40;https://alibaba-damo-academy.github.io/FunASR/en/modelscope_pipeline/quick_start.html&#41;&#41;. It also supports training and fine-tuning of models on academic standard datasets. For more details, please refer to&#40;[egs]&#40;https://alibaba-damo-academy.github.io/FunASR/en/academic_recipe/asr_recipe.html&#41;&#41;. The models include speech recognition &#40;ASR&#41;, speech activity detection &#40;VAD&#41;, punctuation recovery, language model, speaker verification, speaker separation, and multi-party conversation speech recognition. For a detailed list of models, please refer to the [Model Zoo]&#40;https://github.com/alibaba-damo-academy/FunASR/blob/main/docs/model_zoo/modelscope_models.md&#41;:)
 
 ## Deployment Service
diff --git a/README_zh.md b/README_zh.md
index 6c75e42..5a489ee 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -57,29 +57,28 @@
 (Note: [🤗]() denotes the Huggingface model repository link, [⭐]() denotes the ModelScope model repository link)
 
 
-| Model Name | Task Details | Training Data | Parameters |
-|:---:|:---:|:---:|:---:|
-| paraformer-zh <br> ([⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary) [🤗]() ) | speech recognition, with timestamp output, non-streaming | 60000 hours, Mandarin | 220M |
-| paraformer-zh-spk <br> ([⭐](https://modelscope.cn/models/damo/speech_paraformer-large-vad-punc-spk_asr_nat-zh-cn/summary) [🤗]() ) | speech recognition with speaker diarization, with timestamp output, non-streaming | 60000 hours, Mandarin | 220M |
-| paraformer-zh-online <br> ([⭐](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary) [🤗]() ) | speech recognition, streaming | 60000 hours, Mandarin | 220M |
-| paraformer-en <br> ([⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary) [🤗]() ) | speech recognition, non-streaming | 50000 hours, English | 220M |
-| paraformer-en-spk <br> ([⭐]() [🤗]() ) | speech recognition, non-streaming | 50000 hours, English | 220M |
-| conformer-en <br> ([⭐](https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary) [🤗]() ) | speech recognition, non-streaming | 50000 hours, English | 220M |
-| ct-punc <br> ([⭐](https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [🤗]() ) | punctuation restoration | 100M, Mandarin and English | 1.1G |
-| fsmn-vad <br> ([⭐](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [🤗]() ) | voice activity detection, streaming | 5000 hours, Mandarin and English | 0.4M |
-| fa-zh <br> ([⭐](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [🤗]() ) | character-level timestamp prediction | 50000 hours, Mandarin | 38M |
+| Model Name | Task Details | Training Data | Parameters |
+|:---:|:---:|:---:|:---:|
+| paraformer-zh <br> ([⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary) [🤗]() ) | speech recognition, with timestamp output, non-streaming | 60000 hours, Mandarin | 220M |
+| paraformer-zh-spk <br> ([⭐](https://modelscope.cn/models/damo/speech_paraformer-large-vad-punc-spk_asr_nat-zh-cn/summary) [🤗]() ) | speech recognition with speaker diarization, with timestamp output, non-streaming | 60000 hours, Mandarin | 220M |
+| paraformer-zh-streaming <br> ([⭐](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary) [🤗]() ) | speech recognition, streaming | 60000 hours, Mandarin | 220M |
+| paraformer-en <br> ([⭐](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-en-16k-common-vocab10020/summary) [🤗]() ) | speech recognition, non-streaming | 50000 hours, English | 220M |
+| paraformer-en-spk <br> ([⭐]() [🤗]() ) | speech recognition, non-streaming | 50000 hours, English | 220M |
+| conformer-en <br> ([⭐](https://modelscope.cn/models/damo/speech_conformer_asr-en-16k-vocab4199-pytorch/summary) [🤗]() ) | speech recognition, non-streaming | 50000 hours, English | 220M |
+| ct-punc <br> ([⭐](https://modelscope.cn/models/damo/punc_ct-transformer_cn-en-common-vocab471067-large/summary) [🤗]() ) | punctuation restoration | 100M, Mandarin and English | 1.1G |
+| fsmn-vad <br> ([⭐](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) [🤗]() ) | voice activity detection, streaming | 5000 hours, Mandarin and English | 0.4M |
+| fa-zh <br> ([⭐](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) [🤗]() ) | character-level timestamp prediction | 50000 hours, Mandarin | 38M |
 
 
 <a name="快速开始"></a>
 ## Quick Start
-FunASR supports inference and fine-tuning of models trained on tens of thousands of hours of industrial data; for details, see ([modelscope_egs](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_pipeline/quick_start.html)). It also supports training and fine-tuning of models on academic standard datasets; see ([egs](https://alibaba-damo-academy.github.io/FunASR/en/academic_recipe/asr_recipe.html)).
 
 Below is a quick start tutorial. Test audio files ([Mandarin](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav), [English]()).
 
 ### Command-line usage
 
 ```shell
-funasr --model paraformer-zh asr_example_zh.wav
+funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=asr_example_zh.wav
 ```
 
 娉細鏀寔鍗曟潯闊抽鏂囦欢璇嗗埆锛屼篃鏀寔鏂囦欢鍒楄〃锛屽垪琛ㄤ负kaldi椋庢牸wav.scp锛歚wav_id   wav_path`
@@ -90,55 +89,109 @@
 
 model = AutoModel(model="paraformer-zh")
 # for long audio input, you can add the vad and punc models
-# model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad")
+# model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")
 
 res = model(input="asr_example_zh.wav", batch_size=64)
 print(res)
 ```
 娉細`model_hub`锛氳〃绀烘ā鍨嬩粨搴擄紝`ms`涓洪�夋嫨modelscope涓嬭浇锛宍hf`涓洪�夋嫨huggingface涓嬭浇銆�
 
-[//]: # (### Streaming Speech Recognition)
+### Speech Recognition (Streaming)
 
-[//]: # (```python)
+```python
+from funasr import AutoModel
 
-[//]: # (from funasr import infer)
+chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
+encoder_chunk_look_back = 4  # number of chunks to look back for encoder self-attention
+decoder_chunk_look_back = 1  # number of encoder chunks to look back for decoder cross-attention
 
-[//]: # ()
-[//]: # (p = infer&#40;model="paraformer-zh-streaming", model_hub="ms"&#41;)
+model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.0")
 
-[//]: # ()
-[//]: # (chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms)
+import soundfile
+import os
 
-[//]: # (param_dict = {"cache": dict&#40;&#41;, "is_final": False, "chunk_size": chunk_size, "encoder_chunk_look_back": 4, "decoder_chunk_look_back": 1})
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+speech, sample_rate = soundfile.read(wav_file)
+chunk_stride = chunk_size[1] * 960 # 600ms
 
-[//]: # ()
-[//]: # (import torchaudio)
+cache = {}
+total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)  # ceil(len(speech) / chunk_stride)
+for i in range(total_chunk_num):
+    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
+    is_final = i == total_chunk_num - 1
+    res = model(input=speech_chunk,
+                cache=cache,
+                is_final=is_final,
+                chunk_size=chunk_size,
+                encoder_chunk_look_back=encoder_chunk_look_back,
+                decoder_chunk_look_back=decoder_chunk_look_back,
+                )
+    print(res)
+```
 
-[//]: # (speech = torchaudio.load&#40;"asr_example_zh.wav"&#41;[0][0])
+娉細`chunk_size`涓烘祦寮忓欢鏃堕厤缃紝`[0,10,5]`琛ㄧず涓婂睆瀹炴椂鍑哄瓧绮掑害涓篳10*60=600ms`锛屾湭鏉ヤ俊鎭负`5*60=300ms`銆傛瘡娆℃帹鐞嗚緭鍏ヤ负`600ms`锛堥噰鏍风偣鏁颁负`16000*0.6=960`锛夛紝杈撳嚭涓哄搴旀枃瀛楋紝鏈�鍚庝竴涓闊崇墖娈佃緭鍏ラ渶瑕佽缃甡is_final=True`鏉ュ己鍒惰緭鍑烘渶鍚庝竴涓瓧銆�
 
-[//]: # (speech_length = speech.shape[0])
+### 璇煶绔偣妫�娴嬶紙闈炲疄鏃讹級
+```python
+from funasr import AutoModel
 
-[//]: # ()
-[//]: # (stride_size = chunk_size[1] * 960)
+model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
 
-[//]: # (sample_offset = 0)
+wav_file = f"{model.model_path}/example/asr_example.wav"
+res = model(input=wav_file)
+print(res)
+```
 
-[//]: # (for sample_offset in range&#40;0, speech_length, min&#40;stride_size, speech_length - sample_offset&#41;&#41;:)
+### 璇煶绔偣妫�娴嬶紙瀹炴椂锛�
+```python
+from funasr import AutoModel
 
-[//]: # (    param_dict["is_final"] = True if sample_offset + stride_size >= speech_length - 1 else False)
+chunk_size = 200 # ms
+model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
 
-[//]: # (    input = speech[sample_offset: sample_offset + stride_size])
+import soundfile
 
-[//]: # (    rec_result = p&#40;input=input, param_dict=param_dict&#41;)
+wav_file = f"{model.model_path}/example/vad_example.wav"
+speech, sample_rate = soundfile.read(wav_file)
+chunk_stride = int(chunk_size * sample_rate / 1000)
 
-[//]: # (    print&#40;rec_result&#41;)
+cache = {}
+total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)  # ceil(len(speech) / chunk_stride)
+for i in range(total_chunk_num):
+    speech_chunk = speech[i*chunk_stride:(i+1)*chunk_stride]
+    is_final = i == total_chunk_num - 1
+    res = model(input=speech_chunk,
+                cache=cache,
+                is_final=is_final,
+                chunk_size=chunk_size,
+                )
+    if len(res[0]["value"]):
+        print(res)
+```
 
-[//]: # (```)
+### Punctuation Restoration
+```python
+from funasr import AutoModel
 
-[//]: # (娉細`chunk_size`涓烘祦寮忓欢鏃堕厤缃紝`[0,10,5]`琛ㄧず涓婂睆瀹炴椂鍑哄瓧绮掑害涓篳10*60=600ms`锛屾湭鏉ヤ俊鎭负`5*60=300ms`銆傛瘡娆℃帹鐞嗚緭鍏ヤ负`600ms`锛堥噰鏍风偣鏁颁负`16000*0.6=960`锛夛紝杈撳嚭涓哄搴旀枃瀛楋紝鏈�鍚庝竴涓闊崇墖娈佃緭鍏ラ渶瑕佽缃甡is_final=True`鏉ュ己鍒惰緭鍑烘渶鍚庝竴涓瓧銆�)
+model = AutoModel(model="ct-punc", model_revision="v2.0.1")
 
-[//]: # ()
-[//]: # (More detailed usage: [docs]&#40;https://alibaba-damo-academy.github.io/FunASR/en/funasr/quick_start_zh.html&#41;)
+res = model(input="那今天的会就到这里吧 happy new year 明年见")
+print(res)
+```
+
+### Timestamp Prediction
+```python
+from funasr import AutoModel
+
+model = AutoModel(model="fa-zh", model_revision="v2.0.0")
+
+wav_file = f"{model.model_path}/example/asr_example.wav"
+text_file = f"{model.model_path}/example/text.txt"  # transcript paired with the audio (filename assumed)
+res = model(input=(wav_file, text_file),
+            data_type=("sound", "text"))
+print(res)
+```
+More detailed usage: [examples](examples/industrial_data_pretraining)
 
 
 <a name="服务部署"></a>

--
Gitblit v1.9.1