From 19645da9e998581e6329d679bae968d0c45d1895 Mon Sep 17 00:00:00 2001
From: shixian.shi <shixian.shi@alibaba-inc.com>
Date: Mon, 15 Jan 2024 20:46:40 +0800
Subject: [PATCH] update readme

---
 README_zh.md |   21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/README_zh.md b/README_zh.md
index 62d251b..2da2171 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -86,12 +86,15 @@
 ### 非实时语音识别
 ```python
 from funasr import AutoModel
-
-model = AutoModel(model="paraformer-zh")
-# for the long duration wav, you could add vad model
-# model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")
-
-res = model(input="asr_example_zh.wav", batch_size=64)
+# paraformer-zh is a multi-functional asr model
+# use vad, punc, spk or not as you need
+model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
+                  vad_model="fsmn-vad", vad_model_revision="v2.0.2", \
+                  punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
+                  spk_model="cam++", spk_model_revision="v2.0.2")
+res = model(input=f"{model.model_path}/example/asr_example.wav", 
+            batch_size=64, 
+            hotword='魔搭')
 print(res)
 ```
 娉細`model_hub`锛氳〃绀烘ā鍨嬩粨搴擄紝`ms`涓洪�夋嫨modelscope涓嬭浇锛宍hf`涓洪�夋嫨huggingface涓嬭浇銆�
@@ -105,7 +108,7 @@
 encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
 decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
 
-model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.0")
+model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.2")
 
 import soundfile
 import os
@@ -163,7 +166,7 @@
 ```python
 from funasr import AutoModel
 
-model = AutoModel(model="ct-punc", model_revision="v2.0.1")
+model = AutoModel(model="ct-punc", model_revision="v2.0.2")
 
 res = model(input="那今天的会就到这里吧 happy new year 明年见")
 print(res)
@@ -176,7 +179,7 @@
 model = AutoModel(model="fa-zh", model_revision="v2.0.0")
 
 wav_file = f"{model.model_path}/example/asr_example.wav"
-text_file = f"{model.model_path}/example/asr_example.wav"
+text_file = f"{model.model_path}/example/text.txt"
 res = model(input=(wav_file, text_file), data_type=("sound", "text"))
 print(res)
 ```

--
Gitblit v1.9.1