From 977dc3eb9832f251676f2d908f3d5793ecc45270 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 16 Jan 2024 13:53:47 +0800
Subject: [PATCH] docs

---
 examples/industrial_data_pretraining/emotion2vec/demo.py |    3 ++-
 README_zh.md                                             |    2 +-
 README.md                                                |    3 +++
 3 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 2bd28e2..0094dc4 100644
--- a/README.md
+++ b/README.md
@@ -178,6 +178,9 @@
 res = model.generate(input=(wav_file, text_file), data_type=("sound", "text"))
 print(res)
 ```
+
+For more examples, please refer to the [docs](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining)
+
 [//]: # (FunASR supports inference and fine-tuning of models trained on industrial datasets of tens of thousands of hours. For more details, please refer to &#40;[modelscope_egs]&#40;https://alibaba-damo-academy.github.io/FunASR/en/modelscope_pipeline/quick_start.html&#41;&#41;. It also supports training and fine-tuning of models on academic standard datasets. For more details, please refer to&#40;[egs]&#40;https://alibaba-damo-academy.github.io/FunASR/en/academic_recipe/asr_recipe.html&#41;&#41;. The models include speech recognition &#40;ASR&#41;, speech activity detection &#40;VAD&#41;, punctuation recovery, language model, speaker verification, speaker separation, and multi-party conversation speech recognition. For a detailed list of models, please refer to the [Model Zoo]&#40;https://github.com/alibaba-damo-academy/FunASR/blob/main/docs/model_zoo/modelscope_models.md&#41;:)
 
 ## Deployment Service
diff --git a/README_zh.md b/README_zh.md
index dc20302..57a6bbb 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -182,7 +182,7 @@
 res = model.generate(input=(wav_file, text_file), data_type=("sound", "text"))
 print(res)
 ```
-鏇村璇︾粏鐢ㄦ硶锛圼绀轰緥](examples/industrial_data_pretraining)锛�
+鏇村璇︾粏鐢ㄦ硶锛圼绀轰緥](https://github.com/alibaba-damo-academy/FunASR/tree/main/examples/industrial_data_pretraining)锛�
 
 
 <a name="服务部署"></a>
diff --git a/examples/industrial_data_pretraining/emotion2vec/demo.py b/examples/industrial_data_pretraining/emotion2vec/demo.py
index ea8da99..91d00aa 100644
--- a/examples/industrial_data_pretraining/emotion2vec/demo.py
+++ b/examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -7,5 +7,6 @@
 
 model = AutoModel(model="damo/emotion2vec_base", model_revision="v2.0.1")
 
-res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", output_dir="./outputs")
+wav_file = f"{model.model_path}/example/example/test.wav"
+res = model.generate(wav_file, output_dir="./outputs", granularity="utterance")
 print(res)
\ No newline at end of file

--
Gitblit v1.9.1