From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] Update emotion2vec demo

---
 examples/industrial_data_pretraining/emotion2vec/demo.py |   22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/examples/industrial_data_pretraining/emotion2vec/demo.py b/examples/industrial_data_pretraining/emotion2vec/demo.py
index abaa9f4..51b4e27 100644
--- a/examples/industrial_data_pretraining/emotion2vec/demo.py
+++ b/examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -5,7 +5,23 @@
 
 from funasr import AutoModel
 
-model = AutoModel(model="damo/emotion2vec_base", model_revision="v2.0.1")
+# model="iic/emotion2vec_base"
+# model="iic/emotion2vec_base_finetuned"
+# model="iic/emotion2vec_plus_seed"
+# model="iic/emotion2vec_plus_base"
+model = "iic/emotion2vec_plus_large"
 
-res = model(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", output_dir="./outputs")
-print(res)
\ No newline at end of file
+model = AutoModel(
+    model=model,
+    # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+    # vad_model_revision="master",
+    # vad_kwargs={"max_single_segment_time": 2000},
+    hub="ms",  # "ms" or "modelscope" for Mainland China users; "hf" or "huggingface" for Other overseas users
+)
+
+wav_file = f"{model.model_path}/example/test.wav"  # example audio shipped with the downloaded model
+
+res = model.generate(  # returns utterance-level emotion labels and scores
+    wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=False
+)
+print(res)

--
Gitblit v1.9.1