From e175dae50b1346aa0abf481808862f43b3f5b2e4 Mon Sep 17 00:00:00 2001
From: Anjali Barge <bargeanjali650@gmail.com>
Date: Sat, 20 Sep 2025 22:39:57 +0800
Subject: [PATCH] examples: update emotion2vec demo with model variants and hub selection (#2675)

---
 examples/industrial_data_pretraining/emotion2vec/demo.py |    9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/examples/industrial_data_pretraining/emotion2vec/demo.py b/examples/industrial_data_pretraining/emotion2vec/demo.py
index f33dfee..51b4e27 100644
--- a/examples/industrial_data_pretraining/emotion2vec/demo.py
+++ b/examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -6,14 +6,21 @@
 from funasr import AutoModel
 
 # model="iic/emotion2vec_base"
+# model="iic/emotion2vec_base_finetuned"
+# model="iic/emotion2vec_plus_seed"
+# model="iic/emotion2vec_plus_base"
+model = "iic/emotion2vec_plus_large"
+
 model = AutoModel(
-    model="iic/emotion2vec_base_finetuned",
+    model=model,
     # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
     # vad_model_revision="master",
     # vad_kwargs={"max_single_segment_time": 2000},
+    hub="ms",  # "ms" or "modelscope" for users in mainland China; "hf" or "huggingface" for overseas users
 )
 
 wav_file = f"{model.model_path}/example/test.wav"
+
 res = model.generate(
     wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=False
 )

--
Gitblit v1.9.1