From c4e70144920401a4f5d9b569929b6914af5d324c Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 24 Jan 2025 16:59:23 +0800
Subject: [PATCH] v1.2.3
---
examples/industrial_data_pretraining/emotion2vec/demo.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/examples/industrial_data_pretraining/emotion2vec/demo.py b/examples/industrial_data_pretraining/emotion2vec/demo.py
index f33dfee..51b4e27 100644
--- a/examples/industrial_data_pretraining/emotion2vec/demo.py
+++ b/examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -6,14 +6,21 @@
from funasr import AutoModel
# model="iic/emotion2vec_base"
+# model="iic/emotion2vec_base_finetuned"
+# model="iic/emotion2vec_plus_seed"
+# model="iic/emotion2vec_plus_base"
+model = "iic/emotion2vec_plus_large"
+
model = AutoModel(
- model="iic/emotion2vec_base_finetuned",
+ model=model,
# vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
# vad_model_revision="master",
# vad_kwargs={"max_single_segment_time": 2000},
+ hub="ms", # "ms" or "modelscope" for users in Mainland China; "hf" or "huggingface" for other overseas users
)
wav_file = f"{model.model_path}/example/test.wav"
+
res = model.generate(
wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=False
)
--
Gitblit v1.9.1