From 699b006ee5a6d0748fc9d37f7c068af7c98c2c8e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 14 May 2024 17:13:29 +0800
Subject: [PATCH] emotion2vec demo: switch to emotion2vec_plus models, disable embedding extraction
---
examples/industrial_data_pretraining/emotion2vec/demo.py | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/examples/industrial_data_pretraining/emotion2vec/demo.py b/examples/industrial_data_pretraining/emotion2vec/demo.py
index a41641e..99d96ae 100644
--- a/examples/industrial_data_pretraining/emotion2vec/demo.py
+++ b/examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -5,8 +5,21 @@
from funasr import AutoModel
-model = AutoModel(model="damo/emotion2vec_base", model_revision="v2.0.1")
+# model="iic/emotion2vec_base"
+# model="iic/emotion2vec_base_finetuned"
+# model="iic/emotion2vec_plus_seed_modelscope"
+# model="iic/emotion2vec_plus_base_modelscope"
+model = "iic/emotion2vec_plus_large_modelscope"
+
+model = AutoModel(
+ model=model,
+ # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+ # vad_model_revision="master",
+ # vad_kwargs={"max_single_segment_time": 2000},
+)
wav_file = f"{model.model_path}/example/test.wav"
-res = model.generate(wav_file, output_dir="./outputs", granularity="utterance")
-print(res)
\ No newline at end of file
+res = model.generate(
+ wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=False
+)
+print(res)
--
Gitblit v1.9.1