zhifu gao
2024-05-15 a0f03bd2a87d97d47a1636bbe6f0855a43160331
examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -5,7 +5,22 @@
from funasr import AutoModel
# To load a local checkpoint instead of a ModelScope id, point `model` at its
# directory, e.g. "/Users/zhifu/Downloads/modelscope_models/emotion2vec_base"
# model="iic/emotion2vec_base"
# model="iic/emotion2vec_base_finetuned"
# model="iic/emotion2vec_plus_seed"
# model="iic/emotion2vec_plus_base"
model = "iic/emotion2vec_plus_large"
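# Build the inference pipeline from the selected model id; the commented-out
# vad_model arguments enable FSMN-VAD segmentation for long recordings.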
model = AutoModel(
    model=model,
    # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    # vad_model_revision="master",
    # vad_kwargs={"max_single_segment_time": 2000},
)
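# Run utterance-level emotion recognition on the example wav bundled with the
# downloaded model; results are printed and also written to ./outputs.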
wav_file = f"{model.model_path}/example/test.wav"
res = model.generate(
    wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=False
)
print(res)
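# --- Optional variation (an illustrative sketch, not part of the original demo) ---
# Assuming extract_embedding behaves as its name suggests, setting it to True makes
# generate() also return/save the utterance-level emotion2vec embedding alongside
# the predicted emotion labels and scores.
res_with_emb = model.generate(
    wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=True
)
print(res_with_emb)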