游雁
2024-03-25 817ff41fbc5afbde346db62ad5e28e33178a622a
examples/industrial_data_pretraining/emotion2vec/demo.py
@@ -5,7 +5,13 @@
# Demo: speech emotion recognition with emotion2vec through FunASR's AutoModel.
from funasr import AutoModel

# Base (pretrained, not finetuned) checkpoint — immediately superseded by the
# finetuned model below; kept only to show the alternative model id.
model = AutoModel(model="damo/emotion2vec_base", model_revision="v2.0.1")
# model="iic/emotion2vec_base"
model = AutoModel(model="iic/emotion2vec_base_finetuned",
                  # Optional VAD front-end for segmenting long recordings:
                  # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
                  # vad_model_revision="master",
                  # vad_kwargs={"max_single_segment_time": 2000},
                  )
# BUG FIX: AutoModel instances are not callable — inference goes through
# .generate() (see the call below). `model(input=...)` raised TypeError.
res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", output_dir="./outputs")
# Bundled example clip shipped alongside the downloaded model weights.
wav_file = f"{model.model_path}/example/test.wav"
# granularity="utterance": one emotion prediction per clip;
# extract_embedding=False: return labels/scores only, skip the feature dump.
res = model.generate(wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=False)
print(res)