examples/industrial_data_pretraining/emotion2vec/demo.py
"""Demo: utterance-level emotion recognition with emotion2vec via FunASR.

Downloads the fine-tuned emotion2vec model from ModelScope, runs it on the
example WAV bundled with the model, and prints the recognition result.
"""
from funasr import AutoModel

# Load the fine-tuned emotion2vec model by ModelScope id, pinning the
# revision so the demo stays reproducible as the hub model evolves.
model = AutoModel(model="iic/emotion2vec_base_finetuned", model_revision="v2.0.4")

# Use the example audio shipped inside the downloaded model directory,
# so the demo works on any machine without a hard-coded local path.
wav_file = f"{model.model_path}/example/test.wav"

# granularity="utterance": one emotion prediction per utterance;
# extract_embedding=False: return labels/scores only, skip embedding dump.
res = model.generate(
    wav_file,
    output_dir="./outputs",
    granularity="utterance",
    extract_embedding=False,
)
print(res)