kongdeqiang
5 天以前 28ccfbfc51068a663a80764e14074df5edf2b5ba
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
#  MIT License  (https://opensource.org/licenses/MIT)
 
from funasr import AutoModel
 
# model="iic/emotion2vec_base"
# model="iic/emotion2vec_base_finetuned"
# model="iic/emotion2vec_plus_seed"
# model="iic/emotion2vec_plus_base"
# Pick the model to run. Kept in its own variable (`model_id`) so it is not
# shadowed below — the original rebound the single name `model` from the ID
# string to the AutoModel instance, which made `model=model` confusing and
# left the commented alternatives pointing at a dead binding.
# model_id = "iic/emotion2vec_base"
# model_id = "iic/emotion2vec_base_finetuned"
# model_id = "iic/emotion2vec_plus_seed"
# model_id = "iic/emotion2vec_plus_base"
model_id = "iic/emotion2vec_plus_large"

model = AutoModel(
    model=model_id,
    # Optional VAD front-end for long audio; disabled in this demo.
    # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    # vad_model_revision="master",
    # vad_kwargs={"max_single_segment_time": 2000},
    hub="ms",  # "ms" or "modelscope" for Mainland China users; "hf" or "huggingface" for Other overseas users
)

# Example audio bundled with the downloaded model files.
wav_file = f"{model.model_path}/example/test.wav"

# granularity="utterance": one emotion prediction for the whole utterance;
# extract_embedding=False: return labels/scores only, no embedding vectors.
res = model.generate(
    wav_file, output_dir="./outputs", granularity="utterance", extract_embedding=False
)
print(res)