Yuekai Zhang
2024-07-15 584cfbdc433cfb3d7852868db83060b6d9aa0edf
examples/industrial_data_pretraining/sense_voice/demo.py
@@ -3,26 +3,26 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
#  MIT License  (https://opensource.org/licenses/MIT)
import sys
from funasr import AutoModel
from funasr.utils.postprocess_utils import rich_transcription_postprocess

# Minimal SenseVoice inference demo: load the small SenseVoice model from
# ModelScope, transcribe one example audio clip, and post-process the raw
# tagged output into plain text.

model_dir = "iic/SenseVoiceSmall"

# Remote test clip hosted by Alibaba; a local wav path works just as well.
input_file = (
    "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
)

model = AutoModel(
    model=model_dir,
    # Optional VAD front-end for long-form audio; uncomment to enable
    # automatic segmentation before recognition.
    # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    # vad_kwargs={"max_single_segment_time": 30000},
)

res = model.generate(
    input=input_file,
    cache={},
    language="auto",  # "zn", "en", "yue", "ja", "ko", "nospeech"
    use_itn=False,  # no inverse text normalization (keep spoken-form numbers)
)

# SenseVoice emits special tags (language / emotion / audio-event markers);
# rich_transcription_postprocess strips them into readable text.
text = rich_transcription_postprocess(res[0]["text"])
print(text)