zhifu gao
2023-03-14 681868deae142875570d8efd6433a8b4e8227f6a
Merge pull request #225 from alibaba-damo-academy/dev_zly

update infer_online.py
4个文件已修改
16 ■■■■■ 已修改文件
egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/infer.py 2 ●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/infer_online.py 5 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/infer.py 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/infer_online.py 5 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/infer.py
@@ -7,7 +7,7 @@
     inference_pipline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-        model_revision=None,
+        model_revision='v1.2.0',
         output_dir=output_dir,
         batch_size=1,
     )
egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/infer_online.py
@@ -8,9 +8,10 @@
     inference_pipline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
-        model_revision='v1.1.9',
-        output_dir=None,
+        model_revision='v1.2.0',
+        output_dir=output_dir,
         batch_size=1,
+        mode='online',
     )
     speech, sample_rate = soundfile.read("./vad_example_16k.wav")
     speech_length = speech.shape[0]
egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/infer.py
@@ -7,8 +7,8 @@
     inference_pipline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",
-        model_revision=None,
-        output_dir='./output_dir',
+        model_revision='v1.2.0',
+        output_dir=output_dir,
         batch_size=1,
     )
     segments_result = inference_pipline(audio_in=audio_in)
egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/infer_online.py
@@ -8,9 +8,10 @@
     inference_pipline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",
-        model_revision='v1.1.9',
-        output_dir='./output_dir',
+        model_revision='v1.2.0',
+        output_dir=output_dir,
         batch_size=1,
+        mode='online',
     )
     speech, sample_rate = soundfile.read("./vad_example_8k.wav")
     speech_length = speech.shape[0]