# Author: zhifu gao
# Date: 2023-04-13, commit 030043f768fa82c73e5decdf95f1016bf49b962a
# Path: funasr/runtime/python/onnxruntime/demo_vad_online.py
# File was renamed from funasr/runtime/python/onnxruntime/demo_vad.py
"""Streaming (online) VAD demo: feed an FSMN VAD ONNX model fixed-size audio
chunks and print the detected speech segments as they become available."""
import soundfile
from funasr_onnx.vad_online_bin import Fsmn_vad

model_dir = "/mnt/ailsa.zly/tfbase/espnet_work/FunASR_dev_zly/export/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch"
wav_path = "/mnt/ailsa.zly/tfbase/espnet_work/FunASR_dev_zly/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/vad_example_16k.wav"
model = Fsmn_vad(model_dir)

# offline vad (one-shot over the whole file):
# result = model(wav_path)
# print(result)

# online (streaming) vad
speech, sample_rate = soundfile.read(wav_path)
speech_length = speech.shape[0]

sample_offset = 0
# 1600 samples = 100 ms per chunk at 16 kHz -- TODO confirm against model's expected chunk size
step = 1600
# 'in_cache' carries the model's streaming state between chunks
param_dict = {'in_cache': []}
for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
    if sample_offset + step >= speech_length - 1:
        # Last chunk: shrink to the remaining samples and flag end-of-stream.
        step = speech_length - sample_offset
        is_final = True
    else:
        is_final = False
    param_dict['is_final'] = is_final
    segments_result = model(audio_in=speech[sample_offset: sample_offset + step],
                            param_dict=param_dict)
    # The model only emits segments once a boundary is decided; skip empty results.
    if segments_result:
        print(segments_result)