From 030043f768fa82c73e5decdf95f1016bf49b962a Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 13 Apr 2023 10:05:16 +0800
Subject: [PATCH] Merge pull request #341 from alibaba-damo-academy/dev_zly2

---
 funasr/runtime/python/onnxruntime/demo_vad_online.py |   18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/funasr/runtime/python/onnxruntime/demo_vad.py b/funasr/runtime/python/onnxruntime/demo_vad_online.py
similarity index 60%
rename from funasr/runtime/python/onnxruntime/demo_vad.py
rename to funasr/runtime/python/onnxruntime/demo_vad_online.py
index 2e17197..15e62da 100644
--- a/funasr/runtime/python/onnxruntime/demo_vad.py
+++ b/funasr/runtime/python/onnxruntime/demo_vad_online.py
@@ -1,21 +1,18 @@
 import soundfile
-from funasr_onnx import Fsmn_vad
+from funasr_onnx.vad_online_bin import Fsmn_vad
 
 
-model_dir = "/Users/zhifu/Downloads/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-wav_path = "/Users/zhifu/Downloads/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav"
+model_dir = "/mnt/ailsa.zly/tfbase/espnet_work/FunASR_dev_zly/export/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch"
+wav_path = "/mnt/ailsa.zly/tfbase/espnet_work/FunASR_dev_zly/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/vad_example_16k.wav"
 model = Fsmn_vad(model_dir)
 
-#offline vad
-# result = model(wav_path)
-# print(result)
 
-#online vad
+##online vad
 speech, sample_rate = soundfile.read(wav_path)
 speech_length = speech.shape[0]
-
+#
 sample_offset = 0
-step = 160 * 10
+step = 1600
 param_dict = {'in_cache': []}
 for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
     if sample_offset + step >= speech_length - 1:
@@ -26,5 +23,6 @@
     param_dict['is_final'] = is_final
     segments_result = model(audio_in=speech[sample_offset: sample_offset + step],
                             param_dict=param_dict)
-    print(segments_result)
+    if segments_result:
+        print(segments_result)
 

--
Gitblit v1.9.1