From c039cbc3bf3311c370d891c1bf67b275e95f0cd3 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 29 Mar 2023 13:20:27 +0800
Subject: [PATCH] export
---
funasr/runtime/python/onnxruntime/demo_vad.py | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/funasr/runtime/python/onnxruntime/demo_vad.py b/funasr/runtime/python/onnxruntime/demo_vad.py
index ae033cc..2e17197 100644
--- a/funasr/runtime/python/onnxruntime/demo_vad.py
+++ b/funasr/runtime/python/onnxruntime/demo_vad.py
@@ -1,8 +1,26 @@
-
+import soundfile
from funasr_onnx import Fsmn_vad
model_dir = "/Users/zhifu/Downloads/speech_fsmn_vad_zh-cn-16k-common-pytorch"
-
+wav_path = "/Users/zhifu/Downloads/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav"
model = Fsmn_vad(model_dir)
-wav_path = "/Users/zhifu/Downloads/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav"
+#offline vad
+# result = model(wav_path)
+# print(result)
-result = model(wav_path)
-print(result)
\ No newline at end of file
+#online vad
+speech, sample_rate = soundfile.read(wav_path)
+speech_length = speech.shape[0]
+
+sample_offset = 0
+step = 160 * 10
+param_dict = {'in_cache': []}
+for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
+ if sample_offset + step >= speech_length - 1:
+ step = speech_length - sample_offset
+ is_final = True
+ else:
+ is_final = False
+ param_dict['is_final'] = is_final
+ segments_result = model(audio_in=speech[sample_offset: sample_offset + step],
+ param_dict=param_dict)
+ print(segments_result)
+
--
Gitblit v1.9.1