From e81486f1585273c13cafa10bf42235c2449cc13b Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 18 May 2023 19:45:35 +0800
Subject: [PATCH] websocket online 2pass bugfix
---
funasr/runtime/python/websocket/ws_server_2pass.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/funasr/runtime/python/websocket/ws_server_2pass.py b/funasr/runtime/python/websocket/ws_server_2pass.py
index 186197a..1d3c135 100644
--- a/funasr/runtime/python/websocket/ws_server_2pass.py
+++ b/funasr/runtime/python/websocket/ws_server_2pass.py
@@ -46,7 +46,7 @@
inference_pipeline_punc = pipeline(
task=Tasks.punctuation,
model=args.punc_model,
- model_revision=None,
+ model_revision="v1.0.2",
ngpu=args.ngpu,
ncpu=args.ncpu,
)
@@ -74,6 +74,7 @@
websocket.param_dict_punc = {'cache': list()}
websocket.vad_pre_idx = 0
speech_start = False
+ speech_end_i = False
websocket.wav_name = "microphone"
print("new user connected", flush=True)
@@ -99,7 +100,9 @@
# asr online
frames_asr_online.append(message)
- if len(frames_asr_online) % websocket.chunk_interval == 0:
+ websocket.param_dict_asr_online["is_final"] = speech_end_i
+ if len(frames_asr_online) % websocket.chunk_interval == 0 or websocket.param_dict_asr_online["is_final"]:
+
audio_in = b"".join(frames_asr_online)
await async_asr_online(websocket, audio_in)
frames_asr_online = []
@@ -115,12 +118,13 @@
frames_asr.extend(frames_pre)
# asr punc offline
if speech_end_i or not websocket.is_speaking:
+ # print("vad end point")
audio_in = b"".join(frames_asr)
await async_asr(websocket, audio_in)
frames_asr = []
speech_start = False
- frames_asr_online = []
- websocket.param_dict_asr_online = {"cache": dict()}
+ # frames_asr_online = []
+ # websocket.param_dict_asr_online = {"cache": dict()}
if not websocket.is_speaking:
websocket.vad_pre_idx = 0
frames = []
@@ -173,10 +177,13 @@
async def async_asr_online(websocket, audio_in):
if len(audio_in) > 0:
audio_in = load_bytes(audio_in)
+ # print(websocket.param_dict_asr_online.get("is_final", False))
rec_result = inference_pipeline_asr_online(audio_in=audio_in,
param_dict=websocket.param_dict_asr_online)
+ # print(rec_result)
if websocket.param_dict_asr_online.get("is_final", False):
- websocket.param_dict_asr_online["cache"] = dict()
+ return
+ # websocket.param_dict_asr_online["cache"] = dict()
if "text" in rec_result:
if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
# print("online", rec_result)
--
Gitblit v1.9.1