From 4ee715e70e36cdba7b05fe044fecab9cf4fa16ff Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 03 Jul 2023 17:23:02 +0800
Subject: [PATCH] websocket: fix VAD start/end handling in wss_srv_asr.py
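
The online VAD pipeline reports speech boundaries as time offsets,
with -1 meaning "no boundary in this chunk", but wss_srv_asr.py was
treating those offsets as booleans. A start offset of 0 is falsy, so a
segment beginning at the very start of the stream was silently
dropped, and the end offset was collapsed to True/False, discarding
the actual endpoint. Changes:

- async_vad now returns the raw offsets (or -1) for both start and
  end, and every call site tests != -1 instead of truthiness;
- speech_end_i is initialized to -1 to match the sentinel convention
  before any VAD result arrives (False != -1 would wrongly read as an
  endpoint under the new checks);
- the output_dir and batch_size arguments are dropped from the VAD
  pipeline construction;
- the warm-up inference calls in the close handler are commented out;
  the audio_in built there (b''.join over an int16 ndarray) appears
  broken anyway, since bytes.join expects bytes-like items
  (np.zeros(16000, dtype=np.int16).tobytes() would be the usual form).

A minimal, self-contained illustration of the truthiness bug, with
hypothetical values rather than code from the file:

    speech_start_i = 0        # VAD reports speech starting at offset 0
    if speech_start_i:        # old check: 0 is falsy, the start is missed
        print("old check fires")   # never reached
    if speech_start_i != -1:  # new check: 0 is a valid offset
        print("new check fires")   # reached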

---
 funasr/runtime/python/websocket/wss_srv_asr.py |   22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/funasr/runtime/python/websocket/wss_srv_asr.py b/funasr/runtime/python/websocket/wss_srv_asr.py
index 948619b..09f2305 100644
--- a/funasr/runtime/python/websocket/wss_srv_asr.py
+++ b/funasr/runtime/python/websocket/wss_srv_asr.py
@@ -35,8 +35,6 @@
     task=Tasks.voice_activity_detection,
     model=args.vad_model,
     model_revision=None,
-    output_dir=None,
-    batch_size=1,
     mode='online',
     ngpu=args.ngpu,
     ncpu=args.ncpu,
@@ -69,9 +67,9 @@
     websocket.param_dict_asr_online = {"cache": dict()}
     websocket.param_dict_vad = {'in_cache': dict(), "is_final": True}
     websocket.param_dict_asr_online["is_final"]=True
-    audio_in=b''.join(np.zeros(int(16000),dtype=np.int16))
-    inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
-    inference_pipeline_asr_online(audio_in=audio_in, param_dict=websocket.param_dict_asr_online)
+    # audio_in=b''.join(np.zeros(int(16000),dtype=np.int16))
+    # inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
+    # inference_pipeline_asr_online(audio_in=audio_in, param_dict=websocket.param_dict_asr_online)
     await websocket.close()
     
     
@@ -95,7 +93,7 @@
     websocket.param_dict_punc = {'cache': list()}
     websocket.vad_pre_idx = 0
     speech_start = False
-    speech_end_i = False
+    speech_end_i = -1
     websocket.wav_name = "microphone"
     websocket.mode = "2pass"
     print("new user connected", flush=True)
@@ -124,7 +122,7 @@
         
                     # asr online
                     frames_asr_online.append(message)
-                    websocket.param_dict_asr_online["is_final"] = speech_end_i
+                    websocket.param_dict_asr_online["is_final"] = speech_end_i != -1
                     if len(frames_asr_online) % websocket.chunk_interval == 0 or websocket.param_dict_asr_online["is_final"]:
                         if websocket.mode == "2pass" or websocket.mode == "online":
                             audio_in = b"".join(frames_asr_online)
@@ -134,14 +132,14 @@
                         frames_asr.append(message)
                     # vad online
                     speech_start_i, speech_end_i = await async_vad(websocket, message)
-                    if speech_start_i:
+                    if speech_start_i != -1:
                         speech_start = True
                         beg_bias = (websocket.vad_pre_idx-speech_start_i)//duration_ms
                         frames_pre = frames[-beg_bias:]
                         frames_asr = []
                         frames_asr.extend(frames_pre)
                 # asr punc offline
-                if speech_end_i or not websocket.is_speaking:
+                if speech_end_i != -1 or not websocket.is_speaking:
                     # print("vad end point")
                     if websocket.mode == "2pass" or websocket.mode == "offline":
                         audio_in = b"".join(frames_asr)
@@ -172,15 +170,15 @@
 
     segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
 
-    speech_start = False
-    speech_end = False
+    speech_start = -1
+    speech_end = -1
     
     if len(segments_result) == 0 or len(segments_result["text"]) > 1:
         return speech_start, speech_end
     if segments_result["text"][0][0] != -1:
         speech_start = segments_result["text"][0][0]
     if segments_result["text"][0][1] != -1:
-        speech_end = True
+        speech_end = segments_result["text"][0][1]
     return speech_start, speech_end
 
 

--
Gitblit v1.9.1