From 787b9d8e7e0107f6cd74a71b3d29494617960ccf Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 06 Jun 2023 22:09:42 +0800
Subject: [PATCH] model license

---
 funasr/runtime/python/websocket/wss_srv_asr.py |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/funasr/runtime/python/websocket/wss_srv_asr.py b/funasr/runtime/python/websocket/wss_srv_asr.py
index 948619b..3810cd6 100644
--- a/funasr/runtime/python/websocket/wss_srv_asr.py
+++ b/funasr/runtime/python/websocket/wss_srv_asr.py
@@ -95,7 +95,7 @@
     websocket.param_dict_punc = {'cache': list()}
     websocket.vad_pre_idx = 0
     speech_start = False
-    speech_end_i = False
+    speech_end_i = -1
     websocket.wav_name = "microphone"
     websocket.mode = "2pass"
     print("new user connected", flush=True)
@@ -124,7 +124,7 @@
         
                     # asr online
                     frames_asr_online.append(message)
-                    websocket.param_dict_asr_online["is_final"] = speech_end_i
+                    websocket.param_dict_asr_online["is_final"] = speech_end_i != -1
                     if len(frames_asr_online) % websocket.chunk_interval == 0 or websocket.param_dict_asr_online["is_final"]:
                         if websocket.mode == "2pass" or websocket.mode == "online":
                             audio_in = b"".join(frames_asr_online)
@@ -134,14 +134,14 @@
                         frames_asr.append(message)
                     # vad online
                     speech_start_i, speech_end_i = await async_vad(websocket, message)
-                    if speech_start_i:
+                    if speech_start_i != -1:
                         speech_start = True
                         beg_bias = (websocket.vad_pre_idx-speech_start_i)//duration_ms
                         frames_pre = frames[-beg_bias:]
                         frames_asr = []
                         frames_asr.extend(frames_pre)
                 # asr punc offline
-                if speech_end_i or not websocket.is_speaking:
+                if speech_end_i != -1 or not websocket.is_speaking:
                     # print("vad end point")
                     if websocket.mode == "2pass" or websocket.mode == "offline":
                         audio_in = b"".join(frames_asr)
@@ -172,15 +172,15 @@
 
     segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
 
-    speech_start = False
-    speech_end = False
+    speech_start = -1
+    speech_end = -1
     
     if len(segments_result) == 0 or len(segments_result["text"]) > 1:
         return speech_start, speech_end
     if segments_result["text"][0][0] != -1:
         speech_start = segments_result["text"][0][0]
     if segments_result["text"][0][1] != -1:
-        speech_end = True
+        speech_end = segments_result["text"][0][1]
     return speech_start, speech_end
 
 

--
Gitblit v1.9.1