From a30953f75997d31fbc1b18fedf4ec7f82b2f58cf Mon Sep 17 00:00:00 2001
From: zhaomingwork <zhaomingwork@qq.com>
Date: Fri, 12 May 2023 11:19:19 +0800
Subject: [PATCH] Make the Python websocket server protocol-compatible with the C++ client
---
funasr/runtime/python/websocket/ws_server_online.py | 53 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/funasr/runtime/python/websocket/ws_server_online.py b/funasr/runtime/python/websocket/ws_server_online.py
index 6ea8f39..44edf98 100644
--- a/funasr/runtime/python/websocket/ws_server_online.py
+++ b/funasr/runtime/python/websocket/ws_server_online.py
@@ -37,31 +37,41 @@
async def ws_serve(websocket, path):
- frames_online = []
+ frames_asr_online = []
global websocket_users
- websocket.send_msg = Queue()
websocket_users.add(websocket)
websocket.param_dict_asr_online = {"cache": dict()}
- websocket.speek_online = Queue()
-
+ print("new user connected",flush=True)
try:
async for message in websocket:
- message = json.loads(message)
- is_finished = message["is_finished"]
- if not is_finished:
- audio = bytes(message['audio'], 'ISO-8859-1')
-
- is_speaking = message["is_speaking"]
- websocket.param_dict_asr_online["is_final"] = not is_speaking
-
- websocket.param_dict_asr_online["chunk_size"] = message["chunk_size"]
-
- frames_online.append(audio)
- if len(frames_online) % message["chunk_interval"] == 0 or not is_speaking:
- audio_in = b"".join(frames_online)
+
+
+ if isinstance(message,str):
+ messagejson = json.loads(message)
+
+ if "is_speaking" in messagejson:
+ websocket.is_speaking = messagejson["is_speaking"]
+ websocket.param_dict_asr_online["is_final"] = not websocket.is_speaking
+ if "is_finished" in messagejson:
+ websocket.is_speaking = False
+ websocket.param_dict_asr_online["is_final"] = True
+ if "chunk_interval" in messagejson:
+ websocket.chunk_interval=messagejson["chunk_interval"]
+ if "wav_name" in messagejson:
+ websocket.wav_name = messagejson.get("wav_name", "demo")
+ if "chunk_size" in messagejson:
+ websocket.param_dict_asr_online["chunk_size"] = messagejson["chunk_size"]
+            # if there are buffered audio bytes, or this message itself is audio bytes
+ if len(frames_asr_online)>0 or not isinstance(message,str):
+ if not isinstance(message,str):
+ frames_asr_online.append(message)
+ if len(frames_asr_online) % websocket.chunk_interval == 0 or not websocket.is_speaking:
+ audio_in = b"".join(frames_asr_online)
+ if not websocket.is_speaking:
+                    # pad 0.5 s of silence at the end to guarantee the asr engine emits the last word
+ audio_in=audio_in+b''.join(np.zeros(int(16000*0.5),dtype=np.int16))
await async_asr_online(websocket,audio_in)
- frames_online = []
-
+ frames_asr_online = []
except websockets.ConnectionClosed:
@@ -71,6 +81,7 @@
print("InvalidState...")
except Exception as e:
print("Exception:", e)
+
async def async_asr_online(websocket,audio_in):
if len(audio_in) > 0:
@@ -81,9 +92,7 @@
websocket.param_dict_asr_online["cache"] = dict()
if "text" in rec_result:
if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
- if len(rec_result["text"])>0:
- rec_result["text"][0]=rec_result["text"][0] #.replace(" ","")
- message = json.dumps({"mode": "online", "text": rec_result["text"]})
+ message = json.dumps({"mode": "online", "text": rec_result["text"], "wav_name": websocket.wav_name})
await websocket.send(message)
--
Gitblit v1.9.1