From a30953f75997d31fbc1b18fedf4ec7f82b2f58cf Mon Sep 17 00:00:00 2001
From: zhaomingwork <zhaomingwork@qq.com>
Date: Fri, 12 May 2023 11:19:19 +0800
Subject: [PATCH] cpp python websocket compatible

---
 funasr/runtime/python/websocket/ws_client.py        |   19 +++++++++++++------
 funasr/runtime/python/websocket/ws_server_online.py |   41 +++++++++++++++++++++++++++--------------
 2 files changed, 40 insertions(+), 20 deletions(-)

diff --git a/funasr/runtime/python/websocket/ws_client.py b/funasr/runtime/python/websocket/ws_client.py
index a4a6d9f..f8632ce 100644
--- a/funasr/runtime/python/websocket/ws_client.py
+++ b/funasr/runtime/python/websocket/ws_client.py
@@ -84,12 +84,13 @@
                     rate=RATE,
                     input=True,
                     frames_per_buffer=CHUNK)
+    message = json.dumps({"chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "wav_name": wav_name,"is_speaking": True})
+    voices.put(message)
     is_speaking = True
     while True:
 
         data = stream.read(CHUNK)
-        data = data.decode('ISO-8859-1')
-        message = json.dumps({"chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "audio": data, "is_speaking": is_speaking, "is_finished": is_finished})
+        message = data  
         
         voices.put(message)
 
@@ -122,15 +123,21 @@
         stride = int(60*args.chunk_size[1]/args.chunk_interval/1000*16000*2)
         chunk_num = (len(audio_bytes)-1)//stride + 1
         # print(stride)
+        
+        # send first time
+        message = json.dumps({"chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "wav_name": wav_name,"is_speaking": True})
+        voices.put(message)
         is_speaking = True
         for i in range(chunk_num):
-            if i == chunk_num-1:
-                is_speaking = False
+
             beg = i*stride
             data = audio_bytes[beg:beg+stride]
-            data = data.decode('ISO-8859-1')
-            message = json.dumps({"chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "is_speaking": is_speaking, "audio": data, "is_finished": is_finished, "wav_name": wav_name})
+            message = data  
             voices.put(message)
+            if i == chunk_num-1:
+                is_speaking = False
+                message = json.dumps({"is_speaking": is_speaking})
+                voices.put(message)
             # print("data_chunk: ", len(data_chunk))
             # print(voices.qsize())
             sleep_duration = 0.001 if args.send_without_sleep else 60*args.chunk_size[1]/args.chunk_interval/1000
diff --git a/funasr/runtime/python/websocket/ws_server_online.py b/funasr/runtime/python/websocket/ws_server_online.py
index 3c0fb16..44edf98 100644
--- a/funasr/runtime/python/websocket/ws_server_online.py
+++ b/funasr/runtime/python/websocket/ws_server_online.py
@@ -41,25 +41,37 @@
     global websocket_users
     websocket_users.add(websocket)
     websocket.param_dict_asr_online = {"cache": dict()}
-
+    print("new user connected",flush=True)
     try:
         async for message in websocket:
-            message = json.loads(message)
-            is_finished = message["is_finished"]
-            if not is_finished:
-                audio = bytes(message['audio'], 'ISO-8859-1')
-
-                is_speaking = message["is_speaking"]
-                websocket.param_dict_asr_online["is_final"] = not is_speaking
-                websocket.wav_name = message.get("wav_name", "demo")
-                websocket.param_dict_asr_online["chunk_size"] = message["chunk_size"]
-                
-                frames_asr_online.append(audio)
-                if len(frames_asr_online) % message["chunk_interval"] == 0 or not is_speaking:
+            
+ 
+            if isinstance(message,str):
+              messagejson = json.loads(message)
+               
+              if "is_speaking" in messagejson:
+                  websocket.is_speaking = messagejson["is_speaking"]  
+                  websocket.param_dict_asr_online["is_final"] = not websocket.is_speaking
+              if "is_finished" in messagejson:
+                  websocket.is_speaking = False
+                  websocket.param_dict_asr_online["is_final"] = True
+              if "chunk_interval" in messagejson:
+                  websocket.chunk_interval=messagejson["chunk_interval"]
+              if "wav_name" in messagejson:
+                  websocket.wav_name = messagejson.get("wav_name", "demo")
+              if "chunk_size" in messagejson:
+                  websocket.param_dict_asr_online["chunk_size"] = messagejson["chunk_size"]
+            # if has bytes in buffer or message is bytes
+            if len(frames_asr_online)>0 or not isinstance(message,str):
+               if not isinstance(message,str):
+                 frames_asr_online.append(message)
+               if len(frames_asr_online) % websocket.chunk_interval == 0 or not websocket.is_speaking:
                     audio_in = b"".join(frames_asr_online)
+                    if not websocket.is_speaking:
+                       # padding 0.5s at end guarantees that the asr engine can flush out the last word
+                       audio_in=audio_in+b''.join(np.zeros(int(16000*0.5),dtype=np.int16))
                     await async_asr_online(websocket,audio_in)
                     frames_asr_online = []
-
 
      
     except websockets.ConnectionClosed:
@@ -69,6 +81,7 @@
         print("InvalidState...")
     except Exception as e:
         print("Exception:", e)
+
  
 async def async_asr_online(websocket,audio_in):
             if len(audio_in) > 0:

--
Gitblit v1.9.1