From cc91b8fb60c31fbcf4b8479eead42ad0a16d35bb Mon Sep 17 00:00:00 2001
From: lyblsgo <lyblsgo@163.com>
Date: Mon, 08 May 2023 14:18:41 +0800
Subject: [PATCH] modify copyright

---
 funasr/runtime/python/websocket/ws_server_online.py |   19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/funasr/runtime/python/websocket/ws_server_online.py b/funasr/runtime/python/websocket/ws_server_online.py
index a395d73..b1cd4ea 100644
--- a/funasr/runtime/python/websocket/ws_server_online.py
+++ b/funasr/runtime/python/websocket/ws_server_online.py
@@ -12,7 +12,7 @@
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 from modelscope.utils.logger import get_logger
-from funasr_onnx.utils.frontend import load_bytes
+from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes
 
 tracemalloc.start()
 
@@ -28,6 +28,8 @@
 inference_pipeline_asr_online = pipeline(
     task=Tasks.auto_speech_recognition,
     model=args.asr_model_online,
+    ngpu=args.ngpu,
+    ncpu=args.ncpu,
     model_revision='v1.0.4')
 
 print("model loaded")
@@ -51,7 +53,7 @@
 
                 is_speaking = message["is_speaking"]
                 websocket.param_dict_asr_online["is_final"] = not is_speaking
-
+                websocket.wav_name = message.get("wav_name", "demo")
                 websocket.param_dict_asr_online["chunk_size"] = message["chunk_size"]
                 
                 frames_online.append(audio)
@@ -63,14 +65,14 @@
 
      
     except websockets.ConnectionClosed:
-        print("ConnectionClosed...", websocket_users)    # 閾炬帴鏂紑
+        print("ConnectionClosed...", websocket_users)
         websocket_users.remove(websocket)
     except websockets.InvalidState:
-        print("InvalidState...")    # 鏃犳晥鐘舵��
+        print("InvalidState...")
     except Exception as e:
         print("Exception:", e)
  
-async def async_asr_online(websocket,audio_in): # ASR鎺ㄧ悊
+async def async_asr_online(websocket,audio_in):
             if len(audio_in) > 0:
                 audio_in = load_bytes(audio_in)
                 rec_result = inference_pipeline_asr_online(audio_in=audio_in,
@@ -79,12 +81,11 @@
                     websocket.param_dict_asr_online["cache"] = dict()
                 if "text" in rec_result:
                     if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
-                        if len(rec_result["text"])>0:
-                            rec_result["text"][0]=rec_result["text"][0].replace(" ","")
-                        message = json.dumps({"mode": "online", "text": rec_result["text"]})
+                        # if len(rec_result["text"])>0:
+                        #     rec_result["text"][0]=rec_result["text"][0] #.replace(" ","")
+                        message = json.dumps({"mode": "online", "text": rec_result["text"], "wav_name": websocket.wav_name})
                         await websocket.send(message)
 
- 
 
 
 start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)

--
Gitblit v1.9.1