From 7eaf608c2d4473a77bd1590f93ea9bdbedde346a Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: 星期五, 19 五月 2023 11:31:33 +0800
Subject: [PATCH] Merge pull request #531 from alibaba-damo-academy/dev_new

---
 funasr/runtime/html5/readme_cn.md                                                           |    2 +-
 funasr/runtime/python/websocket/ws_server_2pass.py                                          |   17 ++++++++++++-----
 funasr/runtime/python/websocket/ws_server_online.py                                         |    2 ++
 egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vadrealtime-vocab272727/demo.py |    2 +-
 4 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vadrealtime-vocab272727/demo.py b/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vadrealtime-vocab272727/demo.py
index c449ab2..583a1eb 100644
--- a/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vadrealtime-vocab272727/demo.py
+++ b/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vadrealtime-vocab272727/demo.py
@@ -9,7 +9,7 @@
 inference_pipeline = pipeline(
     task=Tasks.punctuation,
     model='damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727',
-    model_revision = 'v1.0.2'
+    model_revision='v1.0.2'
 )
 
 ##################text二进制数据#####################
diff --git a/funasr/runtime/html5/readme_cn.md b/funasr/runtime/html5/readme_cn.md
index d1a56eb..d7fb1aa 100644
--- a/funasr/runtime/html5/readme_cn.md
+++ b/funasr/runtime/html5/readme_cn.md
@@ -28,7 +28,7 @@
 ```shell
 usage: h5Server.py [-h] [--host HOST] [--port PORT] [--certfile CERTFILE]
                    [--keyfile KEYFILE]
-python h5Server.py --port 1337
+python h5Server.py --port 1337 --keyfile server.key
 ```
 ## 2.鍚姩ws or wss asr online srv
 [具体请看online asr](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/websocket)
diff --git a/funasr/runtime/python/websocket/ws_server_2pass.py b/funasr/runtime/python/websocket/ws_server_2pass.py
index 186197a..1d3c135 100644
--- a/funasr/runtime/python/websocket/ws_server_2pass.py
+++ b/funasr/runtime/python/websocket/ws_server_2pass.py
@@ -46,7 +46,7 @@
     inference_pipeline_punc = pipeline(
         task=Tasks.punctuation,
         model=args.punc_model,
-        model_revision=None,
+        model_revision="v1.0.2",
         ngpu=args.ngpu,
         ncpu=args.ncpu,
     )
@@ -74,6 +74,7 @@
     websocket.param_dict_punc = {'cache': list()}
     websocket.vad_pre_idx = 0
     speech_start = False
+    speech_end_i = False
     websocket.wav_name = "microphone"
     print("new user connected", flush=True)
 
@@ -99,7 +100,9 @@
         
                     # asr online
                     frames_asr_online.append(message)
-                    if len(frames_asr_online) % websocket.chunk_interval == 0:
+                    websocket.param_dict_asr_online["is_final"] = speech_end_i
+                    if len(frames_asr_online) % websocket.chunk_interval == 0 or websocket.param_dict_asr_online["is_final"]:
+                        
                         audio_in = b"".join(frames_asr_online)
                         await async_asr_online(websocket, audio_in)
                         frames_asr_online = []
@@ -115,12 +118,13 @@
                         frames_asr.extend(frames_pre)
                 # asr punc offline
                 if speech_end_i or not websocket.is_speaking:
+                    # print("vad end point")
                     audio_in = b"".join(frames_asr)
                     await async_asr(websocket, audio_in)
                     frames_asr = []
                     speech_start = False
-                    frames_asr_online = []
-                    websocket.param_dict_asr_online = {"cache": dict()}
+                    # frames_asr_online = []
+                    # websocket.param_dict_asr_online = {"cache": dict()}
                     if not websocket.is_speaking:
                         websocket.vad_pre_idx = 0
                         frames = []
@@ -173,10 +177,13 @@
 async def async_asr_online(websocket, audio_in):
     if len(audio_in) > 0:
         audio_in = load_bytes(audio_in)
+        # print(websocket.param_dict_asr_online.get("is_final", False))
         rec_result = inference_pipeline_asr_online(audio_in=audio_in,
                                                    param_dict=websocket.param_dict_asr_online)
+        # print(rec_result)
         if websocket.param_dict_asr_online.get("is_final", False):
-            websocket.param_dict_asr_online["cache"] = dict()
+            return
+            #     websocket.param_dict_asr_online["cache"] = dict()
         if "text" in rec_result:
             if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
                 # print("online", rec_result)
diff --git a/funasr/runtime/python/websocket/ws_server_online.py b/funasr/runtime/python/websocket/ws_server_online.py
index 2255688..4cecd5f 100644
--- a/funasr/runtime/python/websocket/ws_server_online.py
+++ b/funasr/runtime/python/websocket/ws_server_online.py
@@ -106,8 +106,10 @@
 async def async_asr_online(websocket,audio_in):
 	if len(audio_in) >= 0:
 		audio_in = load_bytes(audio_in)
+		# print(websocket.param_dict_asr_online.get("is_final", False))
 		rec_result = inference_pipeline_asr_online(audio_in=audio_in,
 		                                           param_dict=websocket.param_dict_asr_online)
+		# print(rec_result)
 		if websocket.param_dict_asr_online.get("is_final", False):
 			websocket.param_dict_asr_online["cache"] = dict()
 		if "text" in rec_result:

--
Gitblit v1.9.1