From 4e44c9f46e550eab4ec6b70c099dcdae44eb9d61 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: 星期四, 23 三月 2023 20:34:21 +0800
Subject: [PATCH] Merge pull request #288 from alibaba-damo-academy/dev_gzf

---
 funasr/runtime/python/websocket/ASR_server.py |   92 +++++++++++++++++++++++++--------------------
 1 file changed, 51 insertions(+), 41 deletions(-)

diff --git a/funasr/runtime/python/websocket/ASR_server.py b/funasr/runtime/python/websocket/ASR_server.py
index ac63d3c..9d0fd0b 100644
--- a/funasr/runtime/python/websocket/ASR_server.py
+++ b/funasr/runtime/python/websocket/ASR_server.py
@@ -1,4 +1,10 @@
-# server.py   娉ㄦ剰鏈緥浠呭鐞嗗崟涓猚lent鍙戦�佺殑璇煶鏁版嵁锛屽苟鏈澶歝lient杩炴帴杩涜鍒ゆ柇鍜屽鐞�
+import asyncio
+import websockets
+import time
+from queue import Queue
+import threading
+import argparse
+
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 from modelscope.utils.logger import get_logger
@@ -7,12 +13,6 @@
 logger = get_logger(log_level=logging.CRITICAL)
 logger.setLevel(logging.CRITICAL)
 
-import asyncio
-import websockets
-import time
-from queue import Queue
-import threading
-import argparse
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--host",
@@ -36,7 +36,7 @@
 
 parser.add_argument("--punc_model",
                     type=str,
-                    default="",
+                    default="damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
                     help="model from modelscope")
 parser.add_argument("--ngpu",
                     type=int,
@@ -49,23 +49,38 @@
 voices = Queue()
 speek = Queue()
 
-# 鍒涘缓涓�涓猇AD瀵硅薄
-vad_pipline = pipeline(
+# vad
+inference_pipeline_vad = pipeline(
     task=Tasks.voice_activity_detection,
     model=args.vad_model,
-    model_revision="v1.2.0",
+    model_revision=None,
     output_dir=None,
     batch_size=1,
+    mode='online',
+    ngpu=args.ngpu,
 )
+param_dict_vad = {'in_cache': dict(), "is_final": False}
   
-# 鍒涘缓涓�涓狝SR瀵硅薄
-param_dict = dict()
+# asr
+param_dict_asr = {}
 # param_dict["hotword"] = "灏忎簲 灏忎簲鏈�"  # 璁剧疆鐑瘝锛岀敤绌烘牸闅斿紑
-inference_pipeline2 = pipeline(
+inference_pipeline_asr = pipeline(
     task=Tasks.auto_speech_recognition,
     model=args.asr_model,
-    param_dict=param_dict,
+    param_dict=param_dict_asr,
+    ngpu=args.ngpu,
 )
+if args.punc_model is not None:
+    param_dict_punc = {'cache': list()}
+    inference_pipeline_punc = pipeline(
+        task=Tasks.punctuation,
+        model=args.punc_model,
+        model_revision=None,
+        ngpu=args.ngpu,
+    )
+else:
+    inference_pipeline_punc = None
+
 print("model loaded")
 
 
@@ -85,29 +100,35 @@
 
 
 def vad(data):  # 鎺ㄧ悊
-    global vad_pipline
+    global vad_pipline, param_dict_vad
     #print(type(data))
-    segments_result = vad_pipline(audio_in=data)
-    #print(segments_result)
+    # print(param_dict_vad)
+    segments_result = inference_pipeline_vad(audio_in=data, param_dict=param_dict_vad)
+    # print(segments_result)
+    # print(param_dict_vad)
     speech_start = False
     speech_end = False
-    if len(segments_result) == 0 or len(segments_result["text"] > 1):
-        return False
-    elif segments_result["text"][0][0] != -1:
+    
+    if len(segments_result) == 0 or len(segments_result["text"]) > 1:
+        return speech_start, speech_end
+    if segments_result["text"][0][0] != -1:
         speech_start = True
-    elif segments_result["text"][0][1] != -1:
+    if segments_result["text"][0][1] != -1:
         speech_end = True
     return speech_start, speech_end
 
 def asr():  # 鎺ㄧ悊
     global inference_pipeline2
-    global speek
+    global speek, param_dict_punc
     while True:
         while not speek.empty():
             audio_in = speek.get()
             speek.task_done()
-            rec_result = inference_pipeline2(audio_in=audio_in)
-            print(rec_result)
+            if len(audio_in) > 0:
+                rec_result = inference_pipeline_asr(audio_in=audio_in)
+                if inference_pipeline_punc is not None and 'text' in rec_result:
+                    rec_result = inference_pipeline_punc(text_in=rec_result['text'], param_dict=param_dict_punc)
+                print(rec_result["text"])
             time.sleep(0.1)
         time.sleep(0.1)    
 
@@ -135,20 +156,21 @@
                 frames.append(data)
                 RECORD_NUM += 1
             speech_start_i, speech_end_i = vad(data)
+            # print(speech_start_i, speech_end_i)
             if speech_start_i:
                 speech_start = speech_start_i
                 # if not speech_detected:
-                print("妫�娴嬪埌浜哄0...")
+                # print("妫�娴嬪埌浜哄0...")
                 # speech_detected = True  # 鏍囪涓烘娴嬪埌璇煶
                 frames = []
                 frames.extend(buffer)  # 鎶婁箣鍓�2涓闊虫暟鎹揩鍔犲叆
                 # silence_count = 0  # 閲嶇疆闈欓煶娆℃暟
-            elif speech_end_i or RECORD_NUM > 300:
+            if speech_end_i or RECORD_NUM > 300:
                 # silence_count += 1  # 澧炲姞闈欓煶娆℃暟
                 # speech_end = speech_end_i
                 speech_start = False
                 # if RECORD_NUM > 300: #杩欓噷 50 鍙牴鎹渶姹傛敼涓哄悎閫傜殑鏁版嵁蹇暟閲�
-                print("璇磋瘽缁撴潫鎴栬�呰秴杩囪缃渶闀挎椂闂�...")
+                # print("璇磋瘽缁撴潫鎴栬�呰秴杩囪缃渶闀挎椂闂�...")
                 audio_in = b"".join(frames)
                 #asrt = threading.Thread(target=asr,args=(audio_in,))
                 #asrt.start()
@@ -170,16 +192,4 @@
 s.start()
 
 asyncio.get_event_loop().run_until_complete(start_server)
-asyncio.get_event_loop().run_forever()
-
-
- 
-
-
-
-
-
- 
-
-        
-
+asyncio.get_event_loop().run_forever()
\ No newline at end of file

--
Gitblit v1.9.1