From b0887f17678e0e5c4bd895e36695b242f2f1aee8 Mon Sep 17 00:00:00 2001
From: mengzhe.cmz <mengzhe.cmz@alibaba-inc.com>
Date: Thu, 23 Mar 2023 19:59:28 +0800
Subject: [PATCH] Merge branch 'dev_gzf' of github.com:alibaba-damo-academy/FunASR into dev_gzf

---
 funasr/runtime/python/websocket/ASR_server.py |  191 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 191 insertions(+), 0 deletions(-)

diff --git a/funasr/runtime/python/websocket/ASR_server.py b/funasr/runtime/python/websocket/ASR_server.py
new file mode 100644
index 0000000..20de20a
--- /dev/null
+++ b/funasr/runtime/python/websocket/ASR_server.py
@@ -0,0 +1,191 @@
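+# Websocket ASR server: audio chunks received from clients are segmented with
+# a streaming VAD model; each completed segment is recognized with a
+# Paraformer ASR model and then punctuated, and the result is printed.
+#
+# Run it with the argparse defaults, for example:
+#     python ASR_server.py --host 0.0.0.0 --port 10095 --ngpu 1
+#
+# A minimal client sketch (assumptions for illustration: the input is raw
+# 16 kHz, 16-bit mono PCM, which is what the default 16k models expect, and
+# "test.pcm" is a placeholder file name):
+#
+#     import asyncio, websockets
+#
+#     async def send_audio(path="test.pcm"):
+#         uri = "ws://127.0.0.1:10095"
+#         async with websockets.connect(uri, subprotocols=["binary"]) as ws:
+#             with open(path, "rb") as f:
+#                 while chunk := f.read(3200):  # ~100 ms of 16 kHz 16-bit mono
+#                     await ws.send(chunk)
+#                     await asyncio.sleep(0.1)
+#
+#     asyncio.run(send_audio())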
+import asyncio
+import websockets
+import time
+from queue import Queue
+import threading
+import argparse
+
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+from modelscope.utils.logger import get_logger
+import logging
+
+logger = get_logger(log_level=logging.CRITICAL)
+logger.setLevel(logging.CRITICAL)
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--host",
+                    type=str,
+                    default="0.0.0.0",
+                    required=False,
+                    help="host ip, localhost, 0.0.0.0")
+parser.add_argument("--port",
+                    type=int,
+                    default=10095,
+                    required=False,
+                    help="websocket server port")
+parser.add_argument("--asr_model",
+                    type=str,
+                    default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
+                    help="model from modelscope")
+parser.add_argument("--vad_model",
+                    type=str,
+                    default="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+                    help="model from modelscope")
+
+parser.add_argument("--punc_model",
+                    type=str,
+                    default="damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
+                    help="model from modelscope")
+parser.add_argument("--ngpu",
+                    type=int,
+                    default=1,
+                    help="0 for cpu, 1 for gpu")
+
+args = parser.parse_args()
+
+print("model loading")
+voices = Queue()
+speek = Queue()
+
+# vad
+inference_pipeline_vad = pipeline(
+    task=Tasks.voice_activity_detection,
+    model=args.vad_model,
+    model_revision=None,
+    output_dir=None,
+    batch_size=1,
+    mode='online',
+    ngpu=args.ngpu,
+)
+param_dict_vad = {'in_cache': dict(), "is_final": False}
+  
+# asr
+param_dict_asr = {}
+# param_dict["hotword"] = "灏忎簲 灏忎簲鏈�"  # 璁剧疆鐑瘝锛岀敤绌烘牸闅斿紑
+inference_pipeline_asr = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model=args.asr_model,
+    param_dict=param_dict_asr,
+    ngpu=args.ngpu,
+)
+
+param_dict_punc = {'cache': list()}
+inference_pipeline_punc = pipeline(
+    task=Tasks.punctuation,
+    model=args.punc_model,
+    model_revision=None,
+    ngpu=args.ngpu,
+)
+
+print("model loaded")
+
+
+
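+# Websocket handler: every incoming (binary) message is treated as an audio
+# chunk and pushed onto the shared queue for the segmentation loop.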
+async def ws_serve(websocket, path):
+    global voices
+    try:
+        async for message in websocket:
+            voices.put(message)
+            #print("put")
+    except websockets.exceptions.ConnectionClosedError as e:
+        print('Connection closed with exception:', e)
+    except Exception as e:
+        print('Exception occurred:', e)
+
+start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
+
+
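+# Streaming VAD on a single chunk: the flags report whether a segment start
+# or a segment end timestamp was detected in this chunk.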
+def vad(data):  # inference
+    global inference_pipeline_vad, param_dict_vad
+    #print(type(data))
+    # print(param_dict_vad)
+    segments_result = inference_pipeline_vad(audio_in=data, param_dict=param_dict_vad)
+    # print(segments_result)
+    # print(param_dict_vad)
+    speech_start = False
+    speech_end = False
+    
+    if len(segments_result) == 0 or len(segments_result["text"]) > 1:
+        return speech_start, speech_end
+    if segments_result["text"][0][0] != -1:
+        speech_start = True
+    if segments_result["text"][0][1] != -1:
+        speech_end = True
+    return speech_start, speech_end
+
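+# Worker thread: drains the speech-segment queue, runs ASR followed by
+# punctuation restoration on each segment, and prints the result.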
+def asr():  # inference
+    global inference_pipeline_asr, inference_pipeline_punc
+    global speek
+    while True:
+        while not speek.empty():
+            audio_in = speek.get()
+            speek.task_done()
+            rec_result = inference_pipeline_asr(audio_in=audio_in)
+            rec_result_punc = inference_pipeline_punc(text_in=rec_result['text'], param_dict=param_dict_punc)
+            print(rec_result_punc)
+            time.sleep(0.1)
+        time.sleep(0.1)    
+
+
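+# Worker thread: pulls audio chunks off the websocket queue, uses the VAD
+# flags to find segment boundaries, and hands each completed segment (or at
+# most 300 chunks) to the ASR worker via the speek queue.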
+def main():  # segmentation loop
+    frames = []  # all frames of the current speech segment
+    buffer = []  # rolling buffer of recent frames (at most two chunks)
+    # silence_count = 0  # count of consecutive silent chunks
+    # speech_detected = False  # whether speech has been detected
+    RECORD_NUM = 0
+    global voices 
+    global speek
+    speech_start, speech_end = False, False
+    while True:
+        while not voices.empty():
+            
+            data = voices.get()
+            #print("queue backlog:", voices.qsize())
+            voices.task_done()
+            buffer.append(data)
+            if len(buffer) > 2:
+                buffer.pop(0)  # if the buffer holds more than two chunks, drop the oldest
+            
+            if speech_start:
+                frames.append(data)
+                RECORD_NUM += 1
+            speech_start_i, speech_end_i = vad(data)
+            # print(speech_start_i, speech_end_i)
+            if speech_start_i:
+                speech_start = speech_start_i
+                # if not speech_detected:
+                # print("speech detected...")
+                # speech_detected = True  # mark that speech was detected
+                frames = []
+                frames.extend(buffer)  # start with the two previously buffered audio chunks
+                # silence_count = 0  # reset the silence counter
+            if speech_end_i or RECORD_NUM > 300:
+                # silence_count += 1  # increment the silence counter
+                # speech_end = speech_end_i
+                speech_start = False
+                # if RECORD_NUM > 300:  # adjust this chunk-count limit to suit your needs
+                # print("speech ended or the maximum duration was exceeded...")
+                audio_in = b"".join(frames)
+                #asrt = threading.Thread(target=asr,args=(audio_in,))
+                #asrt.start()
+                speek.put(audio_in)
+                #rec_result = inference_pipeline_asr(audio_in=audio_in)  # run the segment through the ASR model
+                frames = []  # clear all frame data
+                buffer = []  # clear the buffered frames (at most two chunks)
+                # silence_count = 0  # reset the consecutive-silence counter
+                # speech_detected = False  # reset the speech-detected flag
+                RECORD_NUM = 0
+            time.sleep(0.01)
+        time.sleep(0.01)
+            
+
+
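+# Start the segmentation and ASR worker threads, then serve websocket
+# connections on the asyncio event loop in the main thread.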
+vad_thread = threading.Thread(target=main)
+vad_thread.start()
+asr_thread = threading.Thread(target=asr)
+asr_thread.start()
+
+asyncio.get_event_loop().run_until_complete(start_server)
+asyncio.get_event_loop().run_forever()
\ No newline at end of file

--
Gitblit v1.9.1