From 4e44c9f46e550eab4ec6b70c099dcdae44eb9d61 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: 星期四, 23 三月 2023 20:34:21 +0800
Subject: [PATCH] Merge pull request #288 from alibaba-damo-academy/dev_gzf
---
funasr/runtime/python/websocket/ASR_server.py | 56 ++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 36 insertions(+), 20 deletions(-)
diff --git a/funasr/runtime/python/websocket/ASR_server.py b/funasr/runtime/python/websocket/ASR_server.py
index cfa9a42..9d0fd0b 100644
--- a/funasr/runtime/python/websocket/ASR_server.py
+++ b/funasr/runtime/python/websocket/ASR_server.py
@@ -1,4 +1,10 @@
-# server.py 娉ㄦ剰鏈緥浠呭鐞嗗崟涓猚lent鍙戦�佺殑璇煶鏁版嵁锛屽苟鏈澶歝lient杩炴帴杩涜鍒ゆ柇鍜屽鐞�
+import asyncio
+import websockets
+import time
+from queue import Queue
+import threading
+import argparse
+
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
@@ -7,12 +13,6 @@
logger = get_logger(log_level=logging.CRITICAL)
logger.setLevel(logging.CRITICAL)
-import asyncio
-import websockets
-import time
-from queue import Queue
-import threading
-import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--host",
@@ -36,7 +36,7 @@
parser.add_argument("--punc_model",
type=str,
- default="",
+ default="damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
help="model from modelscope")
parser.add_argument("--ngpu",
type=int,
@@ -49,25 +49,38 @@
voices = Queue()
speek = Queue()
-# 鍒涘缓涓�涓猇AD瀵硅薄
-vad_pipline = pipeline(
+# vad
+inference_pipeline_vad = pipeline(
task=Tasks.voice_activity_detection,
model=args.vad_model,
- model_revision="v1.2.0",
+ model_revision=None,
output_dir=None,
batch_size=1,
- mode='online'
+ mode='online',
+ ngpu=args.ngpu,
)
param_dict_vad = {'in_cache': dict(), "is_final": False}
-# 鍒涘缓涓�涓狝SR瀵硅薄
-param_dict = dict()
+# asr
+param_dict_asr = {}
# param_dict["hotword"] = "灏忎簲 灏忎簲鏈�" # 璁剧疆鐑瘝锛岀敤绌烘牸闅斿紑
-inference_pipeline2 = pipeline(
+inference_pipeline_asr = pipeline(
task=Tasks.auto_speech_recognition,
model=args.asr_model,
- param_dict=param_dict,
+ param_dict=param_dict_asr,
+ ngpu=args.ngpu,
)
+if args.punc_model is not None:
+ param_dict_punc = {'cache': list()}
+ inference_pipeline_punc = pipeline(
+ task=Tasks.punctuation,
+ model=args.punc_model,
+ model_revision=None,
+ ngpu=args.ngpu,
+ )
+else:
+ inference_pipeline_punc = None
+
print("model loaded")
@@ -90,7 +103,7 @@
global vad_pipline, param_dict_vad
#print(type(data))
# print(param_dict_vad)
- segments_result = vad_pipline(audio_in=data, param_dict=param_dict_vad)
+ segments_result = inference_pipeline_vad(audio_in=data, param_dict=param_dict_vad)
# print(segments_result)
# print(param_dict_vad)
speech_start = False
@@ -106,13 +119,16 @@
def asr(): # 鎺ㄧ悊
global inference_pipeline2
- global speek
+ global speek, param_dict_punc
while True:
while not speek.empty():
audio_in = speek.get()
speek.task_done()
- rec_result = inference_pipeline2(audio_in=audio_in)
- print(rec_result)
+ if len(audio_in) > 0:
+ rec_result = inference_pipeline_asr(audio_in=audio_in)
+ if inference_pipeline_punc is not None and 'text' in rec_result:
+ rec_result = inference_pipeline_punc(text_in=rec_result['text'], param_dict=param_dict_punc)
+ print(rec_result["text"])
time.sleep(0.1)
time.sleep(0.1)
--
Gitblit v1.9.1