From 4e44c9f46e550eab4ec6b70c099dcdae44eb9d61 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: 星期四, 23 三月 2023 20:34:21 +0800
Subject: [PATCH] Merge pull request #288 from alibaba-damo-academy/dev_gzf

---
 funasr/runtime/python/websocket/ASR_server.py |   30 ++++++++++++++++++------------
 1 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/funasr/runtime/python/websocket/ASR_server.py b/funasr/runtime/python/websocket/ASR_server.py
index ee1a7c6..9d0fd0b 100644
--- a/funasr/runtime/python/websocket/ASR_server.py
+++ b/funasr/runtime/python/websocket/ASR_server.py
@@ -53,7 +53,7 @@
 inference_pipeline_vad = pipeline(
     task=Tasks.voice_activity_detection,
     model=args.vad_model,
-    model_revision="v1.2.0",
+    model_revision=None,
     output_dir=None,
     batch_size=1,
     mode='online',
@@ -62,7 +62,7 @@
 param_dict_vad = {'in_cache': dict(), "is_final": False}
   
 # asr
-param_dict_asr = dict()
+param_dict_asr = {}
 # param_dict["hotword"] = "小五 小五月"  # 设置热词，用空格隔开
 inference_pipeline_asr = pipeline(
     task=Tasks.auto_speech_recognition,
@@ -70,13 +70,16 @@
     param_dict=param_dict_asr,
     ngpu=args.ngpu,
 )
-
-inference_pipline_punc = pipeline(
-    task=Tasks.punctuation,
-    model=args.punc_model,
-    model_revision="v1.0.1",
-    ngpu=args.ngpu,
-)
+if args.punc_model is not None:
+    param_dict_punc = {'cache': list()}
+    inference_pipeline_punc = pipeline(
+        task=Tasks.punctuation,
+        model=args.punc_model,
+        model_revision=None,
+        ngpu=args.ngpu,
+    )
+else:
+    inference_pipeline_punc = None
 
 print("model loaded")
 
@@ -116,13 +119,16 @@
 
 def asr():  # 推理
     global inference_pipeline2
-    global speek
+    global speek, param_dict_punc
     while True:
         while not speek.empty():
             audio_in = speek.get()
             speek.task_done()
-            rec_result = inference_pipeline_asr(audio_in=audio_in)
-            print(rec_result)
+            if len(audio_in) > 0:
+                rec_result = inference_pipeline_asr(audio_in=audio_in)
+                if inference_pipeline_punc is not None and 'text' in rec_result:
+                    rec_result = inference_pipeline_punc(text_in=rec_result['text'], param_dict=param_dict_punc)
+                print(rec_result["text"])
             time.sleep(0.1)
         time.sleep(0.1)    
 

--
Gitblit v1.9.1