From 4e44c9f46e550eab4ec6b70c099dcdae44eb9d61 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 23 Mar 2023 20:34:21 +0800
Subject: [PATCH] Merge pull request #288 from alibaba-damo-academy/dev_gzf

---
 funasr/runtime/python/websocket/ASR_server.py |   30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/funasr/runtime/python/websocket/ASR_server.py b/funasr/runtime/python/websocket/ASR_server.py
index 175159c..9d0fd0b 100644
--- a/funasr/runtime/python/websocket/ASR_server.py
+++ b/funasr/runtime/python/websocket/ASR_server.py
@@ -59,7 +59,7 @@
     mode='online',
     ngpu=args.ngpu,
 )
-param_dict_vad = {'in_cache': dict()}
+param_dict_vad = {'in_cache': dict(), "is_final": False}
   
 # asr
 param_dict_asr = {}
@@ -70,14 +70,16 @@
     param_dict=param_dict_asr,
     ngpu=args.ngpu,
 )
-
-param_dict_punc = {'cache': list()}
-inference_pipeline_punc = pipeline(
-    task=Tasks.punctuation,
-    model=args.punc_model,
-    model_revision=None,
-    ngpu=args.ngpu,
-)
+if args.punc_model is not None:
+    param_dict_punc = {'cache': list()}
+    inference_pipeline_punc = pipeline(
+        task=Tasks.punctuation,
+        model=args.punc_model,
+        model_revision=None,
+        ngpu=args.ngpu,
+    )
+else:
+    inference_pipeline_punc = None
 
 print("model loaded")
 
@@ -117,14 +119,16 @@
 
 def asr():  # 推理 (inference)
     global inference_pipeline2
-    global speek
+    global speek, param_dict_punc
     while True:
         while not speek.empty():
             audio_in = speek.get()
             speek.task_done()
-            rec_result = inference_pipeline_asr(audio_in=audio_in)
-            rec_result_punc = inference_pipeline_punc(text_in=rec_result['text'], param_dict=param_dict_punc)
-            print(rec_result_punc)
+            if len(audio_in) > 0:
+                rec_result = inference_pipeline_asr(audio_in=audio_in)
+                if inference_pipeline_punc is not None and 'text' in rec_result:
+                    rec_result = inference_pipeline_punc(text_in=rec_result['text'], param_dict=param_dict_punc)
+                print(rec_result["text"])
             time.sleep(0.1)
         time.sleep(0.1)    
 

--
Gitblit v1.9.1