From 7a8bedace2ccccaa162b335cad3f3f00acf84b4b Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 23 Mar 2023 17:12:02 +0800
Subject: [PATCH] websocket
---
funasr/runtime/python/websocket/ASR_server.py | 40 +++++++++++++++++-----------------------
funasr/bin/vad_inference_online.py | 7 -------
2 files changed, 17 insertions(+), 30 deletions(-)
diff --git a/funasr/bin/vad_inference_online.py b/funasr/bin/vad_inference_online.py
index faee1fc..dadfd8c 100644
--- a/funasr/bin/vad_inference_online.py
+++ b/funasr/bin/vad_inference_online.py
@@ -30,14 +30,7 @@
from funasr.models.frontend.wav_frontend import WavFrontend
from funasr.bin.vad_inference import Speech2VadSegment
-header_colors = '\033[95m'
-end_colors = '\033[0m'
-global_asr_language: str = 'zh-cn'
-global_sample_rate: Union[int, Dict[Any, int]] = {
- 'audio_fs': 16000,
- 'model_fs': 16000
-}
class Speech2VadSegmentOnline(Speech2VadSegment):
diff --git a/funasr/runtime/python/websocket/ASR_server.py b/funasr/runtime/python/websocket/ASR_server.py
index ac63d3c..cfa9a42 100644
--- a/funasr/runtime/python/websocket/ASR_server.py
+++ b/funasr/runtime/python/websocket/ASR_server.py
@@ -56,7 +56,9 @@
model_revision="v1.2.0",
output_dir=None,
batch_size=1,
+ mode='online'
)
+param_dict_vad = {'in_cache': dict(), "is_final": False}
# 创建一个ASR对象
param_dict = dict()
@@ -85,17 +87,20 @@
def vad(data): # 推理
- global vad_pipline
+ global vad_pipline, param_dict_vad
#print(type(data))
- segments_result = vad_pipline(audio_in=data)
- #print(segments_result)
+ # print(param_dict_vad)
+ segments_result = vad_pipline(audio_in=data, param_dict=param_dict_vad)
+ # print(segments_result)
+ # print(param_dict_vad)
speech_start = False
speech_end = False
- if len(segments_result) == 0 or len(segments_result["text"] > 1):
- return False
- elif segments_result["text"][0][0] != -1:
+
+ if len(segments_result) == 0 or len(segments_result["text"]) > 1:
+ return speech_start, speech_end
+ if segments_result["text"][0][0] != -1:
speech_start = True
- elif segments_result["text"][0][1] != -1:
+ if segments_result["text"][0][1] != -1:
speech_end = True
return speech_start, speech_end
@@ -135,20 +140,21 @@
frames.append(data)
RECORD_NUM += 1
speech_start_i, speech_end_i = vad(data)
+ # print(speech_start_i, speech_end_i)
if speech_start_i:
speech_start = speech_start_i
# if not speech_detected:
- print("检测到人声...")
+ # print("检测到人声...")
# speech_detected = True # 鏍囪涓烘娴嬪埌璇煶
frames = []
frames.extend(buffer) # 把之前2块音频数据加入
# silence_count = 0 # 閲嶇疆闈欓煶娆℃暟
- elif speech_end_i or RECORD_NUM > 300:
+ if speech_end_i or RECORD_NUM > 300:
# silence_count += 1 # 澧炲姞闈欓煶娆℃暟
# speech_end = speech_end_i
speech_start = False
# if RECORD_NUM > 300: #这里 50 可根据需求改为合适的数据块数量
- print("说话结束或者超过设置最长时间...")
+ # print("说话结束或者超过设置最长时间...")
audio_in = b"".join(frames)
#asrt = threading.Thread(target=asr,args=(audio_in,))
#asrt.start()
@@ -170,16 +176,4 @@
s.start()
asyncio.get_event_loop().run_until_complete(start_server)
-asyncio.get_event_loop().run_forever()
-
-
-
-
-
-
-
-
-
-
-
-
+asyncio.get_event_loop().run_forever()
\ No newline at end of file
--
Gitblit v1.9.1