From 8706e767affc6bdc8cb7a67ca3a20a62779ff048 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Wed, 17 May 2023 15:45:46 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR into main
---
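Protocol implied by this change: text frames are expected to carry a JSON
control message such as {"is_speaking": true, "wav_name": "test.wav"}, while
binary frames are treated as raw audio; the server's len(message)//32 ms
bookkeeping corresponds to 16 kHz, 16-bit mono PCM (32 bytes per millisecond).
A minimal client sketch under those assumptions follows; the endpoint
ws://localhost:10095, the input file name, and the chunk size are illustrative
values, not taken from this patch.

    import asyncio
    import json
    import websockets

    async def send_audio(uri="ws://localhost:10095", pcm_path="test.pcm"):
        async with websockets.connect(uri) as ws:
            # Control messages go out as JSON text frames; the server stores
            # is_speaking and wav_name on the connection object.
            await ws.send(json.dumps({"is_speaking": True, "wav_name": pcm_path}))
            with open(pcm_path, "rb") as f:
                data = f.read()  # assumed to be headerless 16 kHz 16-bit mono PCM
            chunk = 1920  # ~60 ms per frame at 32 bytes/ms (illustrative choice)
            for i in range(0, len(data), chunk):
                # Audio goes out as binary frames and is buffered into frames/frames_asr.
                await ws.send(data[i:i + chunk])
            # Declaring the end of speech makes the server flush frames_asr
            # through async_asr and send back the recognition result.
            await ws.send(json.dumps({"is_speaking": False}))
            print(await ws.recv())

    asyncio.run(send_audio())
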
funasr/runtime/python/websocket/ws_server_offline.py | 60 ++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 34 insertions(+), 26 deletions(-)
diff --git a/funasr/runtime/python/websocket/ws_server_offline.py b/funasr/runtime/python/websocket/ws_server_offline.py
index 7873918..1fcc246 100644
--- a/funasr/runtime/python/websocket/ws_server_offline.py
+++ b/funasr/runtime/python/websocket/ws_server_offline.py
@@ -65,38 +65,45 @@
websocket.param_dict_punc = {'cache': list()}
websocket.vad_pre_idx = 0
speech_start = False
+ websocket.wav_name = "microphone"
+ print("new user connected", flush=True)
try:
async for message in websocket:
- message = json.loads(message)
- is_finished = message["is_finished"]
- if not is_finished:
- audio = bytes(message['audio'], 'ISO-8859-1')
- frames.append(audio)
- duration_ms = len(audio)//32
- websocket.vad_pre_idx += duration_ms
-
- is_speaking = message["is_speaking"]
- websocket.param_dict_vad["is_final"] = not is_speaking
- if speech_start:
- frames_asr.append(audio)
- speech_start_i, speech_end_i = await async_vad(websocket, audio)
- if speech_start_i:
- speech_start = True
- beg_bias = (websocket.vad_pre_idx-speech_start_i)//duration_ms
- frames_pre = frames[-beg_bias:]
- frames_asr = []
- frames_asr.extend(frames_pre)
- if speech_end_i or not is_speaking:
+ if isinstance(message, str):
+ messagejson = json.loads(message)
+ if "is_speaking" in messagejson:
+ websocket.is_speaking = messagejson["is_speaking"]
+ websocket.param_dict_vad["is_final"] = not websocket.is_speaking
+ if "wav_name" in messagejson:
+ websocket.wav_name = messagejson.get("wav_name")
+
+ if len(frames_asr) > 0 or not isinstance(message, str):
+ if not isinstance(message, str):
+ frames.append(message)
+ duration_ms = len(message)//32
+ websocket.vad_pre_idx += duration_ms
+
+ if speech_start:
+ frames_asr.append(message)
+ speech_start_i, speech_end_i = await async_vad(websocket, message)
+ if speech_start_i:
+ speech_start = True
+ beg_bias = (websocket.vad_pre_idx-speech_start_i)//duration_ms
+ frames_pre = frames[-beg_bias:]
+ frames_asr = []
+ frames_asr.extend(frames_pre)
+ if speech_end_i or not websocket.is_speaking:
audio_in = b"".join(frames_asr)
await async_asr(websocket, audio_in)
frames_asr = []
speech_start = False
- if not is_speaking:
+ if not websocket.is_speaking:
websocket.vad_pre_idx = 0
frames = []
+ websocket.param_dict_vad = {'in_cache': dict()}
else:
- frames = frames[-10:]
+ frames = frames[-20:]
except websockets.ConnectionClosed:
@@ -131,14 +138,15 @@
rec_result = inference_pipeline_asr(audio_in=audio_in,
param_dict=websocket.param_dict_asr)
- # print(rec_result)
+ print(rec_result)
if inference_pipeline_punc is not None and 'text' in rec_result and len(rec_result["text"])>0:
rec_result = inference_pipeline_punc(text_in=rec_result['text'],
param_dict=websocket.param_dict_punc)
# print(rec_result)
- message = json.dumps({"mode": "offline", "text": [rec_result["text"]]})
- await websocket.send(message)
-
+ message = json.dumps({"mode": "offline", "text": rec_result["text"], "wav_name": websocket.wav_name})
+ await websocket.send(message)
+
+
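With this change the result message carries "text" as a plain string
(previously wrapped in a single-element list) and echoes the client-supplied
"wav_name". A small receive-side sketch matching the payload built in
async_asr; anything beyond the mode/text/wav_name keys is an assumption.

    import json

    def parse_result(raw):
        # Payload shape per this patch:
        # {"mode": "offline", "text": "<recognized text>", "wav_name": "<name>"}
        msg = json.loads(raw)
        return msg["wav_name"], msg["text"]
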
--
Gitblit v1.9.1