From 296ec00997d4ad4715286c8dad9cc2226d064b71 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期日, 07 五月 2023 23:02:56 +0800
Subject: [PATCH] websocket offline
---
funasr/runtime/python/websocket/ws_server_offline.py | 147 +++++++++++++++++++++++++++++
funasr/runtime/python/websocket/README.md | 32 ++++--
funasr/runtime/python/websocket/parse_args.py | 7 +
funasr/runtime/python/websocket/ws_client.py | 45 ++++----
funasr/runtime/python/websocket/ws_server_online.py | 11 +-
5 files changed, 203 insertions(+), 39 deletions(-)
diff --git a/funasr/runtime/python/websocket/README.md b/funasr/runtime/python/websocket/README.md
index b0ef206..76405ea 100644
--- a/funasr/runtime/python/websocket/README.md
+++ b/funasr/runtime/python/websocket/README.md
@@ -22,15 +22,13 @@
### Start server
#### ASR offline server
+```shell
+python ws_server_offline.py --port 10095 --asr_model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+```
-[//]: # (```shell)
-
-[//]: # (python ws_server_online.py --host "0.0.0.0" --port 10095 --asr_model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
-
-[//]: # (```)
#### ASR streaming server
```shell
-python ws_server_online.py --host "0.0.0.0" --port 10095 --asr_model_online "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online"
+python ws_server_online.py --port 10095 --asr_model_online "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online"
```
#### ASR offline/online 2pass server
@@ -51,17 +49,31 @@
```
### Start client
-#### Recording from mircrophone
+#### ASR offline client
+##### Recording from microphone
+```shell
+# --chunk_interval, "10": 600/10=60ms, "5": 600/5=120ms, "20": 600/20=30ms
+python ws_client.py --host "0.0.0.0" --port 10095 --chunk_interval 10 --words_max_print 100
+```
+##### Loading from wav.scp(kaldi style)
+```shell
+# --chunk_interval, "10": 600/10=60ms, "5": 600/5=120ms, "20": 600/20=30ms
+python ws_client.py --host "0.0.0.0" --port 10095 --chunk_interval 10 --words_max_print 100 --audio_in "./data/wav.scp" --send_without_sleep
+```
+#### ASR streaming client
+##### Recording from microphone
```shell
# --chunk_size, "5,10,5"=600ms, "8,8,4"=480ms
-python ws_client.py --host "127.0.0.1" --port 10095 --chunk_size "5,10,5" --words_max_print 100
+python ws_client.py --host "0.0.0.0" --port 10095 --chunk_size "5,10,5" --words_max_print 100
```
-#### Loadding from wav.scp(kaldi style)
+##### Loading from wav.scp(kaldi style)
```shell
# --chunk_size, "5,10,5"=600ms, "8,8,4"=480ms
-python ws_client.py --host "127.0.0.1" --port 10095 --chunk_size "5,10,5" --audio_in "./data/wav.scp" --words_max_print 100
+python ws_client.py --host "0.0.0.0" --port 10095 --chunk_size "5,10,5" --audio_in "./data/wav.scp" --words_max_print 100
```
+#### ASR offline/online 2pass client
+
## Acknowledge
1. This project is maintained by [FunASR community](https://github.com/alibaba-damo-academy/FunASR).
2. We acknowledge [zhaoming](https://github.com/zhaomingwork/FunASR/tree/fix_bug_for_python_websocket) for contributing the websocket service.
diff --git a/funasr/runtime/python/websocket/parse_args.py b/funasr/runtime/python/websocket/parse_args.py
index 2528a76..d170be8 100644
--- a/funasr/runtime/python/websocket/parse_args.py
+++ b/funasr/runtime/python/websocket/parse_args.py
@@ -31,5 +31,10 @@
type=int,
default=1,
help="0 for cpu, 1 for gpu")
+parser.add_argument("--ncpu",
+ type=int,
+ default=1,
+ help="cpu cores")
-args = parser.parse_args()
\ No newline at end of file
+args = parser.parse_args()
+print(args)
\ No newline at end of file
diff --git a/funasr/runtime/python/websocket/ws_client.py b/funasr/runtime/python/websocket/ws_client.py
index d32ce0a..d8bbb65 100644
--- a/funasr/runtime/python/websocket/ws_client.py
+++ b/funasr/runtime/python/websocket/ws_client.py
@@ -31,7 +31,10 @@
type=str,
default=None,
help="audio_in")
-
+parser.add_argument("--send_without_sleep",
+ action="store_true",
+ default=False,
+ help="if audio_in is set, send_without_sleep")
parser.add_argument("--test_thread_num",
type=int,
default=1,
@@ -43,12 +46,11 @@
args = parser.parse_args()
args.chunk_size = [int(x) for x in args.chunk_size.split(",")]
-
+print(args)
# voices = asyncio.Queue()
from queue import Queue
voices = Queue()
-# 鍏朵粬鍑芥暟鍙互閫氳繃璋冪敤send(data)鏉ュ彂閫佹暟鎹紝渚嬪锛�
async def record_microphone():
is_finished = False
import pyaudio
@@ -75,11 +77,9 @@
message = json.dumps({"chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "audio": data, "is_speaking": is_speaking, "is_finished": is_finished})
voices.put(message)
- #print(voices.qsize())
await asyncio.sleep(0.005)
-# 鍏朵粬鍑芥暟鍙互閫氳繃璋冪敤send(data)鏉ュ彂閫佹暟鎹紝渚嬪锛�
async def record_from_scp():
import wave
global voices
@@ -95,15 +95,11 @@
# bytes_f = open(wav_path, "rb")
# bytes_data = bytes_f.read()
with wave.open(wav_path, "rb") as wav_file:
- # 鑾峰彇闊抽鍙傛暟
params = wav_file.getparams()
- # 鑾峰彇澶翠俊鎭殑闀垮害
# header_length = wav_file.getheaders()[0][1]
- # 璇诲彇闊抽甯ф暟鎹紝璺宠繃澶翠俊鎭�
# wav_file.setpos(header_length)
frames = wav_file.readframes(wav_file.getnframes())
- # 灏嗛煶棰戝抚鏁版嵁杞崲涓哄瓧鑺傜被鍨嬬殑鏁版嵁
audio_bytes = bytes(frames)
# stride = int(args.chunk_size/1000*16000*2)
stride = int(60*args.chunk_size[1]/args.chunk_interval/1000*16000*2)
@@ -120,8 +116,8 @@
voices.put(message)
# print("data_chunk: ", len(data_chunk))
# print(voices.qsize())
-
- await asyncio.sleep(60*args.chunk_size[1]/args.chunk_interval/1000)
+ sleep_duration = 0.001 if args.send_without_sleep else 60*args.chunk_size[1]/args.chunk_interval/1000
+ await asyncio.sleep(sleep_duration)
is_finished = True
message = json.dumps({"is_finished": is_finished})
@@ -136,7 +132,7 @@
data = voices.get()
voices.task_done()
try:
- await websocket.send(data) # 閫氳繃ws瀵硅薄鍙戦�佹暟鎹�
+ await websocket.send(data)
except Exception as e:
print('Exception occurred:', e)
traceback.print_exc()
@@ -155,9 +151,14 @@
meg = json.loads(meg)
# print(meg, end = '')
# print("\r")
- text_print += " {}".format(meg["text"][0])
+ # print(meg)
+ text = meg["text"][0]
+ if meg["mode"] == "online":
+ text_print += " {}".format(text)
+ else:
+ text_print += "{}".format(text)
text_print = text_print[-args.words_max_print:]
- #os.system('clear')
+ os.system('clear')
print("\r"+str(id)+":"+text_print)
except Exception as e:
print("Exception:", e)
@@ -177,17 +178,15 @@
exit(0)
async def ws_client(id):
- global websocket # 瀹氫箟涓�涓叏灞�鍙橀噺ws锛岀敤浜庝繚瀛榳ebsocket杩炴帴瀵硅薄
- # uri = "ws://11.167.134.197:8899"
+ global websocket
uri = "ws://{}:{}".format(args.host, args.port)
- #ws = await websockets.connect(uri, subprotocols=["binary"]) # 鍒涘缓涓�涓暱杩炴帴
async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None):
if args.audio_in is not None:
- task = asyncio.create_task(record_from_scp()) # 鍒涘缓涓�涓悗鍙颁换鍔″綍闊�
+ task = asyncio.create_task(record_from_scp())
else:
- task = asyncio.create_task(record_microphone()) # 鍒涘缓涓�涓悗鍙颁换鍔″綍闊�
- task2 = asyncio.create_task(ws_send()) # 鍒涘缓涓�涓悗鍙颁换鍔″彂閫�
- task3 = asyncio.create_task(message(id)) # 鍒涘缓涓�涓悗鍙版帴鏀舵秷鎭殑浠诲姟
+ task = asyncio.create_task(record_microphone())
+ task2 = asyncio.create_task(ws_send())
+ task3 = asyncio.create_task(message(id))
await asyncio.gather(task, task2, task3)
def one_thread(id):
@@ -198,13 +197,13 @@
if __name__ == '__main__':
process_list = []
for i in range(args.test_thread_num):
- p = Process(target=one_thread,args=(i,)) #瀹炰緥鍖栬繘绋嬪璞�
+ p = Process(target=one_thread,args=(i,))
p.start()
process_list.append(p)
for i in process_list:
p.join()
- print('缁撴潫娴嬭瘯')
+ print('end')
diff --git a/funasr/runtime/python/websocket/ws_server_offline.py b/funasr/runtime/python/websocket/ws_server_offline.py
new file mode 100644
index 0000000..7873918
--- /dev/null
+++ b/funasr/runtime/python/websocket/ws_server_offline.py
@@ -0,0 +1,147 @@
+import asyncio
+import json
+import websockets
+import time
+import logging
+import tracemalloc
+import numpy as np
+
+from parse_args import args
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+from modelscope.utils.logger import get_logger
+from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes
+
+tracemalloc.start()
+
+logger = get_logger(log_level=logging.CRITICAL)
+logger.setLevel(logging.CRITICAL)
+
+
+websocket_users = set()
+
+print("model loading")
+# asr
+inference_pipeline_asr = pipeline(
+ task=Tasks.auto_speech_recognition,
+ model=args.asr_model,
+ ngpu=args.ngpu,
+ ncpu=args.ncpu,
+ model_revision=None)
+
+
+# vad
+inference_pipeline_vad = pipeline(
+ task=Tasks.voice_activity_detection,
+ model=args.vad_model,
+ model_revision=None,
+ output_dir=None,
+ batch_size=1,
+ mode='online',
+ ngpu=args.ngpu,
+ ncpu=args.ncpu,
+)
+
+if args.punc_model != "":
+ inference_pipeline_punc = pipeline(
+ task=Tasks.punctuation,
+ model=args.punc_model,
+ model_revision=None,
+ ngpu=args.ngpu,
+ ncpu=args.ncpu,
+ )
+else:
+ inference_pipeline_punc = None
+
+print("model loaded")
+
+async def ws_serve(websocket, path):
+ frames = []
+ frames_asr = []
+ global websocket_users
+ websocket_users.add(websocket)
+ websocket.param_dict_asr = {}
+ websocket.param_dict_vad = {'in_cache': dict(), "is_final": False}
+ websocket.param_dict_punc = {'cache': list()}
+ websocket.vad_pre_idx = 0
+ speech_start = False
+
+ try:
+ async for message in websocket:
+ message = json.loads(message)
+ is_finished = message["is_finished"]
+ if not is_finished:
+ audio = bytes(message['audio'], 'ISO-8859-1')
+ frames.append(audio)
+ duration_ms = len(audio)//32
+ websocket.vad_pre_idx += duration_ms
+
+ is_speaking = message["is_speaking"]
+ websocket.param_dict_vad["is_final"] = not is_speaking
+ if speech_start:
+ frames_asr.append(audio)
+ speech_start_i, speech_end_i = await async_vad(websocket, audio)
+ if speech_start_i:
+ speech_start = True
+ beg_bias = (websocket.vad_pre_idx-speech_start_i)//duration_ms
+ frames_pre = frames[-beg_bias:]
+ frames_asr = []
+ frames_asr.extend(frames_pre)
+ if speech_end_i or not is_speaking:
+ audio_in = b"".join(frames_asr)
+ await async_asr(websocket, audio_in)
+ frames_asr = []
+ speech_start = False
+ if not is_speaking:
+ websocket.vad_pre_idx = 0
+ frames = []
+ else:
+ frames = frames[-10:]
+
+
+ except websockets.ConnectionClosed:
+ print("ConnectionClosed...", websocket_users)
+ websocket_users.remove(websocket)
+ except websockets.InvalidState:
+ print("InvalidState...")
+ except Exception as e:
+ print("Exception:", e)
+
+
+async def async_vad(websocket, audio_in):
+
+ segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)
+
+ speech_start = False
+ speech_end = False
+
+ if len(segments_result) == 0 or len(segments_result["text"]) > 1:
+ return speech_start, speech_end
+ if segments_result["text"][0][0] != -1:
+ speech_start = segments_result["text"][0][0]
+ if segments_result["text"][0][1] != -1:
+ speech_end = True
+ return speech_start, speech_end
+
+
+async def async_asr(websocket, audio_in):
+ if len(audio_in) > 0:
+ # print(len(audio_in))
+ audio_in = load_bytes(audio_in)
+
+ rec_result = inference_pipeline_asr(audio_in=audio_in,
+ param_dict=websocket.param_dict_asr)
+ # print(rec_result)
+ if inference_pipeline_punc is not None and 'text' in rec_result and len(rec_result["text"])>0:
+ rec_result = inference_pipeline_punc(text_in=rec_result['text'],
+ param_dict=websocket.param_dict_punc)
+ # print(rec_result)
+ message = json.dumps({"mode": "offline", "text": [rec_result["text"]]})
+ await websocket.send(message)
+
+
+
+
+start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
+asyncio.get_event_loop().run_until_complete(start_server)
+asyncio.get_event_loop().run_forever()
\ No newline at end of file
diff --git a/funasr/runtime/python/websocket/ws_server_online.py b/funasr/runtime/python/websocket/ws_server_online.py
index 4e6c38e..6ea8f39 100644
--- a/funasr/runtime/python/websocket/ws_server_online.py
+++ b/funasr/runtime/python/websocket/ws_server_online.py
@@ -12,7 +12,7 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
-from funasr_onnx.utils.frontend import load_bytes
+from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes
tracemalloc.start()
@@ -28,6 +28,8 @@
inference_pipeline_asr_online = pipeline(
task=Tasks.auto_speech_recognition,
model=args.asr_model_online,
+ ngpu=args.ngpu,
+ ncpu=args.ncpu,
model_revision='v1.0.4')
print("model loaded")
@@ -63,14 +65,14 @@
except websockets.ConnectionClosed:
- print("ConnectionClosed...", websocket_users) # 閾炬帴鏂紑
+ print("ConnectionClosed...", websocket_users)
websocket_users.remove(websocket)
except websockets.InvalidState:
- print("InvalidState...") # 鏃犳晥鐘舵��
+ print("InvalidState...")
except Exception as e:
print("Exception:", e)
-async def async_asr_online(websocket,audio_in): # ASR鎺ㄧ悊
+async def async_asr_online(websocket,audio_in):
if len(audio_in) > 0:
audio_in = load_bytes(audio_in)
rec_result = inference_pipeline_asr_online(audio_in=audio_in,
@@ -84,7 +86,6 @@
message = json.dumps({"mode": "online", "text": rec_result["text"]})
await websocket.send(message)
-
start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
--
Gitblit v1.9.1