### Start server
#### ASR offline server
```shell
python ws_server_offline.py --port 10095 --asr_model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
```
#### ASR streaming server
```shell
python ws_server_online.py --port 10095 --asr_model_online "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online"
```

#### ASR offline/online 2pass server

### Start client
#### ASR offline client
##### Recording from microphone
```shell
# --chunk_interval, "10": 600/10=60ms, "5": 600/5=120ms, "20": 600/20=30ms
python ws_client.py --host "0.0.0.0" --port 10095 --chunk_interval 10 --words_max_print 100
```
##### Loading from wav.scp (kaldi style)
```shell
# --chunk_interval, "10": 600/10=60ms, "5": 600/5=120ms, "20": 600/20=30ms
python ws_client.py --host "0.0.0.0" --port 10095 --chunk_interval 10 --words_max_print 100 --audio_in "./data/wav.scp" --send_without_sleep
```
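
The `wav.scp` file follows the kaldi convention of one utterance id and one wav path per line; the ids and paths below are illustrative:
```
asr_example_1 /path/to/asr_example_1.wav
asr_example_2 /path/to/asr_example_2.wav
```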
#### ASR streaming client
##### Recording from microphone
```shell
# --chunk_size, "5,10,5"=600ms, "8,8,4"=480ms
python ws_client.py --host "0.0.0.0" --port 10095 --chunk_size "5,10,5" --words_max_print 100
```
##### Loading from wav.scp (kaldi style)
```shell
# --chunk_size, "5,10,5"=600ms, "8,8,4"=480ms
python ws_client.py --host "0.0.0.0" --port 10095 --chunk_size "5,10,5" --audio_in "./data/wav.scp" --words_max_print 100
```
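
For reference, each message the client sends is a JSON object whose fields mirror the `json.dumps` call in `ws_client.py`; the values below are illustrative. The `audio` field carries the raw PCM chunk decoded as an ISO-8859-1 string, and a final `{"is_finished": true}` message ends the session:
```
{"chunk_size": [5, 10, 5], "chunk_interval": 10, "audio": "<ISO-8859-1 encoded PCM>", "is_speaking": true, "is_finished": false}
```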

#### ASR offline/online 2pass client

## Acknowledgements
1. This project is maintained by the [FunASR community](https://github.com/alibaba-damo-academy/FunASR).
2. We thank [zhaoming](https://github.com/zhaomingwork/FunASR/tree/fix_bug_for_python_websocket) for contributing the websocket service.

# parse_args.py (excerpt)
parser.add_argument("--ngpu",
                    type=int,
                    default=1,
                    help="0 for cpu, 1 for gpu")
parser.add_argument("--ncpu",
                    type=int,
                    default=1,
                    help="cpu cores")

args = parser.parse_args()
print(args)

# ws_client.py (excerpt)
parser.add_argument("--audio_in",
                    type=str,
                    default=None,
                    help="audio_in")

parser.add_argument("--send_without_sleep",
                    action="store_true",
                    default=False,
                    help="if audio_in is set, send_without_sleep")
parser.add_argument("--test_thread_num",
                    type=int,
                    default=1,
                    help="number of concurrent test clients")
args = parser.parse_args()
args.chunk_size = [int(x) for x in args.chunk_size.split(",")]

print(args)

# queue shared between the recording coroutine and the sending coroutine
from queue import Queue
voices = Queue()

# record audio from the microphone and queue JSON messages for sending
async def record_microphone():
    is_finished = False
    import pyaudio
        message = json.dumps({"chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "audio": data, "is_speaking": is_speaking, "is_finished": is_finished})
        voices.put(message)
        await asyncio.sleep(0.005)

# read audio from a kaldi-style wav.scp and queue JSON messages for sending
async def record_from_scp():
    import wave
    global voices
    # wav_path comes from the wav.scp entries read in the code elided above
    with wave.open(wav_path, "rb") as wav_file:
        # get the audio parameters; readframes already skips the header
        params = wav_file.getparams()
        frames = wav_file.readframes(wav_file.getnframes())

    # raw 16 kHz 16-bit mono PCM
    audio_bytes = bytes(frames)
    # bytes per outgoing message: chunk_size[1] frames of 60 ms each divided by
    # chunk_interval, at 32 bytes per millisecond (16 kHz * 2 bytes)
    stride = int(60 * args.chunk_size[1] / args.chunk_interval / 1000 * 16000 * 2)
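    # A minimal sketch of the chunking loop elided from this excerpt, assuming the
    # usual pattern: slice audio_bytes into stride-sized pieces and queue one JSON
    # message per piece; the ISO-8859-1 decode keeps the raw bytes intact in JSON.
    chunk_num = (len(audio_bytes) - 1) // stride + 1
    for i in range(chunk_num):
        data_chunk = audio_bytes[i * stride:(i + 1) * stride]
        is_speaking = i < chunk_num - 1
        message = json.dumps({"chunk_size": args.chunk_size,
                              "chunk_interval": args.chunk_interval,
                              "audio": data_chunk.decode("ISO-8859-1"),
                              "is_speaking": is_speaking,
                              "is_finished": False})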
        voices.put(message)
        # pace sending at real time unless --send_without_sleep is set
        sleep_duration = 0.001 if args.send_without_sleep else 60 * args.chunk_size[1] / args.chunk_interval / 1000
        await asyncio.sleep(sleep_duration)

    is_finished = True
    message = json.dumps({"is_finished": is_finished})
        data = voices.get()
        voices.task_done()
        try:
            await websocket.send(data)  # send the queued message over the websocket
        except Exception as e:
            print("Exception occurred:", e)
            traceback.print_exc()
            meg = json.loads(meg)
            text = meg["text"][0]
            if meg["mode"] == "online":
                text_print += " {}".format(text)
            else:
                text_print += "{}".format(text)
            # keep only the last words_max_print characters on screen
            text_print = text_print[-args.words_max_print:]
            os.system('clear')
            print("\r" + str(id) + ":" + text_print)
    except Exception as e:
        print("Exception:", e)
        exit(0)

async def ws_client(id):
    global websocket  # module-level handle to the current websocket connection
    uri = "ws://{}:{}".format(args.host, args.port)
    async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None):
        if args.audio_in is not None:
            task = asyncio.create_task(record_from_scp())  # feed audio from wav.scp
        else:
            task = asyncio.create_task(record_microphone())  # feed audio from the microphone
        task2 = asyncio.create_task(ws_send())  # send queued messages to the server
        task3 = asyncio.create_task(message(id))  # receive and print recognition results
        await asyncio.gather(task, task2, task3)

def one_thread(id):
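    # Body elided in this excerpt; a plausible minimal implementation simply
    # runs one websocket client to completion on this process's event loop:
    asyncio.get_event_loop().run_until_complete(ws_client(id))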
if __name__ == '__main__':
    process_list = []
    for i in range(args.test_thread_num):
        p = Process(target=one_thread, args=(i,))  # one client per process
        p.start()
        process_list.append(p)

    for p in process_list:
        p.join()

    print('end')

# ws_server_offline.py (excerpt)
import asyncio
import json
import websockets
import time
import logging
import tracemalloc
import numpy as np

from parse_args import args
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes

tracemalloc.start()

logger = get_logger(log_level=logging.CRITICAL)
logger.setLevel(logging.CRITICAL)


websocket_users = set()

print("model loading")
# offline ASR pipeline
inference_pipeline_asr = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision=None)

# VAD pipeline, run in online (streaming) mode
inference_pipeline_vad = pipeline(
    task=Tasks.voice_activity_detection,
    model=args.vad_model,
    model_revision=None,
    output_dir=None,
    batch_size=1,
    mode='online',
    ngpu=args.ngpu,
    ncpu=args.ncpu,
)

# optional punctuation pipeline
if args.punc_model != "":
    inference_pipeline_punc = pipeline(
        task=Tasks.punctuation,
        model=args.punc_model,
        model_revision=None,
        ngpu=args.ngpu,
        ncpu=args.ncpu,
    )
else:
    inference_pipeline_punc = None

print("model loaded")


async def ws_serve(websocket, path):
    frames = []
    frames_asr = []
    global websocket_users
    websocket_users.add(websocket)
    websocket.param_dict_asr = {}
    websocket.param_dict_vad = {'in_cache': dict(), "is_final": False}
    websocket.param_dict_punc = {'cache': list()}
    websocket.vad_pre_idx = 0
    speech_start = False

    try:
        async for message in websocket:
            message = json.loads(message)
            is_finished = message["is_finished"]
            if not is_finished:
                audio = bytes(message['audio'], 'ISO-8859-1')
                frames.append(audio)
                duration_ms = len(audio) // 32  # 16 kHz 16-bit mono: 32 bytes per ms
                websocket.vad_pre_idx += duration_ms

                is_speaking = message["is_speaking"]
                websocket.param_dict_vad["is_final"] = not is_speaking
                if speech_start:
                    frames_asr.append(audio)
                speech_start_i, speech_end_i = await async_vad(websocket, audio)
                if speech_start_i:
                    speech_start = True
                    # back-fill the frames received since the detected speech onset
                    beg_bias = (websocket.vad_pre_idx - speech_start_i) // duration_ms
                    frames_pre = frames[-beg_bias:]
                    frames_asr = []
                    frames_asr.extend(frames_pre)
                if speech_end_i or not is_speaking:
                    audio_in = b"".join(frames_asr)
                    await async_asr(websocket, audio_in)
                    frames_asr = []
                    speech_start = False
                    if not is_speaking:
                        websocket.vad_pre_idx = 0
                        frames = []
                    else:
                        frames = frames[-10:]

    except websockets.ConnectionClosed:
        print("ConnectionClosed...", websocket_users)
        websocket_users.remove(websocket)
    except websockets.InvalidState:
        print("InvalidState...")
    except Exception as e:
        print("Exception:", e)


async def async_vad(websocket, audio_in):
    segments_result = inference_pipeline_vad(audio_in=audio_in, param_dict=websocket.param_dict_vad)

    speech_start = False
    speech_end = False

    if len(segments_result) == 0 or len(segments_result["text"]) > 1:
        return speech_start, speech_end
    if segments_result["text"][0][0] != -1:
        speech_start = segments_result["text"][0][0]  # speech onset time in ms
    if segments_result["text"][0][1] != -1:
        speech_end = True
    return speech_start, speech_end


async def async_asr(websocket, audio_in):
    if len(audio_in) > 0:
        audio_in = load_bytes(audio_in)
        rec_result = inference_pipeline_asr(audio_in=audio_in,
                                            param_dict=websocket.param_dict_asr)
        # add punctuation if a punctuation model was loaded
        if inference_pipeline_punc is not None and 'text' in rec_result and len(rec_result["text"]) > 0:
            rec_result = inference_pipeline_punc(text_in=rec_result['text'],
                                                 param_dict=websocket.param_dict_punc)
        message = json.dumps({"mode": "offline", "text": [rec_result["text"]]})
        await websocket.send(message)


start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()

# ws_server_online.py (excerpt)
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes

tracemalloc.start()
# streaming ASR pipeline
inference_pipeline_asr_online = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model_online,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision='v1.0.4')

print("model loaded")


    except websockets.ConnectionClosed:
        print("ConnectionClosed...", websocket_users)
        websocket_users.remove(websocket)
    except websockets.InvalidState:
        print("InvalidState...")
    except Exception as e:
        print("Exception:", e)

async def async_asr_online(websocket, audio_in):
    if len(audio_in) > 0:
        audio_in = load_bytes(audio_in)
        # the param_dict attribute name is assumed by analogy with the offline server
        rec_result = inference_pipeline_asr_online(audio_in=audio_in,
                                                   param_dict=websocket.param_dict_asr_online)
        message = json.dumps({"mode": "online", "text": rec_result["text"]})
        await websocket.send(message)