import asyncio
import json
import threading
import time
import tracemalloc
import websockets
from queue import Queue

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
try:
    from funasr_onnx.utils.frontend import load_bytes
except ImportError:
    # fall back to the in-repo copy when the funasr_onnx package is not installed
    from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes

tracemalloc.start()
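# `args` and `websocket_users` are assumed to be defined earlier in the full script;
# a minimal sketch (argument names taken from their uses below, defaults illustrative):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0", help="host to bind")
parser.add_argument("--port", type=int, default=10095, help="port to listen on")
parser.add_argument("--asr_model_online", type=str,
                    default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online",
                    help="ModelScope id of the streaming ASR model")
parser.add_argument("--ngpu", type=int, default=1, help="0 for CPU-only inference")
parser.add_argument("--ncpu", type=int, default=4, help="CPU threads for inference")
args = parser.parse_args()

websocket_users = set()  # currently connected clients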

# Streaming (online) ASR pipeline from ModelScope
inference_pipeline_asr_online = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model_online,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision='v1.0.4')

print("model loaded")
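# Hedged sketch (not part of the original script): drive the streaming pipeline over a
# local 16 kHz mono wav in fixed chunks, mirroring how the websocket handler below feeds
# it; the file name, chunk length, chunk_size value and soundfile dependency are assumptions.
def _smoke_test_streaming_pipeline(wav_path="test.wav"):
    import soundfile
    speech, sample_rate = soundfile.read(wav_path, dtype="float32")
    param_dict = {"cache": dict(), "is_final": False, "chunk_size": [5, 10, 5]}
    chunk_samples = int(0.6 * sample_rate)  # 600 ms per step
    for i in range(0, len(speech), chunk_samples):
        param_dict["is_final"] = i + chunk_samples >= len(speech)
        rec_result = inference_pipeline_asr_online(audio_in=speech[i:i + chunk_samples],
                                                   param_dict=param_dict)
        if "text" in rec_result:
            print(rec_result["text"])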


async def ws_serve(websocket, path):
    frames_online = []
    global websocket_users
    websocket.send_msg = Queue()      # recognized text waiting to be sent back to the client
    websocket_users.add(websocket)
    websocket.param_dict_asr_online = {"cache": dict()}
    websocket.speek_online = Queue()  # audio chunks waiting for recognition
    ss_online = threading.Thread(target=asr_online, args=(websocket,))
    ss_online.start()

    try:
        async for message in websocket:
            # each frame from the client is a JSON text message
            message = json.loads(message)
            is_speaking = message["is_speaking"]
            websocket.param_dict_asr_online["is_final"] = not is_speaking

            websocket.wav_name = message.get("wav_name", "demo")
            websocket.param_dict_asr_online["chunk_size"] = message["chunk_size"]

            # Assumption: the client packs each audio chunk into the JSON message as an
            # ISO-8859-1 string (as the early FunASR websocket clients did), so it is
            # converted back to raw bytes here.
            audio = bytes(message["audio"], "ISO-8859-1")
            frames_online.append(audio)

            # Every chunk_interval messages (or when the utterance ends) hand the
            # buffered audio to the recognition thread.
            if len(frames_online) % message["chunk_interval"] == 0 or not is_speaking:
                audio_in = b"".join(frames_online)
                websocket.speek_online.put(audio_in)
                frames_online = []

            # Relay any finished recognition results back to the client.
            if not websocket.send_msg.empty():
                await websocket.send(websocket.send_msg.get())
                websocket.send_msg.task_done()

    except websockets.ConnectionClosed:
        print("ConnectionClosed...", websocket_users)  # connection closed
        websocket_users.remove(websocket)
    except websockets.InvalidState:
        print("InvalidState...")  # invalid connection state
    except Exception as e:
        print("Exception:", e)


def asr_online(websocket):  # ASR inference worker thread
    global websocket_users
    while websocket in websocket_users:
        if not websocket.speek_online.empty():
            audio_in = websocket.speek_online.get()
            websocket.speek_online.task_done()
            # Run the recognition coroutine on this worker thread's own event loop.
            asyncio.run(async_asr_online(websocket, audio_in))
        time.sleep(0.005)


async def async_asr_online(websocket, audio_in):  # streaming ASR inference
    if len(audio_in) > 0:
        # print(len(audio_in))
        audio_in = load_bytes(audio_in)
        rec_result = inference_pipeline_asr_online(audio_in=audio_in,
                                                   param_dict=websocket.param_dict_asr_online)
        # Reset the streaming cache once the utterance is finished.
        if websocket.param_dict_asr_online["is_final"]:
            websocket.param_dict_asr_online["cache"] = dict()

        if "text" in rec_result:
            if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
                print(rec_result["text"])
                message = json.dumps({"mode": "online",
                                      "text": rec_result["text"],
                                      "wav_name": websocket.wav_name})
                # Queue the result; the serving coroutine performs the actual send,
                # so the websocket is never written to from this worker thread.
                websocket.send_msg.put(message)


start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
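# The serve object still needs an event loop to actually run; the standard closing
# boilerplate (assumed to follow in the full script) looks like this:
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()

# For reference, each client message is assumed to be a JSON text frame of the form
#   {"is_speaking": true, "chunk_size": [5, 10, 5], "chunk_interval": 10,
#    "wav_name": "demo", "audio": "<PCM bytes decoded with ISO-8859-1>"}
# (field names taken from the reads in ws_serve above; the values are illustrative).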