zhaomingwork
2023-05-12 a30953f75997d31fbc1b18fedf4ec7f82b2f58cf
funasr/runtime/python/websocket/ws_server_online.py
@@ -12,7 +12,7 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.logger import get_logger
from funasr_onnx.utils.frontend import load_bytes
from funasr.runtime.python.onnxruntime.funasr_onnx.utils.frontend import load_bytes
tracemalloc.start()
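The only change in this hunk is the import path: load_bytes is now taken from the funasr_onnx copy that ships inside the runtime tree. As a rough sketch of what that helper does (an approximation for orientation, not the library's exact implementation), it reinterprets the raw 16-bit PCM bytes received over the socket as a normalized float32 waveform:

    import numpy as np

    def load_bytes_sketch(raw: bytes) -> np.ndarray:
        # Approximation of funasr_onnx's load_bytes: view the buffer as little-endian
        # 16-bit PCM samples and rescale them into [-1.0, 1.0] float32 for the frontend.
        pcm = np.frombuffer(raw, dtype=np.int16)
        return pcm.astype(np.float32) / 32768.0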
@@ -28,6 +28,8 @@
inference_pipeline_asr_online = pipeline(
    task=Tasks.auto_speech_recognition,
    model=args.asr_model_online,
    ngpu=args.ngpu,
    ncpu=args.ncpu,
    model_revision='v1.0.4')
print("model loaded")
@@ -35,72 +37,64 @@
async def ws_serve(websocket, path):
    frames_online = []
    frames_asr_online = []
    global websocket_users
    websocket.send_msg = Queue()
    websocket_users.add(websocket)
    websocket.param_dict_asr_online = {"cache": dict()}
    websocket.speek_online = Queue()
    ss_online = threading.Thread(target=asr_online, args=(websocket,))
    ss_online.start()
    print("new user connected",flush=True)
    try:
        async for message in websocket:
            message = json.loads(message)
            is_finished = message["is_finished"]
            if not is_finished:
                audio = bytes(message['audio'], 'ISO-8859-1')
                is_speaking = message["is_speaking"]
                websocket.param_dict_asr_online["is_final"] = not is_speaking
                websocket.param_dict_asr_online["chunk_size"] = message["chunk_size"]
                frames_online.append(audio)
                if len(frames_online) % message["chunk_interval"] == 0 or not is_speaking:
                    audio_in = b"".join(frames_online)
                    websocket.speek_online.put(audio_in)
                    frames_online = []
            if not websocket.send_msg.empty():
                await websocket.send(websocket.send_msg.get())
                websocket.send_msg.task_done()
            if isinstance(message, str):
                messagejson = json.loads(message)
                if "is_speaking" in messagejson:
                    websocket.is_speaking = messagejson["is_speaking"]
                    websocket.param_dict_asr_online["is_final"] = not websocket.is_speaking
                if "is_finished" in messagejson:
                    websocket.is_speaking = False
                    websocket.param_dict_asr_online["is_final"] = True
                if "chunk_interval" in messagejson:
                    websocket.chunk_interval = messagejson["chunk_interval"]
                if "wav_name" in messagejson:
                    websocket.wav_name = messagejson.get("wav_name", "demo")
                if "chunk_size" in messagejson:
                    websocket.param_dict_asr_online["chunk_size"] = messagejson["chunk_size"]
            # if there are buffered audio frames or the incoming message itself is binary audio
            if len(frames_asr_online) > 0 or not isinstance(message, str):
                if not isinstance(message, str):
                    frames_asr_online.append(message)
                if len(frames_asr_online) % websocket.chunk_interval == 0 or not websocket.is_speaking:
                    audio_in = b"".join(frames_asr_online)
                    if not websocket.is_speaking:
                        # pad 0.5 s of silence at the end to guarantee the ASR engine flushes the last word
                        audio_in = audio_in + np.zeros(int(16000 * 0.5), dtype=np.int16).tobytes()
                    await async_asr_online(websocket, audio_in)
                    frames_asr_online = []
     
    except websockets.ConnectionClosed:
        print("ConnectionClosed...", websocket_users)    # 链接断开
        print("ConnectionClosed...", websocket_users)
        websocket_users.remove(websocket)
    except websockets.InvalidState:
        print("InvalidState...")    # 无效状态
        print("InvalidState...")
    except Exception as e:
        print("Exception:", e)
 
def asr_online(websocket):  # ASR inference
    global websocket_users
    while websocket in websocket_users:
        if not websocket.speek_online.empty():
            audio_in = websocket.speek_online.get()
            websocket.speek_online.task_done()
async def async_asr_online(websocket,audio_in):
            if len(audio_in) > 0:
                # print(len(audio_in))
                audio_in = load_bytes(audio_in)
                rec_result = inference_pipeline_asr_online(audio_in=audio_in,
                                                           param_dict=websocket.param_dict_asr_online)
                if websocket.param_dict_asr_online["is_final"]:
                    websocket.param_dict_asr_online["cache"] = dict()
                if "text" in rec_result:
                    if rec_result["text"] != "sil" and rec_result["text"] != "waiting_for_more_voice":
                        print(rec_result["text"])
                        message = json.dumps({"mode": "online", "text": rec_result["text"]})
                        websocket.send_msg.put(message)
        time.sleep(0.005)
                        message = json.dumps({"mode": "online", "text": rec_result["text"], "wav_name": websocket.wav_name})
                        await websocket.send(message)
start_server = websockets.serve(ws_serve, args.host, args.port, subprotocols=["binary"], ping_interval=None)
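With this change the server expects a text (JSON) handshake carrying wav_name, chunk_size, chunk_interval, and is_speaking, followed by raw binary PCM frames, and it pushes partial results back as JSON text messages. A minimal client sketch of that protocol, assuming the server listens on ws://localhost:10095 and that demo.wav is 16 kHz mono 16-bit PCM (the host, port, file, and chunk settings are illustrative, not values taken from the commit):

    import asyncio
    import json
    import wave

    import websockets


    async def stream_wav(uri="ws://localhost:10095", wav_path="demo.wav"):
        async with websockets.connect(uri, subprotocols=["binary"], ping_interval=None) as ws:
            # Text frame: session configuration consumed by the isinstance(message, str) branch.
            await ws.send(json.dumps({
                "is_speaking": True,
                "chunk_size": [5, 10, 5],   # assumed streaming chunk configuration
                "chunk_interval": 10,
                "wav_name": "demo",
            }))
            with wave.open(wav_path, "rb") as wav:
                pcm = wav.readframes(wav.getnframes())
            step = 16000 * 2 // 10          # ~100 ms of 16 kHz 16-bit audio per binary frame
            for i in range(0, len(pcm), step):
                await ws.send(pcm[i:i + step])
            # Tell the server speech has ended so it pads and flushes the final hypothesis.
            await ws.send(json.dumps({"is_speaking": False}))
            # Drain partial/final results until the server goes quiet for a second.
            try:
                while True:
                    print(await asyncio.wait_for(ws.recv(), timeout=1.0))
            except asyncio.TimeoutError:
                pass


    if __name__ == "__main__":
        asyncio.run(stream_wav())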