雾聪
2023-06-13 59113b50b578e097be5656c4044a58282b4d0b9e
update wss_client_asr.py
1个文件已修改
187 ■■■■ 已修改文件
funasr/runtime/python/websocket/wss_client_asr.py 187 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/runtime/python/websocket/wss_client_asr.py
@@ -1,7 +1,7 @@
# -*- encoding: utf-8 -*-
import os
import time
import websockets,ssl
import websockets, ssl
import asyncio
# import threading
import argparse
@@ -12,6 +12,7 @@
import logging
SUPPORT_AUDIO_TYPE_SETS = ['.wav', '.pcm']
logging.basicConfig(level=logging.ERROR)
parser = argparse.ArgumentParser()
@@ -53,7 +54,7 @@
                    type=str,
                    default=None,
                    help="output_dir")
parser.add_argument("--ssl",
                    type=int,
                    default=1,
@@ -68,6 +69,7 @@
print(args)
# voices = asyncio.Queue()
from queue import Queue
voices = Queue()
ibest_writer = None
@@ -75,15 +77,16 @@
    writer = DatadirWriter(args.output_dir)
    ibest_writer = writer[f"1best_recog"]
async def record_microphone():
    """Capture audio from the default microphone and enqueue it for ws_send().

    Sends one JSON handshake message (mode / chunk configuration) first, then
    streams raw 16-bit mono PCM chunks into the module-level ``voices`` queue
    forever. Runs as an asyncio task; never returns.
    """
    import pyaudio  # local import: only needed in microphone mode
    global voices
    FORMAT = pyaudio.paInt16   # 16-bit signed PCM
    CHANNELS = 1               # mono
    RATE = 16000               # 16 kHz sample rate expected by the ASR server
    # Chunk duration in ms, derived from the configured chunk size / interval.
    chunk_size = 60 * args.chunk_size[1] / args.chunk_interval
    CHUNK = int(RATE / 1000 * chunk_size)
    p = pyaudio.PyAudio()
    # NOTE(review): the p.open(...) argument lines fall in a hidden diff hunk;
    # reconstructed from the visible locals and the visible trailing arguments.
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    # Handshake: tell the server how the audio stream will be chunked.
    message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
                          "wav_name": "microphone", "is_speaking": True})
    voices.put(message)
    while True:
        data = stream.read(CHUNK)
        voices.put(data)
        # Yield to the event loop so ws_send() can drain the queue.
        await asyncio.sleep(0.005)
async def record_from_scp(chunk_begin, chunk_size):
    """Read .wav/.pcm files from args.audio_in and enqueue them for ws_send().

    Args:
        chunk_begin: index of the first wav this process should handle.
        chunk_size: number of wavs for this process (<= 0 means all of them).

    For each file a JSON handshake is queued first, then the audio bytes in
    fixed-size strides, then an end-of-speech JSON message after the last
    stride. Sleeps between strides to pace sending in (near) real time.

    Raises:
        NotImplementedError: if a file is neither .wav nor .pcm.
    """
    global voices
    if args.audio_in.endswith(".scp"):
        # Kaldi-style scp: each line is "<wav_name> <wav_path>" (or just a path).
        # Use a context manager so the file handle is not leaked.
        with open(args.audio_in) as f_scp:
            wavs = f_scp.readlines()
    else:
        wavs = [args.audio_in]
    if chunk_size > 0:
        wavs = wavs[chunk_begin:chunk_begin + chunk_size]
    for wav in wavs:
        wav_splits = wav.strip().split()
        wav_name = wav_splits[0] if len(wav_splits) > 1 else "demo"
        wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
        if wav_path.endswith(".pcm"):
            # Raw PCM: send the file bytes as-is.
            with open(wav_path, "rb") as f:
                audio_bytes = f.read()
        elif wav_path.endswith(".wav"):
            import wave  # local import: only needed for .wav inputs
            with wave.open(wav_path, "rb") as wav_file:
                # Only the frames are sent; the WAV header is stripped.
                frames = wav_file.readframes(wav_file.getnframes())
                audio_bytes = bytes(frames)
        else:
            raise NotImplementedError(
                f'Not supported audio type')
        # Bytes per stride: chunk duration (ms) * 16 kHz * 2 bytes/sample.
        stride = int(60 * args.chunk_size[1] / args.chunk_interval / 1000 * 16000 * 2)
        chunk_num = (len(audio_bytes) - 1) // stride + 1
        # Handshake describing the upcoming stream for this wav.
        message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
                              "wav_name": wav_name, "is_speaking": True})
        voices.put(message)
        is_speaking = True
        for i in range(chunk_num):
            beg = i * stride
            data = audio_bytes[beg:beg + stride]
            voices.put(data)
            if i == chunk_num - 1:
                # Last stride: signal end of speech.
                is_speaking = False
                message = json.dumps({"is_speaking": is_speaking})
                voices.put(message)
            # Real-time pacing unless --send_without_sleep is set.
            sleep_duration = 0.001 if args.send_without_sleep else 60 * args.chunk_size[1] / args.chunk_interval / 1000
            await asyncio.sleep(sleep_duration)
async def ws_send():
    global voices
@@ -172,8 +176,6 @@
            await asyncio.sleep(0.005)
        await asyncio.sleep(0.005)
async def message(id):
    global websocket
    text_print = ""
@@ -188,17 +190,17 @@
            text = meg["text"]
            if ibest_writer is not None:
                ibest_writer["text"][wav_name] = text
            if meg["mode"] == "online":
                text_print += "{}".format(text)
                text_print = text_print[-args.words_max_print:]
                os.system('clear')
                print("\rpid"+str(id)+": "+text_print)
                print("\rpid" + str(id) + ": " + text_print)
            elif meg["mode"] == "offline":
                text_print += "{}".format(text)
                text_print = text_print[-args.words_max_print:]
                os.system('clear')
                print("\rpid"+str(id)+": "+text_print)
                print("\rpid" + str(id) + ": " + text_print)
            else:
                if meg["mode"] == "2pass-online":
                    text_print_2pass_online += "{}".format(text)
@@ -216,6 +218,7 @@
            traceback.print_exc()
            exit(0)
async def print_messge():
    global websocket
    while True:
@@ -228,69 +231,75 @@
            traceback.print_exc()
            exit(0)
async def ws_client(id, chunk_begin, chunk_size):
    """Connect to the ASR websocket server and run the send/receive tasks.

    Args:
        id: worker id, passed through to message() for labelled printing.
        chunk_begin: first wav index for this worker (scp mode).
        chunk_size: number of wavs for this worker (scp mode).

    Uses wss:// with certificate verification disabled when args.ssl == 1,
    plain ws:// otherwise. The ``async for`` over websockets.connect()
    reconnects automatically if the connection drops.
    """
    global websocket
    if args.ssl == 1:
        # NOTE(review): hostname check and cert verification are disabled —
        # acceptable for local testing, not for production use.
        ssl_context = ssl.SSLContext()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        uri = "wss://{}:{}".format(args.host, args.port)
    else:
        uri = "ws://{}:{}".format(args.host, args.port)
        ssl_context = None
    print("connect to", uri)
    async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None, ssl=ssl_context):
        # Producer: microphone or scp file reader, depending on --audio_in.
        if args.audio_in is not None:
            task = asyncio.create_task(record_from_scp(chunk_begin, chunk_size))
        else:
            task = asyncio.create_task(record_microphone())
        task2 = asyncio.create_task(ws_send())    # drains the voices queue
        task3 = asyncio.create_task(message(id))  # prints server responses
        await asyncio.gather(task, task2, task3)
def one_thread(id, chunk_begin, chunk_size):
    """Process entry point: run one websocket client on its own event loop.

    run_forever() keeps the loop alive after the client coroutine finishes
    (e.g. for the reconnect loop in ws_client).
    """
    asyncio.get_event_loop().run_until_complete(ws_client(id, chunk_begin, chunk_size))
    asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
    if args.audio_in is None:
        # Microphone mode: a single client process.
        p = Process(target=one_thread, args=(0, 0, 0))
        p.start()
        p.join()
        print('end')
    else:
        # Calculate the number of wavs for each process.
        if args.audio_in.endswith(".scp"):
            # Context manager so the scp handle is not leaked.
            with open(args.audio_in) as f_scp:
                wavs = f_scp.readlines()
        else:
            wavs = [args.audio_in]
        # Validate audio types up front so workers do not fail mid-run.
        for wav in wavs:
            wav_splits = wav.strip().split()
            wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
            audio_type = os.path.splitext(wav_path)[-1].lower()
            if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
                raise NotImplementedError(
                    f'Not supported audio type: {audio_type}')
        total_len = len(wavs)
        if total_len >= args.test_thread_num:
            chunk_size = int(total_len / args.test_thread_num)
            remain_wavs = total_len - chunk_size * args.test_thread_num
        else:
            # Fewer wavs than workers: one wav per worker, extras get none.
            chunk_size = 1
            remain_wavs = 0
        process_list = []
        chunk_begin = 0
        for i in range(args.test_thread_num):
            now_chunk_size = chunk_size
            if remain_wavs > 0:
                # Distribute the remainder one wav at a time.
                now_chunk_size = chunk_size + 1
                remain_wavs = remain_wavs - 1
            # Process i handles wavs starting at chunk_begin, size now_chunk_size.
            p = Process(target=one_thread, args=(i, chunk_begin, now_chunk_size))
            chunk_begin = chunk_begin + now_chunk_size
            p.start()
            process_list.append(p)
        # Bug fix: the original looped `for i in process_list: p.join()`,
        # joining only the last-created process repeatedly.
        for worker in process_list:
            worker.join()
        print('end')