From 678a6c0f7293a86fb1046cf043afec29e88fd5f1 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期一, 24 四月 2023 15:54:54 +0800
Subject: [PATCH] websocket

---
 funasr/runtime/python/websocket/ASR_client.py       |   41 ++++++++++++++++++++++++++++++++++-------
 funasr/runtime/python/websocket/ASR_server_2pass.py |    2 +-
 2 files changed, 35 insertions(+), 8 deletions(-)

diff --git a/funasr/runtime/python/websocket/ASR_client.py b/funasr/runtime/python/websocket/ASR_client.py
index fa95328..cc0e7b6 100644
--- a/funasr/runtime/python/websocket/ASR_client.py
+++ b/funasr/runtime/python/websocket/ASR_client.py
@@ -1,9 +1,8 @@
-import pyaudio
+
 # import websocket #区别服务端这里是 websocket-client库
 import time
 import websockets
 import asyncio
-from queue import Queue
 # import threading
 import argparse
 import json
@@ -30,12 +29,13 @@
 
 args = parser.parse_args()
 
+# voices = asyncio.Queue()
+from queue import Queue
 voices = Queue()
-
-
     
 # 其他函数可以通过调用send(data)来发送数据，例如：
-async def record():
+async def record_microphone():
+    import pyaudio
     #print("2")
     global voices 
     FORMAT = pyaudio.paInt16
@@ -59,8 +59,32 @@
         #print(voices.qsize())
 
         await asyncio.sleep(0.01)
-    
 
+# 其他函数可以通过调用send(data)来发送数据，例如：
+async def record_from_scp():
+    global voices
+    if args.audio_in.endswith(".scp"):
+        f_scp = open(args.audio_in)
+        wavs = f_scp.readlines()
+    else:
+        wavs = [args.audio_in]
+    for wav in wavs:
+        wav_splits = wav.strip().split()
+        wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
+        bytes = open(wav_path, "rb")
+        bytes = bytes.read()
+        
+        stride = int(args.chunk_size/1000*16000*2)
+        chunk_num = (len(bytes)-1)//stride + 1
+        for i in range(chunk_num):
+            beg = i*stride
+            data_chunk = bytes[beg:beg+stride]
+            voices.put(data_chunk)
+            # print("data_chunk: ", len(data_chunk))
+            # print(voices.qsize())
+        
+            await asyncio.sleep(args.chunk_size/1000)
+     
 
 async def ws_send():
     global voices
@@ -97,7 +121,10 @@
     uri = "ws://{}:{}".format(args.host, args.port)
     #ws = await websockets.connect(uri, subprotocols=["binary"]) # 创建一个长连接
     async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None):
-        task = asyncio.create_task(record()) # 创建一个后台任务录音
+        if args.audio_in is not None:
+            task = asyncio.create_task(record_from_scp()) # 创建一个后台任务录音
+        else:
+            task = asyncio.create_task(record_microphone())  # 创建一个后台任务录音
         task2 = asyncio.create_task(ws_send()) # 创建一个后台发送
         task3 = asyncio.create_task(message()) # 创建一个后台接收消息的任务
         await asyncio.gather(task, task2, task3)
diff --git a/funasr/runtime/python/websocket/ASR_server_2pass.py b/funasr/runtime/python/websocket/ASR_server_2pass.py
index 55dc2e2..135a3cc 100644
--- a/funasr/runtime/python/websocket/ASR_server_2pass.py
+++ b/funasr/runtime/python/websocket/ASR_server_2pass.py
@@ -105,7 +105,7 @@
 inference_pipeline_asr_online = pipeline(
     task=Tasks.auto_speech_recognition,
     model='damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online',
-    model_revision='v1.0.2')
+    model_revision=None)
 
 
 print("model loaded")

--
Gitblit v1.9.1