From 378ced7edb0cb9957263675215192e19e490d226 Mon Sep 17 00:00:00 2001
From: dyyzhmm <dyyzhmm@163.com>
Date: Wed, 15 Mar 2023 10:10:57 +0800
Subject: [PATCH] Merge pull request #1 from alibaba-damo-academy/dev_hmm

---
 funasr/runtime/python/grpc/grpc_main_client_mic.py |   31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)

diff --git a/funasr/runtime/python/grpc/grpc_main_client_mic.py b/funasr/runtime/python/grpc/grpc_main_client_mic.py
index acbe90b..220e8b5 100644
--- a/funasr/runtime/python/grpc/grpc_main_client_mic.py
+++ b/funasr/runtime/python/grpc/grpc_main_client_mic.py
@@ -1,7 +1,6 @@
 import pyaudio
 import grpc
 import json
-import webrtcvad
 import time
 import asyncio
 import argparse
@@ -11,24 +10,16 @@
 
 async def deal_chunk(sig_mic):
     global stub,SPEAKING,asr_user,language,sample_rate
-    if vad.is_speech(sig_mic, sample_rate): #speaking
-        SPEAKING = True
-        response = transcribe_audio_bytes(stub, sig_mic, user=asr_user, language=language, speaking = True, isEnd = False) #speaking, send audio to server.
-    else: #silence   
-        begin_time = 0
-        if SPEAKING: #means we have some audio recorded, send recognize order to server.
-            SPEAKING = False
-            begin_time = int(round(time.time() * 1000))            
-            response = transcribe_audio_bytes(stub, None, user=asr_user, language=language, speaking = False, isEnd = False) #speak end, call server for recognize one sentence
-            resp = response.next()           
-            if "decoding" == resp.action:   
-                resp = response.next() #TODO, blocking operation may leads to miss some audio clips. C++ multi-threading is preferred.
-                if "finish" == resp.action:        
-                    end_time = int(round(time.time() * 1000))
-                    print (json.loads(resp.sentence))
-                    print ("delay in ms: %d " % (end_time - begin_time))
-                else:
-                    pass
+    
+    SPEAKING = True
+    resp = transcribe_audio_bytes(stub, sig_mic, user=asr_user, language=language, speaking = True, isEnd = False) #speaking, send audio to server.
+          
+    if "decoding" == resp.action:     #partial result
+        print(json.loads(resp.sentence))
+    elif "finish" == resp.action:     #final result
+        print (json.loads(resp.sentence))
+
+
         
 
 async def record(host,port,sample_rate,mic_chunk,record_seconds,asr_user,language):
@@ -88,8 +79,6 @@
     language = 'zh-CN'  
     
 
-    vad = webrtcvad.Vad()
-    vad.set_mode(1)
 
     FORMAT = pyaudio.paInt16
     CHANNELS = 1

--
Gitblit v1.9.1