From 1d4ab65c8bfebaecbcb0eec0064bae9a321cad75 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 14 Feb 2023 16:27:37 +0800
Subject: [PATCH] export model

---
 funasr/runtime/python/grpc/grpc_server.py |   19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/funasr/runtime/python/grpc/grpc_server.py b/funasr/runtime/python/grpc/grpc_server.py
index f42cbb3..19b7354 100644
--- a/funasr/runtime/python/grpc/grpc_server.py
+++ b/funasr/runtime/python/grpc/grpc_server.py
@@ -1,13 +1,12 @@
 from concurrent import futures
 import grpc
 import json
-import paraformer_pb2
-import paraformer_pb2_grpc
 import time
 
-from paraformer_pb2 import Response
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
+import paraformer_pb2_grpc
+from paraformer_pb2 import Response
 
 
 class ASRServicer(paraformer_pb2_grpc.ASRServicer):
@@ -17,7 +16,7 @@
         self.client_buffers = {}
         self.client_transcription = {}
         self.auth_user = user_allowed.split("|")
-        self.inference_16k_pipline = pipeline(task=Tasks.auto_speech_recognition, model=model)
+        self.inference_16k_pipeline = pipeline(task=Tasks.auto_speech_recognition, model=model)
         self.sample_rate = sample_rate
 
     def clear_states(self, user):
@@ -69,7 +68,7 @@
                 if req.user not in self.client_buffers:
                     result = {}
                     result["success"] = True
-                    result["detail"] = "waiting_for_voice"
+                    result["detail"] = "waiting_for_more_voice"
                     result["text"] = ""
                     yield Response(sentence=json.dumps(result), user=req.user, action="waiting", language=req.language)
                 else:
@@ -81,18 +80,18 @@
                     result["detail"] = "decoding data: %d bytes" % len(tmp_data)
                     result["text"] = ""
                     yield Response(sentence=json.dumps(result), user=req.user, action="decoding", language=req.language)
-                    if len(tmp_data) < 800: #min input_len for asr model 
+                    if len(tmp_data) < 9600: #min input_len for asr model , 300ms
                         end_time = int(round(time.time() * 1000))
                         delay_str = str(end_time - begin_time)
                         result = {}
                         result["success"] = True
-                        result["detail"] = "finish_sentence_data_is_not_long_enough"
+                        result["detail"] = "waiting_for_more_voice"
                         result["server_delay_ms"] = delay_str
                         result["text"] = ""
-                        print ("user: %s , delay(ms): %s, error: %s " % (req.user, delay_str, "data_is_not_long_enough"))
-                        yield Response(sentence=json.dumps(result), user=req.user, action="finish", language=req.language)
+                        print ("user: %s , delay(ms): %s, info: %s " % (req.user, delay_str, "waiting_for_more_voice"))
+                        yield Response(sentence=json.dumps(result), user=req.user, action="waiting", language=req.language)
                     else:                           
-                        asr_result = self.inference_16k_pipline(audio_in=tmp_data, audio_fs = self.sample_rate)
+                        asr_result = self.inference_16k_pipeline(audio_in=tmp_data, audio_fs = self.sample_rate)
                         if "text" in asr_result:
                             asr_result = asr_result['text']
                         else:

--
Gitblit v1.9.1