From 6427c834dfd97b1f05c6659cdc7ccf010bf82fe1 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: 星期一, 24 四月 2023 19:50:07 +0800
Subject: [PATCH] update

---
 funasr/runtime/python/grpc/grpc_server.py |   65 ++++++++++++++++++--------------
 1 file changed, 37 insertions(+), 28 deletions(-)

diff --git a/funasr/runtime/python/grpc/grpc_server.py b/funasr/runtime/python/grpc/grpc_server.py
index 7dea893..4fd4f95 100644
--- a/funasr/runtime/python/grpc/grpc_server.py
+++ b/funasr/runtime/python/grpc/grpc_server.py
@@ -1,30 +1,34 @@
 from concurrent import futures
 import grpc
 import json
-import paraformer_pb2
-import paraformer_pb2_grpc
 import time
 
-
+import paraformer_pb2_grpc
 from paraformer_pb2 import Response
 
 
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-
-inference_16k_pipline = pipeline(
-   task=Tasks.auto_speech_recognition,
-   model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1')
-
-auth_user = ['zksz_futureTV_1','zksz_dangaomei_1','zksz_test_1','zksz_test_2','zksz_test_3','zksz_test_4','zksz_test_5','zksz_test_6','zksz_test_7']
-
-
 class ASRServicer(paraformer_pb2_grpc.ASRServicer):
-    def __init__(self):
+    def __init__(self, user_allowed, model, sample_rate, backend, onnx_dir, vad_model='', punc_model=''):
         print("ASRServicer init")
+        self.backend = backend
         self.init_flag = 0
         self.client_buffers = {}
         self.client_transcription = {}
+        self.auth_user = user_allowed.split("|")
+        if self.backend == "pipeline":
+            try:
+                from modelscope.pipelines import pipeline
+                from modelscope.utils.constant import Tasks
+            except ImportError:
+                raise ImportError("Please install modelscope")
+            self.inference_16k_pipeline = pipeline(task=Tasks.auto_speech_recognition, model=model, vad_model=vad_model, punc_model=punc_model)
+        elif self.backend == "onnxruntime":
+            try:
+                from funasr_onnx import Paraformer
+            except ImportError:
+                raise ImportError("Please install onnxruntime environment")
+            self.inference_16k_pipeline = Paraformer(model_dir=onnx_dir)
+        self.sample_rate = sample_rate
 
     def clear_states(self, user):
         self.clear_buffers(user)
@@ -46,13 +50,13 @@
         
             
         for req in request_iterator:
-            if req.user not in auth_user:
+            if req.user not in self.auth_user:
                 result = {}
                 result["success"] = False
                 result["detail"] = "Not Authorized user: %s " % req.user
                 result["text"] = ""
                 yield Response(sentence=json.dumps(result), user=req.user, action="terminate", language=req.language)
-            if req.isEnd: #end grpc
+            elif req.isEnd: #end grpc
                 print("asr end")
                 self.disconnect(req.user)
                 result = {}
@@ -75,34 +79,39 @@
                 if req.user not in self.client_buffers:
                     result = {}
                     result["success"] = True
-                    result["detail"] = "waiting_for_voice"
+                    result["detail"] = "waiting_for_more_voice"
                     result["text"] = ""
                     yield Response(sentence=json.dumps(result), user=req.user, action="waiting", language=req.language)
                 else:
                     begin_time = int(round(time.time() * 1000))
-                    tmp_data = self.client_buffers[req.user] #TODO make a test, about local variable in class parralle circumstance.
+                    tmp_data = self.client_buffers[req.user]
                     self.clear_states(req.user)
                     result = {}
                     result["success"] = True
                     result["detail"] = "decoding data: %d bytes" % len(tmp_data)
                     result["text"] = ""
                     yield Response(sentence=json.dumps(result), user=req.user, action="decoding", language=req.language)
-                    if len(tmp_data) < 800: #min input_len for asr model 
+                    if len(tmp_data) < 9600:  # min ASR input: 300 ms of 16 kHz 16-bit PCM (9600 bytes)
                         end_time = int(round(time.time() * 1000))
                         delay_str = str(end_time - begin_time)
                         result = {}
                         result["success"] = True
-                        result["detail"] = "finish_sentence_data_is_not_long_enough"
+                        result["detail"] = "waiting_for_more_voice"
                         result["server_delay_ms"] = delay_str
                         result["text"] = ""
-                        print ("user: %s , delay(ms): %s, error: %s " % (req.user, delay_str, "data_is_not_long_enough"))
-                        yield Response(sentence=json.dumps(result), user=req.user, action="finish", language=req.language)
-                    else:                           
-                        asr_result = inference_16k_pipline(audio_in=tmp_data, audio_fs = 16000)
-                        if "text" in asr_result:
-                            asr_result = asr_result['text']
-                        else:
-                            asr_result = ""
+                        print ("user: %s , delay(ms): %s, info: %s " % (req.user, delay_str, "waiting_for_more_voice"))
+                        yield Response(sentence=json.dumps(result), user=req.user, action="waiting", language=req.language)
+                    else:
+                        if self.backend == "pipeline":
+                            asr_result = self.inference_16k_pipeline(audio_in=tmp_data, audio_fs = self.sample_rate)
+                            if "text" in asr_result:
+                                asr_result = asr_result['text']
+                            else:
+                                asr_result = ""
+                        elif self.backend == "onnxruntime":
+                            from funasr_onnx.utils.frontend import load_bytes
+                            array = load_bytes(tmp_data)
+                            asr_result = self.inference_16k_pipeline(array)[0]
                         end_time = int(round(time.time() * 1000))
                         delay_str = str(end_time - begin_time)
                         print ("user: %s , delay(ms): %s, text: %s " % (req.user, delay_str, asr_result))

--
Gitblit v1.9.1