From 6427c834dfd97b1f05c6659cdc7ccf010bf82fe1 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Mon, 24 Apr 2023 19:50:07 +0800
Subject: [PATCH] grpc_main_server: add model, vad/punc, sample-rate, backend and onnx-dir CLI options
---
funasr/runtime/python/grpc/grpc_main_server.py | 42 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)
diff --git a/funasr/runtime/python/grpc/grpc_main_server.py b/funasr/runtime/python/grpc/grpc_main_server.py
index 0f9aa0b..ae386fa 100644
--- a/funasr/runtime/python/grpc/grpc_main_server.py
+++ b/funasr/runtime/python/grpc/grpc_main_server.py
@@ -1,14 +1,16 @@
+import grpc
from concurrent import futures
+import argparse
+
import paraformer_pb2_grpc
from grpc_server import ASRServicer
-import grpc
-import argparse
def serve(args):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
# interceptors=(AuthInterceptor('Bearer mysecrettoken'),)
)
- paraformer_pb2_grpc.add_ASRServicer_to_server(ASRServicer(args.user_allowed), server)
+ paraformer_pb2_grpc.add_ASRServicer_to_server(
+ ASRServicer(args.user_allowed, args.model, args.sample_rate, args.backend, args.onnx_dir, vad_model=args.vad_model, punc_model=args.punc_model), server)
port = "[::]:" + str(args.port)
server.add_insecure_port(port)
server.start()
@@ -20,11 +22,45 @@
parser.add_argument("--port",
type=int,
default=10095,
+
help="grpc server port")
+
parser.add_argument("--user_allowed",
type=str,
default="project1_user1|project1_user2|project2_user3",
help="allowed user for grpc client")
+
+ parser.add_argument("--model",
+ type=str,
+ default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
+ help="model from modelscope")
+ parser.add_argument("--vad_model",
+ type=str,
+ default="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
+ help="model from modelscope")
+
+ parser.add_argument("--punc_model",
+ type=str,
+ default="",
+ help="model from modelscope")
+
+ parser.add_argument("--sample_rate",
+ type=int,
+ default=16000,
+ help="audio sample_rate from client")
+
+ parser.add_argument("--backend",
+ type=str,
+ default="pipeline",
+ choices=("pipeline", "onnxruntime"),
+ help="backend, optional modelscope pipeline or onnxruntime")
+
+ parser.add_argument("--onnx_dir",
+ type=str,
+ default="/nfs/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
+ help="onnx model dir")
+
+
args = parser.parse_args()
--
Gitblit v1.9.1