From 9ba0dbd98bf69c830dfcfde8f109a400cb65e4e5 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Fri, 29 Mar 2024 17:24:59 +0800
Subject: [PATCH] fix func Forward
---
runtime/websocket/bin/funasr-wss-server.cpp | 61 ++++++++++++++++++++++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 21 deletions(-)
diff --git a/runtime/websocket/bin/funasr-wss-server.cpp b/runtime/websocket/bin/funasr-wss-server.cpp
index 9c3af70..cc4c156 100644
--- a/runtime/websocket/bin/funasr-wss-server.cpp
+++ b/runtime/websocket/bin/funasr-wss-server.cpp
@@ -26,6 +26,12 @@
}
+#ifdef _WIN32
+#include <windows.h>  // must be included at file scope, not inside main()
+#endif
int main(int argc, char* argv[]) {
+#ifdef _WIN32
+  SetConsoleOutputCP(65001);  // render UTF-8 (code page 65001) in the console
+#endif
try {
google::InitGoogleLogging(argv[0]);
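The hunk above forces the Windows console into UTF-8 so that Chinese log output is not garbled. A minimal standalone sketch of the same guard (assuming a Windows toolchain; CP_UTF8 is the <windows.h> constant equal to 65001):

    // utf8_console.cpp -- make a Windows console print UTF-8 correctly.
    #ifdef _WIN32
    #include <windows.h>
    #endif
    #include <cstdio>

    int main() {
    #ifdef _WIN32
        // Default consoles use a legacy code page (e.g. 936 on zh-CN systems),
        // so raw UTF-8 bytes show up as mojibake without this call.
        SetConsoleOutputCP(CP_UTF8);  // same as SetConsoleOutputCP(65001)
    #endif
        std::puts("阿里巴巴");  // a UTF-8 literal now renders correctly
        return 0;
    }

On non-Windows platforms the guard compiles away and the terminal is assumed to be UTF-8 already.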
@@ -51,6 +55,10 @@
"", QUANTIZE,
"true (Default), load the model of model_quant.onnx in model_dir. If set "
"false, load the model of model.onnx in model_dir",
+ false, "true", "string");
+ TCLAP::ValueArg<std::string> bladedisc(
+ "", BLADEDISC,
+ "true (Default), load the model of bladedisc in model_dir.",
false, "true", "string");
TCLAP::ValueArg<std::string> vad_dir(
"", VAD_DIR,
@@ -91,7 +99,7 @@
"0.0.0.0", "string");
TCLAP::ValueArg<int> port("", "port", "port", false, 10095, "int");
TCLAP::ValueArg<int> io_thread_num("", "io-thread-num", "io thread num",
- false, 8, "int");
+ false, 2, "int");
TCLAP::ValueArg<int> decoder_thread_num(
"", "decoder-thread-num", "decoder thread num", false, 8, "int");
TCLAP::ValueArg<int> model_thread_num("", "model-thread-num",
@@ -111,12 +119,14 @@
TCLAP::ValueArg<std::string> lm_dir("", LM_DIR,
"the LM model path, which contains compiled models: TLG.fst, config.yaml ", false, "damo/speech_ngram_lm_zh-cn-ai-wesp-fst", "string");
TCLAP::ValueArg<std::string> lm_revision(
- "", "lm-revision", "LM model revision", false, "v1.0.1", "string");
+ "", "lm-revision", "LM model revision", false, "v1.0.2", "string");
TCLAP::ValueArg<std::string> hotword("", HOTWORD,
"the hotword file, one hotword perline, Format: Hotword Weight (could be: 闃块噷宸村反 20)",
false, "/workspace/resources/hotwords.txt", "string");
TCLAP::ValueArg<std::int32_t> fst_inc_wts("", FST_INC_WTS,
"the fst hotwords incremental bias", false, 20, "int32_t");
+ TCLAP::SwitchArg use_gpu("", INFER_GPU, "Whether to use GPU, default is false", false);
+ TCLAP::ValueArg<std::int32_t> batch_size("", "batch-size", "batch_size for ASR model when using GPU", false, 5, "int32_t");
// add file
cmd.add(hotword);
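The new bladedisc, use_gpu, and batch_size options follow TCLAP's usual declare/add/parse/read cycle. A self-contained sketch of that cycle (flag names here are illustrative; the real ones come from constants such as BLADEDISC and INFER_GPU defined elsewhere in the sources):

    #include <cstdint>
    #include <iostream>
    #include <tclap/CmdLine.h>

    int main(int argc, char* argv[]) {
        TCLAP::CmdLine cmd("demo", ' ', "1.0");
        // SwitchArg: a boolean flag, stays false unless given on the command line.
        TCLAP::SwitchArg use_gpu("", "use-gpu", "Whether to use GPU", false);
        // ValueArg: a typed option with a default (here 5).
        TCLAP::ValueArg<std::int32_t> batch_size(
            "", "batch-size", "batch size for the ASR model", false, 5, "int32_t");
        cmd.add(use_gpu);
        cmd.add(batch_size);
        cmd.parse(argc, argv);  // reports malformed input and exits
        std::cout << "use_gpu=" << use_gpu.getValue()
                  << " batch_size=" << batch_size.getValue() << std::endl;
        return 0;
    }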
@@ -131,6 +141,7 @@
cmd.add(model_dir);
cmd.add(model_revision);
cmd.add(quantize);
+ cmd.add(bladedisc);
cmd.add(vad_dir);
cmd.add(vad_revision);
cmd.add(vad_quant);
@@ -147,11 +158,14 @@
cmd.add(io_thread_num);
cmd.add(decoder_thread_num);
cmd.add(model_thread_num);
+ cmd.add(use_gpu);
+ cmd.add(batch_size);
cmd.parse(argc, argv);
std::map<std::string, std::string> model_path;
GetValue(model_dir, MODEL_DIR, model_path);
GetValue(quantize, QUANTIZE, model_path);
+ GetValue(bladedisc, BLADEDISC, model_path);
GetValue(vad_dir, VAD_DIR, model_path);
GetValue(vad_quant, VAD_QUANT, model_path);
GetValue(punc_dir, PUNC_DIR, model_path);
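GetValue is a small helper defined elsewhere in the FunASR sources; judging from its call sites, it copies one parsed argument into the model_path map, roughly like this (a sketch, not the actual definition):

    #include <map>
    #include <string>
    #include <tclap/CmdLine.h>

    // Sketch: store a parsed TCLAP string argument under a fixed key.
    void GetValue(TCLAP::ValueArg<std::string>& value_arg, const std::string& key,
                  std::map<std::string, std::string>& model_path) {
        model_path.insert({key, value_arg.getValue()});
    }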
@@ -169,6 +183,8 @@
global_beam_ = global_beam.getValue();
lattice_beam_ = lattice_beam.getValue();
am_scale_ = am_scale.getValue();
+ bool use_gpu_ = use_gpu.getValue();
+ int batch_size_ = batch_size.getValue();
// Download model from ModelScope
try{
@@ -183,7 +199,7 @@
std::string s_itn_path = model_path[ITN_DIR];
std::string s_lm_path = model_path[LM_DIR];
- std::string python_cmd = "python -m funasr.utils.runtime_sdk_download_tool --type onnx --quantize True ";
+ std::string python_cmd = "python -m funasr.download.runtime_sdk_download_tool --type onnx --quantize True ";
if(vad_dir.isSet() && !s_vad_path.empty()){
std::string python_cmd_vad;
@@ -226,28 +242,29 @@
std::string down_asr_path;
std::string down_asr_model;
+ // pick model-revision by model name (applies to local and downloaded models alike)
+ size_t found = s_asr_path.find("speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404");
+ if (found != std::string::npos) {
+ model_path["model-revision"]="v1.2.4";
+ }
+
+ found = s_asr_path.find("speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404");
+ if (found != std::string::npos) {
+ model_path["model-revision"]="v1.0.5";
+ }
+
+ found = s_asr_path.find("speech_paraformer-large_asr_nat-en-16k-common-vocab10020");
+ if (found != std::string::npos) {
+ model_path["model-revision"]="v1.0.0";
+ s_itn_path="";
+ s_lm_path="";
+ }
+
if (access(s_asr_path.c_str(), F_OK) == 0){
// local
python_cmd_asr = python_cmd + " --model-name " + s_asr_path + " --export-dir ./ " + " --model_revision " + model_path["model-revision"];
down_asr_path = s_asr_path;
}else{
- size_t found = s_asr_path.find("speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404");
- if (found != std::string::npos) {
- model_path["model-revision"]="v1.2.4";
- }
-
- found = s_asr_path.find("speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404");
- if (found != std::string::npos) {
- model_path["model-revision"]="v1.0.5";
- }
-
- found = s_asr_path.find("speech_paraformer-large_asr_nat-en-16k-common-vocab10020");
- if (found != std::string::npos) {
- model_path["model-revision"]="v1.0.0";
- s_itn_path="";
- s_lm_path="";
- }
-
// modelscope
LOG(INFO) << "Download model: " << s_asr_path << " from modelscope: ";
python_cmd_asr = python_cmd + " --model-name " + s_asr_path + " --export-dir " + s_download_model_dir + " --model_revision " + model_path["model-revision"];
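The hunk above moves the name-based revision fixup out of the ModelScope-only else branch, so a locally present model directory now also gets the correct model-revision (previously it kept whatever the default was). The same selection can be read as a small lookup, sketched here with the names and revisions taken from the hunk:

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    // Sketch: pick model-revision from the model name. The English model in
    // the patch additionally clears the ITN and LM paths (omitted here).
    void FixRevisionByName(const std::string& asr_path,
                           std::map<std::string, std::string>& model_path) {
        static const std::vector<std::pair<std::string, std::string>> kRevisions = {
            {"speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404", "v1.2.4"},
            {"speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404", "v1.0.5"},
            {"speech_paraformer-large_asr_nat-en-16k-common-vocab10020", "v1.0.0"},
        };
        for (const auto& entry : kRevisions) {
            if (asr_path.find(entry.first) != std::string::npos) {
                model_path["model-revision"] = entry.second;
            }
        }
    }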
@@ -463,7 +480,7 @@
WebSocketServer websocket_srv(
io_decoder, is_ssl, server, wss_server, s_certfile,
s_keyfile); // websocket server for asr engine
- websocket_srv.initAsr(model_path, s_model_thread_num); // init asr model
+ websocket_srv.initAsr(model_path, s_model_thread_num, use_gpu_, batch_size_); // init asr model
LOG(INFO) << "decoder-thread-num: " << s_decoder_thread_num;
LOG(INFO) << "io-thread-num: " << s_io_thread_num;
--
Gitblit v1.9.1