From 0ff321caea4a1dcc1368f50cd0e40d199f0da7d2 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 30 May 2024 15:10:51 +0800
Subject: [PATCH] funasr-wss-server: bump default model revisions and add a GPU batch-size option
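
Bump the default ASR/VAD/PUNC model revisions to the v2.0.x line (including
the per-model overrides keyed on the ModelScope model names) and add a
batch_size TCLAP argument that is forwarded to WebSocketServer::initAsr()
when GPU inference is enabled.

A minimal launch sketch, assuming the INFER_GPU and BATCHSIZE macros register
the long options "use-gpu" and "batch-size" (hypothetical spellings; the
remaining model-path options are elided):

    ./funasr-wss-server ... --use-gpu --batch-size 8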
---
runtime/websocket/bin/funasr-wss-server.cpp | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/runtime/websocket/bin/funasr-wss-server.cpp b/runtime/websocket/bin/funasr-wss-server.cpp
index bd82277..100cf35 100644
--- a/runtime/websocket/bin/funasr-wss-server.cpp
+++ b/runtime/websocket/bin/funasr-wss-server.cpp
@@ -50,7 +50,7 @@
TCLAP::ValueArg<std::string> model_revision(
"", "model-revision",
"ASR model revision",
- false, "v1.2.1", "string");
+ false, "v2.0.4", "string");
TCLAP::ValueArg<std::string> quantize(
"", QUANTIZE,
"true (Default), load the model of model_quant.onnx in model_dir. If set "
@@ -67,7 +67,7 @@
TCLAP::ValueArg<std::string> vad_revision(
"", "vad-revision",
"VAD model revision",
- false, "v1.2.0", "string");
+ false, "v2.0.4", "string");
TCLAP::ValueArg<std::string> vad_quant(
"", VAD_QUANT,
"true (Default), load the model of model_quant.onnx in vad_dir. If set "
@@ -81,7 +81,7 @@
TCLAP::ValueArg<std::string> punc_revision(
"", "punc-revision",
"PUNC model revision",
- false, "v1.1.7", "string");
+ false, "v2.0.4", "string");
TCLAP::ValueArg<std::string> punc_quant(
"", PUNC_QUANT,
"true (Default), load the model of model_quant.onnx in punc_dir. If set "
@@ -126,6 +126,7 @@
TCLAP::ValueArg<std::int32_t> fst_inc_wts("", FST_INC_WTS,
"the fst hotwords incremental bias", false, 20, "int32_t");
TCLAP::SwitchArg use_gpu("", INFER_GPU, "Whether to use GPU, default is false", false);
+ TCLAP::ValueArg<std::int32_t> batch_size("", BATCHSIZE, "batch_size for ASR model when using GPU", false, 4, "int32_t");
// add file
cmd.add(hotword);
@@ -158,6 +159,7 @@
cmd.add(decoder_thread_num);
cmd.add(model_thread_num);
cmd.add(use_gpu);
+ cmd.add(batch_size);
cmd.parse(argc, argv);
std::map<std::string, std::string> model_path;
@@ -182,6 +184,7 @@
lattice_beam_ = lattice_beam.getValue();
am_scale_ = am_scale.getValue();
bool use_gpu_ = use_gpu.getValue();
+ int batch_size_ = batch_size.getValue();
// Download model form Modelscope
try{
@@ -242,17 +245,17 @@
// modify model-revision by model name
size_t found = s_asr_path.find("speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404");
if (found != std::string::npos) {
- model_path["model-revision"]="v1.2.4";
+ model_path["model-revision"]="v2.0.4";
}
found = s_asr_path.find("speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404");
if (found != std::string::npos) {
- model_path["model-revision"]="v1.0.5";
+ model_path["model-revision"]="v2.0.5";
}
found = s_asr_path.find("speech_paraformer-large_asr_nat-en-16k-common-vocab10020");
if (found != std::string::npos) {
- model_path["model-revision"]="v1.0.0";
+ model_path["model-revision"]="v2.0.4";
s_itn_path="";
s_lm_path="";
}
@@ -477,7 +480,7 @@
WebSocketServer websocket_srv(
io_decoder, is_ssl, server, wss_server, s_certfile,
s_keyfile); // websocket server for asr engine
- websocket_srv.initAsr(model_path, s_model_thread_num, use_gpu_); // init asr model
+ websocket_srv.initAsr(model_path, s_model_thread_num, use_gpu_, batch_size_); // init asr model
LOG(INFO) << "decoder-thread-num: " << s_decoder_thread_num;
LOG(INFO) << "io-thread-num: " << s_io_thread_num;
--
Gitblit v1.9.1