From d80ac2fd2df4e7fb8a28acfa512bb11472b5cc99 Mon Sep 17 00:00:00 2001
From: liugz18 <57401541+liugz18@users.noreply.github.com>
Date: Thu, 18 Jul 2024 21:34:55 +0800
Subject: [PATCH] Rename 'res' in line 514 to avoid a naming conflict with line 365
---
runtime/onnxruntime/src/offline-stream.cpp | 66 ++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 26 deletions(-)
diff --git a/runtime/onnxruntime/src/offline-stream.cpp b/runtime/onnxruntime/src/offline-stream.cpp
index 8941a05..166d3c9 100644
--- a/runtime/onnxruntime/src/offline-stream.cpp
+++ b/runtime/onnxruntime/src/offline-stream.cpp
@@ -1,7 +1,7 @@
#include "precomp.h"
namespace funasr {
-OfflineStream::OfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu)
+OfflineStream::OfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu, int batch_size)
{
// VAD model
if(model_path.find(VAD_DIR) != model_path.end()){
@@ -32,44 +32,55 @@
string am_model_path;
string am_cmvn_path;
string am_config_path;
- string hw_compile_model_path;
+ string token_path;
+ string hw_cpu_model_path;
+ string hw_gpu_model_path;
string seg_dict_path;
if(use_gpu){
+ #ifdef USE_GPU
asr_handle = make_unique<ParaformerTorch>();
+ asr_handle->SetBatchSize(batch_size);
+ #else
+ LOG(ERROR) <<"GPU is not supported! CPU will be used! If you want to use GPU, please add -DGPU=ON when cmake";
+ asr_handle = make_unique<Paraformer>();
+ use_gpu = false;
+ #endif
}else{
asr_handle = make_unique<Paraformer>();
}
bool enable_hotword = false;
- hw_compile_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_EB_NAME);
+ hw_cpu_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_EB_NAME);
+ hw_gpu_model_path = PathAppend(model_path.at(MODEL_DIR), TORCH_MODEL_EB_NAME);
seg_dict_path = PathAppend(model_path.at(MODEL_DIR), MODEL_SEG_DICT);
- if (access(hw_compile_model_path.c_str(), F_OK) == 0) { // if model_eb.onnx exist, hotword enabled
+ if (access(hw_cpu_model_path.c_str(), F_OK) == 0) { // if model_eb.onnx exist, hotword enabled
enable_hotword = true;
- asr_handle->InitHwCompiler(hw_compile_model_path, thread_num);
+ asr_handle->InitHwCompiler(hw_cpu_model_path, thread_num);
asr_handle->InitSegDict(seg_dict_path);
}
- if (enable_hotword) {
- am_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_NAME);
- if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
- am_model_path = PathAppend(model_path.at(MODEL_DIR), QUANT_MODEL_NAME);
- }
- } else {
- am_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_NAME);
- if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
- am_model_path = PathAppend(model_path.at(MODEL_DIR), QUANT_MODEL_NAME);
- }
- if(use_gpu){
- am_model_path = PathAppend(model_path.at(MODEL_DIR), TORCH_MODEL_NAME);
- if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
- am_model_path = PathAppend(model_path.at(MODEL_DIR), TORCH_QUANT_MODEL_NAME);
- }
- }
+ if (use_gpu && access(hw_gpu_model_path.c_str(), F_OK) == 0) { // if model_eb.torchscript exist, hotword enabled
+ enable_hotword = true;
+ asr_handle->InitHwCompiler(hw_gpu_model_path, thread_num);
+ asr_handle->InitSegDict(seg_dict_path);
}
+
+ am_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_NAME);
+ if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
+ am_model_path = PathAppend(model_path.at(MODEL_DIR), QUANT_MODEL_NAME);
+ }
+ if(use_gpu){
+ am_model_path = PathAppend(model_path.at(MODEL_DIR), TORCH_MODEL_NAME);
+ if(model_path.find(BLADEDISC) != model_path.end() && model_path.at(BLADEDISC) == "true"){
+ am_model_path = PathAppend(model_path.at(MODEL_DIR), BLADE_MODEL_NAME);
+ }
+ }
+
am_cmvn_path = PathAppend(model_path.at(MODEL_DIR), AM_CMVN_NAME);
am_config_path = PathAppend(model_path.at(MODEL_DIR), AM_CONFIG_NAME);
+ token_path = PathAppend(model_path.at(MODEL_DIR), TOKEN_PATH);
- asr_handle->InitAsr(am_model_path, am_cmvn_path, am_config_path, thread_num);
+ asr_handle->InitAsr(am_model_path, am_cmvn_path, am_config_path, token_path, thread_num);
}
// Lm resource
@@ -90,20 +101,23 @@
if(model_path.find(PUNC_DIR) != model_path.end()){
string punc_model_path;
string punc_config_path;
+ string token_path;
punc_model_path = PathAppend(model_path.at(PUNC_DIR), MODEL_NAME);
if(model_path.find(PUNC_QUANT) != model_path.end() && model_path.at(PUNC_QUANT) == "true"){
punc_model_path = PathAppend(model_path.at(PUNC_DIR), QUANT_MODEL_NAME);
}
punc_config_path = PathAppend(model_path.at(PUNC_DIR), PUNC_CONFIG_NAME);
+ token_path = PathAppend(model_path.at(PUNC_DIR), TOKEN_PATH);
if (access(punc_model_path.c_str(), F_OK) != 0 ||
- access(punc_config_path.c_str(), F_OK) != 0 )
+ access(punc_config_path.c_str(), F_OK) != 0 ||
+ access(token_path.c_str(), F_OK) != 0)
{
LOG(INFO) << "PUNC model file is not exist, skip load punc model.";
}else{
punc_handle = make_unique<CTTransformer>();
- punc_handle->InitPunc(punc_model_path, punc_config_path, thread_num);
+ punc_handle->InitPunc(punc_model_path, punc_config_path, token_path, thread_num);
use_punc = true;
}
}
@@ -126,10 +140,10 @@
#endif
}
-OfflineStream *CreateOfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu)
+OfflineStream *CreateOfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu, int batch_size)
{
OfflineStream *mm;
- mm = new OfflineStream(model_path, thread_num, use_gpu);
+ mm = new OfflineStream(model_path, thread_num, use_gpu, batch_size);
return mm;
}
--
Gitblit v1.9.1