From 9ba0dbd98bf69c830dfcfde8f109a400cb65e4e5 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Fri, 29 Mar 2024 17:24:59 +0800
Subject: [PATCH] OfflineStream: add use_gpu/batch_size options and lexicon check for LM init
---
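Usage sketch (patch commentary only, not part of the applied diff): a minimal, assumed example of how a caller would pass the new use_gpu and batch_size arguments through CreateOfflineStream. The header name "offline-stream.h", the concrete model paths, and the visibility of the MODEL_DIR/QUANTIZE key constants in the including translation unit are assumptions, not guaranteed by this patch.

    #include <map>
    #include <string>
    #include "offline-stream.h"   // assumed header declaring OfflineStream / CreateOfflineStream

    int main() {
        std::map<std::string, std::string> model_path;
        model_path[MODEL_DIR] = "/models/paraformer";   // hypothetical model directory
        model_path[QUANTIZE]  = "false";
        // With use_gpu=true the constructor selects ParaformerTorch and applies batch_size;
        // if the binary was built without -DGPU=ON it logs an error and falls back to CPU.
        funasr::OfflineStream* stream =
            funasr::CreateOfflineStream(model_path, /*thread_num=*/4,
                                        /*use_gpu=*/true, /*batch_size=*/8);
        // ... run recognition with the handles owned by the stream ...
        delete stream;
        return 0;
    }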
runtime/onnxruntime/src/offline-stream.cpp | 39 +++++++++++++++++++++++++++++++++------
1 file changed, 33 insertions(+), 6 deletions(-)
diff --git a/runtime/onnxruntime/src/offline-stream.cpp b/runtime/onnxruntime/src/offline-stream.cpp
index 2709ca6..3f914aa 100644
--- a/runtime/onnxruntime/src/offline-stream.cpp
+++ b/runtime/onnxruntime/src/offline-stream.cpp
@@ -1,7 +1,7 @@
#include "precomp.h"
namespace funasr {
-OfflineStream::OfflineStream(std::map<std::string, std::string>& model_path, int thread_num)
+OfflineStream::OfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu, int batch_size)
{
// VAD model
if(model_path.find(VAD_DIR) != model_path.end()){
@@ -35,7 +35,19 @@
string hw_compile_model_path;
string seg_dict_path;
- asr_handle = make_unique<Paraformer>();
+ if(use_gpu){
+ #ifdef USE_GPU
+ asr_handle = make_unique<ParaformerTorch>();
+ asr_handle->SetBatchSize(batch_size);
+ #else
+ LOG(ERROR) <<"GPU is not supported! CPU will be used! If you want to use GPU, please add -DGPU=ON when cmake";
+ asr_handle = make_unique<Paraformer>();
+ use_gpu = false;
+ #endif
+ }else{
+ asr_handle = make_unique<Paraformer>();
+ }
+
bool enable_hotword = false;
hw_compile_model_path = PathAppend(model_path.at(MODEL_DIR), MODEL_EB_NAME);
seg_dict_path = PathAppend(model_path.at(MODEL_DIR), MODEL_SEG_DICT);
@@ -54,6 +66,15 @@
if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
am_model_path = PathAppend(model_path.at(MODEL_DIR), QUANT_MODEL_NAME);
}
+ if(use_gpu){
+ am_model_path = PathAppend(model_path.at(MODEL_DIR), TORCH_MODEL_NAME);
+ if(model_path.find(QUANTIZE) != model_path.end() && model_path.at(QUANTIZE) == "true"){
+ am_model_path = PathAppend(model_path.at(MODEL_DIR), TORCH_QUANT_MODEL_NAME);
+ }
+ if(model_path.find(BLADEDISC) != model_path.end() && model_path.at(BLADEDISC) == "true"){
+ am_model_path = PathAppend(model_path.at(MODEL_DIR), BLADE_MODEL_NAME);
+ }
+ }
}
am_cmvn_path = PathAppend(model_path.at(MODEL_DIR), AM_CMVN_NAME);
am_config_path = PathAppend(model_path.at(MODEL_DIR), AM_CONFIG_NAME);
@@ -63,10 +84,16 @@
// Lm resource
if (model_path.find(LM_DIR) != model_path.end() && model_path.at(LM_DIR) != "") {
- string fst_path, lm_config_path, hws_path;
+ string fst_path, lm_config_path, lex_path;
fst_path = PathAppend(model_path.at(LM_DIR), LM_FST_RES);
lm_config_path = PathAppend(model_path.at(LM_DIR), LM_CONFIG_NAME);
- asr_handle->InitLm(fst_path, lm_config_path);
+ lex_path = PathAppend(model_path.at(LM_DIR), LEX_PATH);
+ if (access(lex_path.c_str(), F_OK) != 0)
+ {
+ LOG(ERROR) << "Lexicon.txt file does not exist, please use the latest model version. Skipping LM model load.";
+ }else{
+ asr_handle->InitLm(fst_path, lm_config_path, lex_path);
+ }
}
// PUNC model
@@ -109,10 +136,10 @@
#endif
}
-OfflineStream *CreateOfflineStream(std::map<std::string, std::string>& model_path, int thread_num)
+OfflineStream *CreateOfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu, int batch_size)
{
OfflineStream *mm;
- mm = new OfflineStream(model_path, thread_num);
+ mm = new OfflineStream(model_path, thread_num, use_gpu, batch_size);
return mm;
}
--
Gitblit v1.9.1