From cad1979179a110b154568dd6281035ece9aaf0b8 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Fri, 29 Mar 2024 16:48:03 +0800
Subject: [PATCH] add batch for offline-stream
---
runtime/onnxruntime/src/offline-stream.cpp | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/runtime/onnxruntime/src/offline-stream.cpp b/runtime/onnxruntime/src/offline-stream.cpp
index 69befc6..3f914aa 100644
--- a/runtime/onnxruntime/src/offline-stream.cpp
+++ b/runtime/onnxruntime/src/offline-stream.cpp
@@ -1,7 +1,7 @@
#include "precomp.h"
namespace funasr {
-OfflineStream::OfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu)
+OfflineStream::OfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu, int batch_size)
{
// VAD model
if(model_path.find(VAD_DIR) != model_path.end()){
@@ -38,6 +38,7 @@
if(use_gpu){
#ifdef USE_GPU
asr_handle = make_unique<ParaformerTorch>();
+ asr_handle->SetBatchSize(batch_size);
#else
LOG(ERROR) <<"GPU is not supported! CPU will be used! If you want to use GPU, please add -DGPU=ON when cmake";
asr_handle = make_unique<Paraformer>();
@@ -135,10 +136,10 @@
#endif
}
-OfflineStream *CreateOfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu)
+OfflineStream *CreateOfflineStream(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu, int batch_size)
{
OfflineStream *mm;
- mm = new OfflineStream(model_path, thread_num, use_gpu);
+ mm = new OfflineStream(model_path, thread_num, use_gpu, batch_size);
return mm;
}
--
Gitblit v1.9.1