From ade08818b7a579aac75182b906a5bd3b8126411c Mon Sep 17 00:00:00 2001
From: Yabin Li <wucong.lyb@alibaba-inc.com>
Date: Mon, 27 May 2024 15:46:26 +0800
Subject: [PATCH] Merge branch 'dev_batch' into main
---
runtime/onnxruntime/src/funasrruntime.cpp | 178 +++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 125 insertions(+), 53 deletions(-)
diff --git a/runtime/onnxruntime/src/funasrruntime.cpp b/runtime/onnxruntime/src/funasrruntime.cpp
index 4bc64af..d235e6f 100644
--- a/runtime/onnxruntime/src/funasrruntime.cpp
+++ b/runtime/onnxruntime/src/funasrruntime.cpp
@@ -33,9 +33,9 @@
return mm;
}
- _FUNASRAPI FUNASR_HANDLE FunOfflineInit(std::map<std::string, std::string>& model_path, int thread_num)
+ _FUNASRAPI FUNASR_HANDLE FunOfflineInit(std::map<std::string, std::string>& model_path, int thread_num, bool use_gpu, int batch_size)
{
- funasr::OfflineStream* mm = funasr::CreateOfflineStream(model_path, thread_num);
+ funasr::OfflineStream* mm = funasr::CreateOfflineStream(model_path, thread_num, use_gpu, batch_size);
return mm;
}
@@ -74,16 +74,11 @@
if(p_result->snippet_time == 0){
return p_result;
}
- int n_step = 0;
- int n_total = audio.GetQueueSize();
+
while (audio.Fetch(buff, len, flag) > 0) {
string msg = recog_obj->Forward(buff, len, input_finished);
p_result->msg += msg;
- n_step++;
- if (fn_callback)
- fn_callback(n_step, n_total);
}
-
return p_result;
}
@@ -109,8 +104,6 @@
float* buff;
int len;
int flag = 0;
- int n_step = 0;
- int n_total = audio.GetQueueSize();
funasr::FUNASR_RECOG_RESULT* p_result = new funasr::FUNASR_RECOG_RESULT;
p_result->snippet_time = audio.GetTimeLen();
if(p_result->snippet_time == 0){
@@ -119,11 +112,7 @@
while (audio.Fetch(buff, len, flag) > 0) {
string msg = recog_obj->Forward(buff, len, true);
p_result->msg += msg;
- n_step++;
- if (fn_callback)
- fn_callback(n_step, n_total);
}
-
return p_result;
}
@@ -244,26 +233,53 @@
if(p_result->snippet_time == 0){
return p_result;
}
+ std::vector<int> index_vector={0};
+ int msg_idx = 0;
if(offline_stream->UseVad()){
- audio.CutSplit(offline_stream);
+ audio.CutSplit(offline_stream, index_vector);
}
+ std::vector<string> msgs(index_vector.size());
+ std::vector<float> msg_stimes(index_vector.size());
- float* buff;
- int len;
- int flag = 0;
+ float** buff;
+ int* len;
+ int* flag;
+ float* start_time;
+ int batch_size = offline_stream->asr_handle->GetBatchSize();
+ int batch_in = 0;
- int n_step = 0;
- int n_total = audio.GetQueueSize();
- float start_time = 0.0;
std::string cur_stamp = "[";
std::string lang = (offline_stream->asr_handle)->GetLang();
- while (audio.Fetch(buff, len, flag, start_time) > 0) {
+ while (audio.FetchDynamic(buff, len, flag, start_time, batch_size, batch_in) > 0) {
// dec reset
funasr::WfstDecoder* wfst_decoder = (funasr::WfstDecoder*)dec_handle;
if (wfst_decoder){
wfst_decoder->StartUtterance();
}
- string msg = (offline_stream->asr_handle)->Forward(buff, len, true, hw_emb, dec_handle);
+ vector<string> msg_batch = (offline_stream->asr_handle)->Forward(buff, len, true, hw_emb, dec_handle, batch_in);
+ for(int idx=0; idx<batch_in; idx++){
+ string msg = msg_batch[idx];
+ if(msg_idx < index_vector.size()){
+ msgs[index_vector[msg_idx]] = msg;
+ msg_stimes[index_vector[msg_idx]] = start_time[idx];
+ msg_idx++;
+ }else{
+ LOG(ERROR) << "msg_idx: " << msg_idx <<" is out of range " << index_vector.size();
+ }
+ }
+
+ // release
+ delete[] buff;
+ buff = nullptr;
+ delete[] len;
+ len = nullptr;
+ delete[] flag;
+ flag = nullptr;
+ delete[] start_time;
+ start_time = nullptr;
+ }
+ for(int idx=0; idx<msgs.size(); idx++){
+ string msg = msgs[idx];
std::vector<std::string> msg_vec = funasr::split(msg, '|');
if(msg_vec.size()==0){
continue;
@@ -276,14 +292,11 @@
if(msg_vec.size() > 1){
std::vector<std::string> msg_stamp = funasr::split(msg_vec[1], ',');
for(int i=0; i<msg_stamp.size()-1; i+=2){
- float begin = std::stof(msg_stamp[i])+start_time;
- float end = std::stof(msg_stamp[i+1])+start_time;
+ float begin = std::stof(msg_stamp[i])+msg_stimes[idx];
+ float end = std::stof(msg_stamp[i+1])+msg_stimes[idx];
cur_stamp += "["+std::to_string((int)(1000*begin))+","+std::to_string((int)(1000*end))+"],";
}
}
- n_step++;
- if (fn_callback)
- fn_callback(n_step, n_total);
}
if(cur_stamp != "["){
cur_stamp.erase(cur_stamp.length() - 1);
@@ -342,25 +355,53 @@
if(p_result->snippet_time == 0){
return p_result;
}
+ std::vector<int> index_vector={0};
+ int msg_idx = 0;
if(offline_stream->UseVad()){
- audio.CutSplit(offline_stream);
+ audio.CutSplit(offline_stream, index_vector);
}
+ std::vector<string> msgs(index_vector.size());
+ std::vector<float> msg_stimes(index_vector.size());
- float* buff;
- int len;
- int flag = 0;
- int n_step = 0;
- int n_total = audio.GetQueueSize();
- float start_time = 0.0;
+ float** buff;
+ int* len;
+ int* flag;
+ float* start_time;
+ int batch_size = offline_stream->asr_handle->GetBatchSize();
+ int batch_in = 0;
+
std::string cur_stamp = "[";
std::string lang = (offline_stream->asr_handle)->GetLang();
- while (audio.Fetch(buff, len, flag, start_time) > 0) {
+ while (audio.FetchDynamic(buff, len, flag, start_time, batch_size, batch_in) > 0) {
// dec reset
funasr::WfstDecoder* wfst_decoder = (funasr::WfstDecoder*)dec_handle;
if (wfst_decoder){
wfst_decoder->StartUtterance();
}
- string msg = (offline_stream->asr_handle)->Forward(buff, len, true, hw_emb, dec_handle);
+ vector<string> msg_batch = (offline_stream->asr_handle)->Forward(buff, len, true, hw_emb, dec_handle, batch_in);
+ for(int idx=0; idx<batch_in; idx++){
+ string msg = msg_batch[idx];
+ if(msg_idx < index_vector.size()){
+ msgs[index_vector[msg_idx]] = msg;
+ msg_stimes[index_vector[msg_idx]] = start_time[idx];
+ msg_idx++;
+ }else{
+ LOG(ERROR) << "msg_idx: " << msg_idx <<" is out of range " << index_vector.size();
+ }
+ }
+
+ // release
+ delete[] buff;
+ buff = nullptr;
+ delete[] len;
+ len = nullptr;
+ delete[] flag;
+ flag = nullptr;
+ delete[] start_time;
+ start_time = nullptr;
+ }
+ for(int idx=0; idx<msgs.size(); idx++){
+ string msg = msgs[idx];
std::vector<std::string> msg_vec = funasr::split(msg, '|');
if(msg_vec.size()==0){
continue;
@@ -373,15 +414,11 @@
if(msg_vec.size() > 1){
std::vector<std::string> msg_stamp = funasr::split(msg_vec[1], ',');
for(int i=0; i<msg_stamp.size()-1; i+=2){
- float begin = std::stof(msg_stamp[i])+start_time;
- float end = std::stof(msg_stamp[i+1])+start_time;
+ float begin = std::stof(msg_stamp[i])+msg_stimes[idx];
+ float end = std::stof(msg_stamp[i+1])+msg_stimes[idx];
cur_stamp += "["+std::to_string((int)(1000*begin))+","+std::to_string((int)(1000*end))+"],";
}
}
-
- n_step++;
- if (fn_callback)
- fn_callback(n_step, n_total);
}
if(cur_stamp != "["){
cur_stamp.erase(cur_stamp.length() - 1);
@@ -518,8 +555,14 @@
if (wfst_decoder){
wfst_decoder->StartUtterance();
}
- string msg = ((funasr::Paraformer*)asr_handle)->Forward(frame->data, frame->len, frame->is_final, hw_emb, dec_handle);
-
+ float** buff;
+ int* len;
+ buff = new float*[1];
+ len = new int[1];
+ buff[0] = frame->data;
+ len[0] = frame->len;
+ vector<string> msgs = ((funasr::Paraformer*)asr_handle)->Forward(buff, len, frame->is_final, hw_emb, dec_handle);
+ string msg = msgs.size()>0?msgs[0]:"";
std::vector<std::string> msg_vec = funasr::split(msg, '|'); // split with timestamp
if(msg_vec.size()==0){
continue;
@@ -767,16 +810,45 @@
funasr::WfstDecoder* mm = nullptr;
if (asr_type == ASR_OFFLINE) {
funasr::OfflineStream* offline_stream = (funasr::OfflineStream*)handle;
- funasr::Paraformer* paraformer = (funasr::Paraformer*)offline_stream->asr_handle.get();
- if (paraformer->lm_)
- mm = new funasr::WfstDecoder(paraformer->lm_.get(),
- paraformer->GetPhoneSet(), paraformer->GetLmVocab(), glob_beam, lat_beam, am_scale);
+ auto paraformer = dynamic_cast<funasr::Paraformer*>(offline_stream->asr_handle.get());
+ if(paraformer !=nullptr){
+ if (paraformer->lm_){
+ mm = new funasr::WfstDecoder(paraformer->lm_.get(),
+ paraformer->GetPhoneSet(), paraformer->GetLmVocab(), glob_beam, lat_beam, am_scale);
+ }
+ return mm;
+ }
+ #ifdef USE_GPU
+ auto paraformer_torch = dynamic_cast<funasr::ParaformerTorch*>(offline_stream->asr_handle.get());
+ if(paraformer_torch !=nullptr){
+ if (paraformer_torch->lm_){
+ mm = new funasr::WfstDecoder(paraformer_torch->lm_.get(),
+ paraformer_torch->GetPhoneSet(), paraformer_torch->GetLmVocab(), glob_beam, lat_beam, am_scale);
+ }
+ return mm;
+ }
+ #endif
+
} else if (asr_type == ASR_TWO_PASS){
funasr::TpassStream* tpass_stream = (funasr::TpassStream*)handle;
- funasr::Paraformer* paraformer = (funasr::Paraformer*)tpass_stream->asr_handle.get();
- if (paraformer->lm_)
- mm = new funasr::WfstDecoder(paraformer->lm_.get(),
- paraformer->GetPhoneSet(), paraformer->GetLmVocab(), glob_beam, lat_beam, am_scale);
+ auto paraformer = dynamic_cast<funasr::Paraformer*>(tpass_stream->asr_handle.get());
+ if(paraformer !=nullptr){
+ if (paraformer->lm_){
+ mm = new funasr::WfstDecoder(paraformer->lm_.get(),
+ paraformer->GetPhoneSet(), paraformer->GetLmVocab(), glob_beam, lat_beam, am_scale);
+ }
+ return mm;
+ }
+ #ifdef USE_GPU
+ auto paraformer_torch = dynamic_cast<funasr::ParaformerTorch*>(tpass_stream->asr_handle.get());
+ if(paraformer_torch !=nullptr){
+ if (paraformer_torch->lm_){
+ mm = new funasr::WfstDecoder(paraformer_torch->lm_.get(),
+ paraformer_torch->GetPhoneSet(), paraformer_torch->GetLmVocab(), glob_beam, lat_beam, am_scale);
+ }
+ return mm;
+ }
+ #endif
}
return mm;
}
--
Gitblit v1.9.1