From 8e449d676d4f06ba3de02c451c3b4fa3433a3792 Mon Sep 17 00:00:00 2001
From: lyblsgo <lyblsgo@163.com>
Date: Tue, 11 Apr 2023 10:35:29 +0800
Subject: [PATCH] debug onnxruntime multithread bugs
---
funasr/runtime/onnxruntime/src/paraformer_onnx.cpp | 71 ++++++++++++++++++++++++++++-------
 1 file changed, 57 insertions(+), 14 deletions(-)
diff --git a/funasr/runtime/onnxruntime/src/paraformer_onnx.cpp b/funasr/runtime/onnxruntime/src/paraformer_onnx.cpp
index 8eb0e89..bb00849 100644
--- a/funasr/runtime/onnxruntime/src/paraformer_onnx.cpp
+++ b/funasr/runtime/onnxruntime/src/paraformer_onnx.cpp
@@ -6,16 +6,19 @@
ModelImp::ModelImp(const char* path,int nNumThread, bool quantize)
{
string model_path;
- string vocab_path;
+ string cmvn_path;
+ string config_path;
+
if(quantize)
{
model_path = pathAppend(path, "model_quant.onnx");
}else{
model_path = pathAppend(path, "model.onnx");
}
- vocab_path = pathAppend(path, "vocab.txt");
+ cmvn_path = pathAppend(path, "am.mvn");
+ config_path = pathAppend(path, "config.yaml");
- fe = new FeatureExtract(3);
+ //fe = new FeatureExtract(3);
//sessionOptions.SetInterOpNumThreads(1);
sessionOptions.SetIntraOpNumThreads(nNumThread);
@@ -43,13 +46,14 @@
m_szInputNames.push_back(item.c_str());
for (auto& item : m_strOutputNames)
m_szOutputNames.push_back(item.c_str());
- vocab = new Vocab(vocab_path.c_str());
+ vocab = new Vocab(config_path.c_str());
+ load_cmvn(cmvn_path.c_str());
}
ModelImp::~ModelImp()
{
- if(fe)
- delete fe;
+ //if(fe)
+ // delete fe;
if (m_session)
{
delete m_session;
@@ -61,7 +65,8 @@
void ModelImp::reset()
{
- fe->reset();
+ //fe->reset();
+ printf("Not Imp!!!!!!\n");
}
void ModelImp::apply_lfr(Tensor<float>*& din)
@@ -88,16 +93,49 @@
din = tmp;
}
+void ModelImp::load_cmvn(const char *filename)
+{
+ ifstream cmvn_stream(filename);
+ string line;
+
+ while (getline(cmvn_stream, line)) {
+ istringstream iss(line);
+ vector<string> line_item{istream_iterator<string>{iss}, istream_iterator<string>{}};
+ if (line_item[0] == "<AddShift>") {
+ getline(cmvn_stream, line);
+ istringstream means_lines_stream(line);
+ vector<string> means_lines{istream_iterator<string>{means_lines_stream}, istream_iterator<string>{}};
+ if (means_lines[0] == "<LearnRateCoef>") {
+ for (int j = 3; j < means_lines.size() - 1; j++) {
+ means_list.push_back(stof(means_lines[j]));
+ }
+ continue;
+ }
+ }
+ else if (line_item[0] == "<Rescale>") {
+ getline(cmvn_stream, line);
+ istringstream vars_lines_stream(line);
+ vector<string> vars_lines{istream_iterator<string>{vars_lines_stream}, istream_iterator<string>{}};
+ if (vars_lines[0] == "<LearnRateCoef>") {
+ for (int j = 3; j < vars_lines.size() - 1; j++) {
+ vars_list.push_back(stof(vars_lines[j])*scale);
+ }
+ continue;
+ }
+ }
+ }
+}
+
void ModelImp::apply_cmvn(Tensor<float>* din)
{
const float* var;
const float* mean;
- float scale = 22.6274169979695;
+ var = vars_list.data();
+ mean= means_list.data();
+
int m = din->size[2];
int n = din->size[3];
- var = (const float*)paraformer_cmvn_var_hex;
- mean = (const float*)paraformer_cmvn_mean_hex;
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
int idx = i * n + j;
@@ -122,8 +160,9 @@
string ModelImp::forward(float* din, int len, int flag)
{
-
Tensor<float>* in;
+ FeatureExtract* fe = new FeatureExtract(3);
+ fe->reset();
fe->insert(din, len, flag);
fe->fetch(in);
apply_lfr(in);
@@ -155,7 +194,6 @@
auto outputTensor = m_session->Run(run_option, m_szInputNames.data(), input_onnx.data(), m_szInputNames.size(), m_szOutputNames.data(), m_szOutputNames.size());
std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
-
int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
float* floatData = outputTensor[0].GetTensorMutableData<float>();
auto encoder_out_lens = outputTensor[1].GetTensorMutableData<int64_t>();
@@ -166,9 +204,14 @@
result = "";
}
-
- if(in)
+ if(in){
delete in;
+ in = nullptr;
+ }
+ if(fe){
+ delete fe;
+ fe = nullptr;
+ }
return result;
}
--
Gitblit v1.9.1