From b9bcf1f093c3053fdc4e2cf4a1d38e27bbf429fb Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 19 Oct 2023 14:03:48 +0800
Subject: [PATCH] docs
---
funasr/runtime/onnxruntime/src/paraformer.cpp | 90 ++++++++++++++++++++++++++++++--------------
 1 file changed, 61 insertions(+), 29 deletions(-)
diff --git a/funasr/runtime/onnxruntime/src/paraformer.cpp b/funasr/runtime/onnxruntime/src/paraformer.cpp
index c042e0d..5bbaeef 100644
--- a/funasr/runtime/onnxruntime/src/paraformer.cpp
+++ b/funasr/runtime/onnxruntime/src/paraformer.cpp
@@ -37,11 +37,11 @@
session_options_.DisableCpuMemArena();
try {
- m_session_ = std::make_unique<Ort::Session>(env_, am_model.c_str(), session_options_);
+ m_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(am_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << am_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am onnx model: " << e.what();
- exit(0);
+ exit(-1);
}
string strName;
@@ -65,6 +65,7 @@
for (auto& item : m_strOutputNames)
m_szOutputNames.push_back(item.c_str());
vocab = new Vocab(am_config.c_str());
+ LoadConfigFromYaml(am_config.c_str());
LoadCmvn(am_cmvn.c_str());
}
@@ -89,19 +90,19 @@
session_options_.DisableCpuMemArena();
try {
- encoder_session_ = std::make_unique<Ort::Session>(env_, en_model.c_str(), session_options_);
+ encoder_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(en_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << en_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am encoder model: " << e.what();
- exit(0);
+ exit(-1);
}
try {
- decoder_session_ = std::make_unique<Ort::Session>(env_, de_model.c_str(), session_options_);
+ decoder_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(de_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << de_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am decoder model: " << e.what();
- exit(0);
+ exit(-1);
}
// encoder
@@ -152,11 +153,11 @@
// offline
try {
- m_session_ = std::make_unique<Ort::Session>(env_, am_model.c_str(), session_options_);
+ m_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(am_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << am_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am onnx model: " << e.what();
- exit(0);
+ exit(-1);
}
string strName;
@@ -164,16 +165,44 @@
m_strInputNames.push_back(strName.c_str());
GetInputName(m_session_.get(), strName,1);
m_strInputNames.push_back(strName);
+
+ if (use_hotword) {
+ GetInputName(m_session_.get(), strName, 2);
+ m_strInputNames.push_back(strName);
+ }
- GetOutputName(m_session_.get(), strName);
- m_strOutputNames.push_back(strName);
- GetOutputName(m_session_.get(), strName,1);
- m_strOutputNames.push_back(strName);
+ // support time stamp
+ size_t numOutputNodes = m_session_->GetOutputCount();
+ for(int index=0; index<numOutputNodes; index++){
+ GetOutputName(m_session_.get(), strName, index);
+ m_strOutputNames.push_back(strName);
+ }
for (auto& item : m_strInputNames)
m_szInputNames.push_back(item.c_str());
for (auto& item : m_strOutputNames)
m_szOutputNames.push_back(item.c_str());
+}
+
+void Paraformer::LoadConfigFromYaml(const char* filename){
+
+ YAML::Node config;
+ try{
+ config = YAML::LoadFile(filename);
+ }catch(exception const &e){
+ LOG(ERROR) << "Error loading file, yaml file error or not exist.";
+ exit(-1);
+ }
+
+ try{
+ YAML::Node lang_conf = config["lang"];
+ if (lang_conf.IsDefined()){
+ language = lang_conf.as<string>();
+ }
+ }catch(exception const &e){
+ LOG(ERROR) << "Error when load argument from vad config YAML.";
+ exit(-1);
+ }
}
void Paraformer::LoadOnlineConfigFromYaml(const char* filename){
@@ -221,11 +250,11 @@
hw_session_options.DisableCpuMemArena();
try {
- hw_m_session = std::make_unique<Ort::Session>(hw_env_, hw_model.c_str(), hw_session_options);
+ hw_m_session = std::make_unique<Ort::Session>(hw_env_, ORTSTRING(hw_model).c_str(), hw_session_options);
LOG(INFO) << "Successfully load model from " << hw_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load hw compiler onnx model: " << e.what();
- exit(0);
+ exit(-1);
}
string strName;
@@ -292,7 +321,7 @@
ifstream cmvn_stream(filename);
if (!cmvn_stream.is_open()) {
LOG(ERROR) << "Failed to open file: " << filename;
- exit(0);
+ exit(-1);
}
string line;
@@ -335,7 +364,7 @@
hyps.push_back(max_idx);
}
if(!is_stamp){
- return vocab->Vector2StringV2(hyps);
+ return vocab->Vector2StringV2(hyps, language);
}else{
std::vector<string> char_list;
std::vector<std::vector<float>> timestamp_list;
@@ -475,7 +504,9 @@
if (char_list.back() == "</s>") {
char_list.pop_back();
}
-
+ if (char_list.empty()) {
+ return ;
+ }
vector<vector<float>> timestamp_list;
vector<string> new_char_list;
vector<float> fire_place;
@@ -490,6 +521,9 @@
if(num_peak != (int)char_list.size() + 1){
float sum = std::accumulate(us_alphas.begin(), us_alphas.end(), 0.0f);
float scale = sum/((int)char_list.size() + 1);
+ if(scale == 0){
+ return;
+ }
cif_peak.clear();
sum = 0.0;
for(auto &alpha:us_alphas){
@@ -507,6 +541,11 @@
fire_place.push_back(i + total_offset);
}
}
+ }
+
+ num_peak = fire_place.size();
+ if(fire_place.size() == 0){
+ return;
}
// begin silence
@@ -530,6 +569,10 @@
}
// tail token and end silence
+ if(timestamp_list.size()==0){
+ LOG(ERROR)<<"timestamp_list's size is 0!";
+ return;
+ }
if (num_frames - fire_place.back() > START_END_THRESHOLD) {
float _end = (num_frames + fire_place.back()) / 2.0;
timestamp_list.back()[1] = _end * TIME_RATE;
@@ -641,7 +684,7 @@
return "";
}
//PrintMat(hw_emb, "input_clas_emb");
- const int64_t hotword_shape[3] = {1, hw_emb.size(), hw_emb[0].size()};
+ const int64_t hotword_shape[3] = {1, static_cast<int64_t>(hw_emb.size()), static_cast<int64_t>(hw_emb[0].size())};
embedding.reserve(hw_emb.size() * hw_emb[0].size());
for (auto item : hw_emb) {
embedding.insert(embedding.end(), item.begin(), item.end());
@@ -686,17 +729,6 @@
}else{
result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
}
-// int pos = 0;
-// std::vector<std::vector<float>> logits;
-// for (int j = 0; j < outputShape[1]; j++)
-// {
-// std::vector<float> vec_token;
-// vec_token.insert(vec_token.begin(), floatData + pos, floatData + pos + outputShape[2]);
-// logits.push_back(vec_token);
-// pos += outputShape[2];
-// }
-// //PrintMat(logits, "logits_out");
-// result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
}
catch (std::exception const &e)
{
--
Gitblit v1.9.1