From a05e753d11d9c36983ec4e58c421dbcf86d1dcd4 Mon Sep 17 00:00:00 2001
From: Xian Shi <40013335+R1ckShi@users.noreply.github.com>
Date: Tue, 17 Oct 2023 16:47:27 +0800
Subject: [PATCH] Merge branch 'main' into dev_onnx
---
funasr/runtime/onnxruntime/src/paraformer.cpp | 505 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 480 insertions(+), 25 deletions(-)
diff --git a/funasr/runtime/onnxruntime/src/paraformer.cpp b/funasr/runtime/onnxruntime/src/paraformer.cpp
index ef2a182..5bbaeef 100644
--- a/funasr/runtime/onnxruntime/src/paraformer.cpp
+++ b/funasr/runtime/onnxruntime/src/paraformer.cpp
@@ -4,13 +4,17 @@
*/
#include "precomp.h"
+#include "paraformer.h"
+#include "encode_converter.h"
+#include <cstddef>
using namespace std;
-
namespace funasr {
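+// the hw_env_/hw_session_options members back the optional hotword embedding
+// compiler; use_hotword stays false until InitHwCompiler() is called below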
Paraformer::Paraformer()
-:env_(ORT_LOGGING_LEVEL_ERROR, "paraformer"),session_options_{}{
+:use_hotword(false),
+ env_(ORT_LOGGING_LEVEL_ERROR, "paraformer"),session_options_{},
+ hw_env_(ORT_LOGGING_LEVEL_ERROR, "paraformer_hw"),hw_session_options{} {
}
// offline
@@ -33,11 +37,11 @@
session_options_.DisableCpuMemArena();
try {
- m_session_ = std::make_unique<Ort::Session>(env_, am_model.c_str(), session_options_);
+ m_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(am_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << am_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am onnx model: " << e.what();
- exit(0);
+ exit(-1);
}
string strName;
@@ -45,17 +49,23 @@
m_strInputNames.push_back(strName.c_str());
GetInputName(m_session_.get(), strName,1);
m_strInputNames.push_back(strName);
+ if (use_hotword) {
+ GetInputName(m_session_.get(), strName, 2);
+ m_strInputNames.push_back(strName);
+ }
- GetOutputName(m_session_.get(), strName);
- m_strOutputNames.push_back(strName);
- GetOutputName(m_session_.get(), strName,1);
- m_strOutputNames.push_back(strName);
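+    // enumerate every model output instead of hard-coding two, so model
+    // variants with extra outputs (e.g. timestamps) load through the same path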
+ size_t numOutputNodes = m_session_->GetOutputCount();
+ for(int index=0; index<numOutputNodes; index++){
+ GetOutputName(m_session_.get(), strName, index);
+ m_strOutputNames.push_back(strName);
+ }
for (auto& item : m_strInputNames)
m_szInputNames.push_back(item.c_str());
for (auto& item : m_strOutputNames)
m_szOutputNames.push_back(item.c_str());
vocab = new Vocab(am_config.c_str());
+ LoadConfigFromYaml(am_config.c_str());
LoadCmvn(am_cmvn.c_str());
}
@@ -80,19 +90,19 @@
session_options_.DisableCpuMemArena();
try {
- encoder_session_ = std::make_unique<Ort::Session>(env_, en_model.c_str(), session_options_);
+ encoder_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(en_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << en_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am encoder model: " << e.what();
- exit(0);
+ exit(-1);
}
try {
- decoder_session_ = std::make_unique<Ort::Session>(env_, de_model.c_str(), session_options_);
+ decoder_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(de_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << de_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am decoder model: " << e.what();
- exit(0);
+ exit(-1);
}
// encoder
@@ -143,11 +153,11 @@
// offline
try {
- m_session_ = std::make_unique<Ort::Session>(env_, am_model.c_str(), session_options_);
+ m_session_ = std::make_unique<Ort::Session>(env_, ORTSTRING(am_model).c_str(), session_options_);
LOG(INFO) << "Successfully load model from " << am_model;
} catch (std::exception const &e) {
LOG(ERROR) << "Error when load am onnx model: " << e.what();
- exit(0);
+ exit(-1);
}
string strName;
@@ -155,16 +165,44 @@
m_strInputNames.push_back(strName.c_str());
GetInputName(m_session_.get(), strName,1);
m_strInputNames.push_back(strName);
+
+ if (use_hotword) {
+ GetInputName(m_session_.get(), strName, 2);
+ m_strInputNames.push_back(strName);
+ }
- GetOutputName(m_session_.get(), strName);
- m_strOutputNames.push_back(strName);
- GetOutputName(m_session_.get(), strName,1);
- m_strOutputNames.push_back(strName);
+    // support timestamp models: enumerate all output heads
+ size_t numOutputNodes = m_session_->GetOutputCount();
+ for(int index=0; index<numOutputNodes; index++){
+ GetOutputName(m_session_.get(), strName, index);
+ m_strOutputNames.push_back(strName);
+ }
for (auto& item : m_strInputNames)
m_szInputNames.push_back(item.c_str());
for (auto& item : m_strOutputNames)
m_szOutputNames.push_back(item.c_str());
+}
+
+void Paraformer::LoadConfigFromYaml(const char* filename){
+
+ YAML::Node config;
+ try{
+ config = YAML::LoadFile(filename);
+ }catch(exception const &e){
+ LOG(ERROR) << "Error loading file, yaml file error or not exist.";
+ exit(-1);
+ }
+
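+    // the am config is expected to carry an optional top-level "lang" key,
+    // e.g. lang: zh (illustrative value, not part of this patch)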
+ try{
+ YAML::Node lang_conf = config["lang"];
+ if (lang_conf.IsDefined()){
+ language = lang_conf.as<string>();
+ }
+ }catch(exception const &e){
+ LOG(ERROR) << "Error when load argument from vad config YAML.";
+ exit(-1);
+ }
}
void Paraformer::LoadOnlineConfigFromYaml(const char* filename){
@@ -205,10 +243,47 @@
}
}
+void Paraformer::InitHwCompiler(const std::string &hw_model, int thread_num) {
+ hw_session_options.SetIntraOpNumThreads(thread_num);
+ hw_session_options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
+ // DisableCpuMemArena can improve performance
+ hw_session_options.DisableCpuMemArena();
+
+ try {
+ hw_m_session = std::make_unique<Ort::Session>(hw_env_, ORTSTRING(hw_model).c_str(), hw_session_options);
+ LOG(INFO) << "Successfully load model from " << hw_model;
+ } catch (std::exception const &e) {
+ LOG(ERROR) << "Error when load hw compiler onnx model: " << e.what();
+ exit(-1);
+ }
+
+ string strName;
+ GetInputName(hw_m_session.get(), strName);
+ hw_m_strInputNames.push_back(strName.c_str());
+ //GetInputName(hw_m_session.get(), strName,1);
+ //hw_m_strInputNames.push_back(strName);
+
+ GetOutputName(hw_m_session.get(), strName);
+ hw_m_strOutputNames.push_back(strName);
+
+ for (auto& item : hw_m_strInputNames)
+ hw_m_szInputNames.push_back(item.c_str());
+ for (auto& item : hw_m_strOutputNames)
+ hw_m_szOutputNames.push_back(item.c_str());
+ // if init hotword compiler is called, this is a hotword paraformer model
+ use_hotword = true;
+}
+
+void Paraformer::InitSegDict(const std::string &seg_dict_model) {
+ seg_dict = new SegDict(seg_dict_model.c_str());
+}
+
Paraformer::~Paraformer()
{
if(vocab)
delete vocab;
+ if(seg_dict)
+ delete seg_dict;
}
void Paraformer::Reset()
@@ -227,6 +302,10 @@
int32_t feature_dim = fbank_opts_.mel_opts.num_bins;
vector<float> features(frames * feature_dim);
float *p = features.data();
+ //std::cout << "samples " << len << std::endl;
+ //std::cout << "fbank frames " << frames << std::endl;
+ //std::cout << "fbank dim " << feature_dim << std::endl;
+ //std::cout << "feature size " << features.size() << std::endl;
for (int32_t i = 0; i != frames; ++i) {
const float *f = fbank_.GetFrame(i);
@@ -242,7 +321,7 @@
ifstream cmvn_stream(filename);
if (!cmvn_stream.is_open()) {
LOG(ERROR) << "Failed to open file: " << filename;
- exit(0);
+ exit(-1);
}
string line;
@@ -274,7 +353,7 @@
}
}
-string Paraformer::GreedySearch(float * in, int n_len, int64_t token_nums)
+string Paraformer::GreedySearch(float * in, int n_len, int64_t token_nums, bool is_stamp, std::vector<float> us_alphas, std::vector<float> us_cif_peak)
{
vector<int> hyps;
int Tmax = n_len;
@@ -284,8 +363,243 @@
FindMax(in + i * token_nums, token_nums, max_val, max_idx);
hyps.push_back(max_idx);
}
+ if(!is_stamp){
+ return vocab->Vector2StringV2(hyps, language);
+ }else{
+ std::vector<string> char_list;
+ std::vector<std::vector<float>> timestamp_list;
+ std::string res_str;
+ vocab->Vector2String(hyps, char_list);
+ std::vector<string> raw_char(char_list);
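+        // TimestampOnnx aligns one [start, end] pair (in seconds) to each
+        // token; PostProcess then merges sub-tokens into words and their spans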
+ TimestampOnnx(us_alphas, us_cif_peak, char_list, res_str, timestamp_list);
- return vocab->Vector2StringV2(hyps);
+ return PostProcess(raw_char, timestamp_list);
+ }
+}
+
+string Paraformer::PostProcess(std::vector<string> &raw_char, std::vector<std::vector<float>> &timestamp_list){
+ std::vector<std::vector<float>> timestamp_merge;
+ int i;
+ list<string> words;
+ int is_pre_english = false;
+ int pre_english_len = 0;
+ int is_combining = false;
+ string combine = "";
+
+ float begin=-1;
+ for (i=0; i<raw_char.size(); i++){
+ string word = raw_char[i];
+        // step1: skip special tokens (<s>, </s>, <unk>)
+ if (word == "<s>" || word == "</s>" || word == "<unk>")
+ continue;
+        // step2: combine subword pieces ("@@"-suffixed) into a full word
+ {
+ int sub_word = !(word.find("@@") == string::npos);
+ // process word start and middle part
+ if (sub_word) {
+ combine += word.erase(word.length() - 2);
+ if(!is_combining){
+ begin = timestamp_list[i][0];
+ }
+ is_combining = true;
+ continue;
+ }
+ // process word end part
+ else if (is_combining) {
+ combine += word;
+ is_combining = false;
+ word = combine;
+ combine = "";
+ }
+ }
+
+        // step3: insert spaces between English words (abbreviation upper-casing is disabled below)
+ {
+            // input word is Chinese, no processing needed
+ if (vocab->IsChinese(word)) {
+ words.push_back(word);
+ timestamp_merge.emplace_back(timestamp_list[i]);
+ is_pre_english = false;
+ }
+ // input word is english word
+ else {
+                // previous word is Chinese
+ if (!is_pre_english) {
+ // word[0] = word[0] - 32;
+ words.push_back(word);
+ begin = (begin==-1)?timestamp_list[i][0]:begin;
+ std::vector<float> vec = {begin, timestamp_list[i][1]};
+ timestamp_merge.emplace_back(vec);
+ begin = -1;
+ pre_english_len = word.size();
+ }
+ // pre word is english word
+ else {
+ // single letter turn to upper case
+ // if (word.size() == 1) {
+ // word[0] = word[0] - 32;
+ // }
+
+ if (pre_english_len > 1) {
+ words.push_back(" ");
+ words.push_back(word);
+ begin = (begin==-1)?timestamp_list[i][0]:begin;
+ std::vector<float> vec = {begin, timestamp_list[i][1]};
+ timestamp_merge.emplace_back(vec);
+ begin = -1;
+ pre_english_len = word.size();
+ }
+ else {
+ // if (word.size() > 1) {
+ // words.push_back(" ");
+ // }
+ words.push_back(" ");
+ words.push_back(word);
+ begin = (begin==-1)?timestamp_list[i][0]:begin;
+ std::vector<float> vec = {begin, timestamp_list[i][1]};
+ timestamp_merge.emplace_back(vec);
+ begin = -1;
+ pre_english_len = word.size();
+ }
+ }
+ is_pre_english = true;
+ }
+ }
+ }
+ string stamp_str="";
+ for (i=0; i<timestamp_merge.size(); i++) {
+ stamp_str += std::to_string(timestamp_merge[i][0]);
+ stamp_str += ", ";
+ stamp_str += std::to_string(timestamp_merge[i][1]);
+ if(i!=timestamp_merge.size()-1){
+ stamp_str += ",";
+ }
+ }
+
+ stringstream ss;
+ for (auto it = words.begin(); it != words.end(); it++) {
+ ss << *it;
+ }
+
+ return ss.str()+" | "+stamp_str;
+}
+
+void Paraformer::TimestampOnnx(std::vector<float>& us_alphas,
+ std::vector<float> us_cif_peak,
+ std::vector<string>& char_list,
+ std::string &res_str,
+                            std::vector<std::vector<float>> &timestamp_vec,
+ float begin_time,
+ float total_offset){
+ if (char_list.empty()) {
+ return ;
+ }
+
+ const float START_END_THRESHOLD = 5.0;
+ const float MAX_TOKEN_DURATION = 30.0;
+ const float TIME_RATE = 10.0 * 6 / 1000 / 3;
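+    // assumed derivation: 10 ms frame shift * LFR factor 6 = 60 ms per frame,
+    // / 1000 (ms -> s), / 3 (3x upsampling) => 0.02 s per upsampled frame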
+ // 3 times upsampled, cif_peak is flattened into a 1D array
+ std::vector<float> cif_peak = us_cif_peak;
+ int num_frames = cif_peak.size();
+ if (char_list.back() == "</s>") {
+ char_list.pop_back();
+ }
+ if (char_list.empty()) {
+ return ;
+ }
+ vector<vector<float>> timestamp_list;
+ vector<string> new_char_list;
+ vector<float> fire_place;
+ // for bicif model trained with large data, cif2 actually fires when a character starts
+ // so treat the frames between two peaks as the duration of the former token
+ for (int i = 0; i < num_frames; i++) {
+ if (cif_peak[i] > 1.0 - 1e-4) {
+ fire_place.push_back(i + total_offset);
+ }
+ }
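+    // a frame fires when the accumulated CIF weight crosses 1.0; the 1e-4
+    // tolerance absorbs floating-point rounding in the upsampled peaks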
+ int num_peak = fire_place.size();
+ if(num_peak != (int)char_list.size() + 1){
+ float sum = std::accumulate(us_alphas.begin(), us_alphas.end(), 0.0f);
+ float scale = sum/((int)char_list.size() + 1);
+ if(scale == 0){
+ return;
+ }
+ cif_peak.clear();
+ sum = 0.0;
+ for(auto &alpha:us_alphas){
+ alpha = alpha/scale;
+ sum += alpha;
+ cif_peak.emplace_back(sum);
+ if(sum>=1.0 - 1e-4){
+ sum -=(1.0 - 1e-4);
+ }
+ }
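+        // the rescaled alphas sum to char_list.size() + 1, so re-detecting
+        // peaks below yields exactly one firing per token plus the tail one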
+
+ fire_place.clear();
+ for (int i = 0; i < num_frames; i++) {
+ if (cif_peak[i] > 1.0 - 1e-4) {
+ fire_place.push_back(i + total_offset);
+ }
+ }
+ }
+
+ num_peak = fire_place.size();
+ if(fire_place.size() == 0){
+ return;
+ }
+
+ // begin silence
+ if (fire_place[0] > START_END_THRESHOLD) {
+ new_char_list.push_back("<sil>");
+ timestamp_list.push_back({0.0, fire_place[0] * TIME_RATE});
+ }
+
+ // tokens timestamp
+ for (int i = 0; i < num_peak - 1; i++) {
+ new_char_list.push_back(char_list[i]);
+ if (i == num_peak - 2 || MAX_TOKEN_DURATION < 0 || fire_place[i + 1] - fire_place[i] < MAX_TOKEN_DURATION) {
+ timestamp_list.push_back({fire_place[i] * TIME_RATE, fire_place[i + 1] * TIME_RATE});
+ } else {
+            // an over-long gap: cap the token at MAX_TOKEN_DURATION and mark the trailing 0-weight frames as <sil>
+ float _split = fire_place[i] + MAX_TOKEN_DURATION;
+ timestamp_list.push_back({fire_place[i] * TIME_RATE, _split * TIME_RATE});
+ timestamp_list.push_back({_split * TIME_RATE, fire_place[i + 1] * TIME_RATE});
+ new_char_list.push_back("<sil>");
+ }
+ }
+
+ // tail token and end silence
+ if(timestamp_list.size()==0){
+ LOG(ERROR)<<"timestamp_list's size is 0!";
+ return;
+ }
+ if (num_frames - fire_place.back() > START_END_THRESHOLD) {
+ float _end = (num_frames + fire_place.back()) / 2.0;
+ timestamp_list.back()[1] = _end * TIME_RATE;
+ timestamp_list.push_back({_end * TIME_RATE, num_frames * TIME_RATE});
+ new_char_list.push_back("<sil>");
+ } else {
+ timestamp_list.back()[1] = num_frames * TIME_RATE;
+ }
+
+    if (begin_time) { // add the VAD segment's begin time (ms) as an offset
+ for (auto& timestamp : timestamp_list) {
+ timestamp[0] += begin_time / 1000.0;
+ timestamp[1] += begin_time / 1000.0;
+ }
+ }
+
+ assert(new_char_list.size() == timestamp_list.size());
+
+ for (int i = 0; i < (int)new_char_list.size(); i++) {
+ res_str += new_char_list[i] + " " + to_string(timestamp_list[i][0]) + " " + to_string(timestamp_list[i][1]) + ";";
+ }
+
+ for (int i = 0; i < (int)new_char_list.size(); i++) {
+ if(new_char_list[i] != "<sil>"){
+ timestamp_vec.push_back(timestamp_list[i]);
+ }
+ }
}
vector<float> Paraformer::ApplyLfr(const std::vector<float> &in)
@@ -327,7 +641,7 @@
}
}
-string Paraformer::Forward(float* din, int len, bool input_finished)
+string Paraformer::Forward(float* din, int len, bool input_finished, const std::vector<std::vector<float>> &hw_emb)
{
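+    // hw_emb carries hotword embeddings from CompileHotwordEmbedding(); it is
+    // ignored when use_hotword is false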
int32_t in_feat_dim = fbank_opts_.mel_opts.num_bins;
@@ -337,6 +651,7 @@
int32_t feat_dim = lfr_m*in_feat_dim;
int32_t num_frames = wav_feats.size() / feat_dim;
+ //std::cout << "feat in: " << num_frames << " " << feat_dim << std::endl;
#ifdef _WIN_X86
Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
@@ -356,20 +671,64 @@
paraformer_length.emplace_back(num_frames);
Ort::Value onnx_feats_len = Ort::Value::CreateTensor<int32_t>(
m_memoryInfo, paraformer_length.data(), paraformer_length.size(), paraformer_length_shape, 1);
-
+
std::vector<Ort::Value> input_onnx;
input_onnx.emplace_back(std::move(onnx_feats));
input_onnx.emplace_back(std::move(onnx_feats_len));
- string result;
+ std::vector<float> embedding;
+ try{
+ if (use_hotword) {
+            if(hw_emb.empty()){
+                LOG(ERROR) << "hw_emb is empty";
+ return "";
+ }
+ //PrintMat(hw_emb, "input_clas_emb");
+ const int64_t hotword_shape[3] = {1, static_cast<int64_t>(hw_emb.size()), static_cast<int64_t>(hw_emb[0].size())};
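+            // flatten the [N, D] embedding rows into one contiguous buffer,
+            // since CreateTensor expects contiguous memory for shape [1, N, D]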
+ embedding.reserve(hw_emb.size() * hw_emb[0].size());
+ for (auto item : hw_emb) {
+ embedding.insert(embedding.end(), item.begin(), item.end());
+ }
+ //LOG(INFO) << "hotword shape " << hotword_shape[0] << " " << hotword_shape[1] << " " << hotword_shape[2] << " size " << embedding.size();
+ Ort::Value onnx_hw_emb = Ort::Value::CreateTensor<float>(
+ m_memoryInfo, embedding.data(), embedding.size(), hotword_shape, 3);
+
+ input_onnx.emplace_back(std::move(onnx_hw_emb));
+ }
+ }catch (std::exception const &e)
+ {
+ LOG(ERROR)<<e.what();
+ return "";
+ }
+
+ string result="";
try {
auto outputTensor = m_session_->Run(Ort::RunOptions{nullptr}, m_szInputNames.data(), input_onnx.data(), input_onnx.size(), m_szOutputNames.data(), m_szOutputNames.size());
std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
+ //LOG(INFO) << "paraformer out shape " << outputShape[0] << " " << outputShape[1] << " " << outputShape[2];
int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
float* floatData = outputTensor[0].GetTensorMutableData<float>();
auto encoder_out_lens = outputTensor[1].GetTensorMutableData<int64_t>();
- result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
+        // timestamp models emit 4 outputs: us_alphas (2) and us_cif_peak (3) follow the logits and lengths
+ if(outputTensor.size() == 4){
+ std::vector<int64_t> us_alphas_shape = outputTensor[2].GetTensorTypeAndShapeInfo().GetShape();
+ float* us_alphas_data = outputTensor[2].GetTensorMutableData<float>();
+ std::vector<float> us_alphas(us_alphas_shape[1]);
+ for (int i = 0; i < us_alphas_shape[1]; i++) {
+ us_alphas[i] = us_alphas_data[i];
+ }
+
+ std::vector<int64_t> us_peaks_shape = outputTensor[3].GetTensorTypeAndShapeInfo().GetShape();
+ float* us_peaks_data = outputTensor[3].GetTensorMutableData<float>();
+ std::vector<float> us_peaks(us_peaks_shape[1]);
+ for (int i = 0; i < us_peaks_shape[1]; i++) {
+ us_peaks[i] = us_peaks_data[i];
+ }
+ result = GreedySearch(floatData, *encoder_out_lens, outputShape[2], true, us_alphas, us_peaks);
+ }else{
+ result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
+ }
}
catch (std::exception const &e)
{
@@ -379,6 +738,102 @@
return result;
}
+
+std::vector<std::vector<float>> Paraformer::CompileHotwordEmbedding(std::string &hotwords) {
+ int embedding_dim = encoder_size;
+ std::vector<std::vector<float>> hw_emb;
+ if (!use_hotword) {
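+        // a plain (non-hotword) model gets a single all-zero embedding as an
+        // inert placeholder, keeping the Forward() interface uniform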
+ std::vector<float> vec(embedding_dim, 0);
+ hw_emb.push_back(vec);
+ return hw_emb;
+ }
+ int max_hotword_len = 10;
+ std::vector<int32_t> hotword_matrix;
+ std::vector<int32_t> lengths;
+ int hotword_size = 1;
+ int real_hw_size = 0;
+ if (!hotwords.empty()) {
+ std::vector<std::string> hotword_array = split(hotwords, ' ');
+ hotword_size = hotword_array.size() + 1;
+ hotword_matrix.reserve(hotword_size * max_hotword_len);
+ for (auto hotword : hotword_array) {
+ std::vector<std::string> chars;
+ if (EncodeConverter::IsAllChineseCharactor((const U8CHAR_T*)hotword.c_str(), hotword.size())) {
+ KeepChineseCharacterAndSplit(hotword, chars);
+ } else {
+ // for english
+ std::vector<std::string> words = split(hotword, ' ');
+ for (auto word : words) {
+ std::vector<string> tokens = seg_dict->GetTokensByWord(word);
+ chars.insert(chars.end(), tokens.begin(), tokens.end());
+ }
+ }
+ if(chars.size()==0){
+ continue;
+ }
+ std::vector<int32_t> hw_vector(max_hotword_len, 0);
+ int vector_len = std::min(max_hotword_len, (int)chars.size());
+        for (int i=0; i<vector_len; i++) { // bound by vector_len so hw_vector is not overrun
+ std::cout << chars[i] << " ";
+ hw_vector[i] = vocab->GetIdByToken(chars[i]);
+ }
+ std::cout << std::endl;
+ lengths.push_back(vector_len);
+ real_hw_size += 1;
+ hotword_matrix.insert(hotword_matrix.end(), hw_vector.begin(), hw_vector.end());
+ }
+ hotword_size = real_hw_size + 1;
+ }
+ std::vector<int32_t> blank_vec(max_hotword_len, 0);
+ blank_vec[0] = 1;
+ hotword_matrix.insert(hotword_matrix.end(), blank_vec.begin(), blank_vec.end());
+ lengths.push_back(1);
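+    // a trailing "blank" hotword row (token id 1, length 1) is always appended
+    // so the model keeps a no-hotword option; hotword_size counts this row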
+
+#ifdef _WIN_X86
+ Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
+#else
+ Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+#endif
+
+ const int64_t input_shape_[2] = {hotword_size, max_hotword_len};
+ Ort::Value onnx_hotword = Ort::Value::CreateTensor<int32_t>(m_memoryInfo,
+ (int32_t*)hotword_matrix.data(),
+ hotword_size * max_hotword_len,
+ input_shape_,
+ 2);
+ LOG(INFO) << "clas shape " << hotword_size << " " << max_hotword_len << std::endl;
+
+ std::vector<Ort::Value> input_onnx;
+ input_onnx.emplace_back(std::move(onnx_hotword));
+
+ std::vector<std::vector<float>> result;
+ try {
+ auto outputTensor = hw_m_session->Run(Ort::RunOptions{nullptr}, hw_m_szInputNames.data(), input_onnx.data(), input_onnx.size(), hw_m_szOutputNames.data(), hw_m_szOutputNames.size());
+ std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
+
+ int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
+ float* floatData = outputTensor[0].GetTensorMutableData<float>(); // shape [max_hotword_len, hotword_size, dim]
+ // get embedding by real hotword length
+ assert(outputShape[0] == max_hotword_len);
+ assert(outputShape[1] == hotword_size);
+ embedding_dim = outputShape[2];
+
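+    // the output is laid out [max_hotword_len, hotword_size, dim]; for hotword
+    // j take the step at its last valid position (lengths[j] - 1):
+    // start_pos = (lengths[j]-1) * hotword_size * dim + j * dim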
+ for (int j = 0; j < hotword_size; j++)
+ {
+ int start_pos = hotword_size * (lengths[j] - 1) * embedding_dim + j * embedding_dim;
+ std::vector<float> embedding;
+ embedding.insert(embedding.begin(), floatData + start_pos, floatData + start_pos + embedding_dim);
+ result.push_back(embedding);
+ }
+ }
+ catch (std::exception const &e)
+ {
+ LOG(ERROR)<<e.what();
+ }
+ //PrintMat(result, "clas_embedding_output");
+ return result;
+}
+
string Paraformer::Rescoring()
{
LOG(ERROR)<<"Not Imp!!!!!!";
--
Gitblit v1.9.1