From e0fa63765bfb4a36bde7047c2a6066ca5a80e90f Mon Sep 17 00:00:00 2001
From: Yabin Li <wucong.lyb@alibaba-inc.com>
Date: Mon, 21 Aug 2023 10:37:42 +0800
Subject: [PATCH] Dev hw (#878)

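Add hotword (clas) customization support to the onnxruntime Paraformer
runtime: load an optional hotword compiler model (InitHwCompiler) and a seg
dict for English tokenization (InitSegDict), compile hotwords into embeddings
(CompileHotwordEmbedding), and pass those embeddings to Forward as a third
model input.

A minimal usage sketch; the model paths are hypothetical, samples and
num_samples stand for the caller's audio buffer, and the acoustic model
itself is assumed to be loaded through the existing init path (not part of
this patch):

    Paraformer model;
    model.InitHwCompiler("hotword_compiler.onnx", /*thread_num=*/1);
    model.InitSegDict("seg_dict");
    std::string hotwords = "阿里巴巴 nvidia";  // space-separated hotwords
    auto hw_emb = model.CompileHotwordEmbedding(hotwords);
    string text = model.Forward(samples, num_samples, true, hw_emb);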
---
 funasr/runtime/onnxruntime/src/paraformer.cpp |  185 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 181 insertions(+), 4 deletions(-)

diff --git a/funasr/runtime/onnxruntime/src/paraformer.cpp b/funasr/runtime/onnxruntime/src/paraformer.cpp
index e2c695c..cbaab2d 100644
--- a/funasr/runtime/onnxruntime/src/paraformer.cpp
+++ b/funasr/runtime/onnxruntime/src/paraformer.cpp
@@ -4,13 +4,17 @@
 */
 
 #include "precomp.h"
+#include "paraformer.h"
+#include "encode_converter.h"
+#include <cstddef>
 
 using namespace std;
-
 namespace funasr {
 
 Paraformer::Paraformer()
-:env_(ORT_LOGGING_LEVEL_ERROR, "paraformer"),session_options_{}{
+// seg_dict is assumed to lack an in-class initializer (not visible in this
+// patch), so it is set to nullptr here to keep the destructor's
+// `delete seg_dict` safe when InitSegDict is never called.
+:use_hotword(false), seg_dict(nullptr),
+ env_(ORT_LOGGING_LEVEL_ERROR, "paraformer"),session_options_{},
+ hw_env_(ORT_LOGGING_LEVEL_ERROR, "paraformer_hw"),hw_session_options{} {
 }
 
 // offline
@@ -45,6 +49,10 @@
     m_strInputNames.push_back(strName.c_str());
     GetInputName(m_session_.get(), strName,1);
     m_strInputNames.push_back(strName);
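+    // hotword models take a third input: the compiled hotword embeddings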
+    if (use_hotword) {
+        GetInputName(m_session_.get(), strName, 2);
+        m_strInputNames.push_back(strName);
+    }
     
     size_t numOutputNodes = m_session_->GetOutputCount();
     for(int index=0; index<numOutputNodes; index++){
@@ -206,10 +214,47 @@
     }
 }
 
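+// Load the hotword (clas) compiler onnx model; CompileHotwordEmbedding runs it
+// to turn hotword token ids into the embeddings consumed by Forward.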
+void Paraformer::InitHwCompiler(const std::string &hw_model, int thread_num) {
+    hw_session_options.SetIntraOpNumThreads(thread_num);
+    hw_session_options.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
+    // DisableCpuMemArena can improve performance
+    hw_session_options.DisableCpuMemArena();
+
+    try {
+        hw_m_session = std::make_unique<Ort::Session>(hw_env_, hw_model.c_str(), hw_session_options);
+        LOG(INFO) << "Successfully load model from " << hw_model;
+    } catch (std::exception const &e) {
+        LOG(ERROR) << "Error when load hw compiler onnx model: " << e.what();
+        exit(0);
+    }
+
+    string strName;
+    GetInputName(hw_m_session.get(), strName);
+    hw_m_strInputNames.push_back(strName);
+    //GetInputName(hw_m_session.get(), strName,1);
+    //hw_m_strInputNames.push_back(strName);
+    
+    GetOutputName(hw_m_session.get(), strName);
+    hw_m_strOutputNames.push_back(strName);
+
+    for (auto& item : hw_m_strInputNames)
+        hw_m_szInputNames.push_back(item.c_str());
+    for (auto& item : hw_m_strOutputNames)
+        hw_m_szOutputNames.push_back(item.c_str());
+    // once the hotword compiler is loaded, treat this as a hotword paraformer model
+    use_hotword = true;
+}
+
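+// Load the seg dict that maps English words to model tokens
+// (used by CompileHotwordEmbedding below).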
+void Paraformer::InitSegDict(const std::string &seg_dict_model) {
+    seg_dict = new SegDict(seg_dict_model.c_str());
+}
+
 Paraformer::~Paraformer()
 {
     if(vocab)
         delete vocab;
+    if(seg_dict)
+        delete seg_dict;
 }
 
 void Paraformer::Reset()
@@ -228,6 +273,10 @@
     int32_t feature_dim = fbank_opts_.mel_opts.num_bins;
     vector<float> features(frames * feature_dim);
     float *p = features.data();
+    //std::cout << "samples " << len << std::endl;
+    //std::cout << "fbank frames " << frames << std::endl;
+    //std::cout << "fbank dim " << feature_dim << std::endl;
+    //std::cout << "feature size " << features.size() << std::endl;
 
     for (int32_t i = 0; i != frames; ++i) {
         const float *f = fbank_.GetFrame(i);
@@ -549,7 +598,7 @@
     }
   }
 
-string Paraformer::Forward(float* din, int len, bool input_finished)
+string Paraformer::Forward(float* din, int len, bool input_finished, const std::vector<std::vector<float>> &hw_emb)
 {
 
     int32_t in_feat_dim = fbank_opts_.mel_opts.num_bins;
@@ -559,6 +608,7 @@
 
     int32_t feat_dim = lfr_m*in_feat_dim;
     int32_t num_frames = wav_feats.size() / feat_dim;
+    //std::cout << "feat in: " << num_frames << " " << feat_dim << std::endl;
 
 #ifdef _WIN_X86
         Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
@@ -578,15 +628,41 @@
     paraformer_length.emplace_back(num_frames);
     Ort::Value onnx_feats_len = Ort::Value::CreateTensor<int32_t>(
           m_memoryInfo, paraformer_length.data(), paraformer_length.size(), paraformer_length_shape, 1);
-    
+
     std::vector<Ort::Value> input_onnx;
     input_onnx.emplace_back(std::move(onnx_feats));
     input_onnx.emplace_back(std::move(onnx_feats_len));
+
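+    // flatten the precompiled hotword embeddings into a
+    // [1, num_hotwords, embedding_dim] tensor, the model's third input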
+    std::vector<float> embedding;
+    try{
+        if (use_hotword) {
+            if (hw_emb.empty()) {
+                LOG(ERROR) << "hw_emb is empty";
+                return "";
+            }
+            //PrintMat(hw_emb, "input_clas_emb");
+            const int64_t hotword_shape[3] = {
+                1, static_cast<int64_t>(hw_emb.size()), static_cast<int64_t>(hw_emb[0].size())};
+            embedding.reserve(hw_emb.size() * hw_emb[0].size());
+            for (const auto &item : hw_emb) {
+                embedding.insert(embedding.end(), item.begin(), item.end());
+            }
+            //LOG(INFO) << "hotword shape " << hotword_shape[0] << " " << hotword_shape[1] << " " << hotword_shape[2] << " size " << embedding.size();
+            Ort::Value onnx_hw_emb = Ort::Value::CreateTensor<float>(
+                m_memoryInfo, embedding.data(), embedding.size(), hotword_shape, 3);
+
+            input_onnx.emplace_back(std::move(onnx_hw_emb));
+        }
+    }catch (std::exception const &e)
+    {
+        LOG(ERROR)<<e.what();
+        return "";
+    }
 
     string result;
     try {
         auto outputTensor = m_session_->Run(Ort::RunOptions{nullptr}, m_szInputNames.data(), input_onnx.data(), input_onnx.size(), m_szOutputNames.data(), m_szOutputNames.size());
         std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
+        //LOG(INFO) << "paraformer out shape " << outputShape[0] << " " << outputShape[1] << " " << outputShape[2];
 
         int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
         float* floatData = outputTensor[0].GetTensorMutableData<float>();
@@ -610,6 +686,17 @@
         }else{
             result = GreedySearch(floatData, *encoder_out_lens, outputShape[2]);
         }
     }
     catch (std::exception const &e)
     {
@@ -619,6 +706,96 @@
     return result;
 }
 
+
+std::vector<std::vector<float>> Paraformer::CompileHotwordEmbedding(std::string &hotwords) {
+    int embedding_dim = encoder_size;
+    std::vector<std::vector<float>> hw_emb;
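+    // non-hotword models still return a single all-zero embedding as a placeholder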
+    if (!use_hotword) {
+        std::vector<float> vec(embedding_dim, 0);
+        hw_emb.push_back(vec);
+        return hw_emb;
+    }
+    int max_hotword_len = 10;
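+    // each hotword is padded (or truncated) to max_hotword_len token ids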
+    std::vector<int32_t> hotword_matrix;
+    std::vector<int32_t> lengths;
+    int hotword_size = 1;
+    if (!hotwords.empty()) {
+      std::vector<std::string> hotword_array = split(hotwords, ' ');
+      hotword_size = hotword_array.size() + 1;
+      hotword_matrix.reserve(hotword_size * max_hotword_len);
+      for (const auto &hotword : hotword_array) {
+        std::vector<std::string> chars;
+        if (EncodeConverter::IsAllChineseCharactor((const U8CHAR_T*)hotword.c_str(), hotword.size())) {
+          KeepChineseCharacterAndSplit(hotword, chars);
+        } else {
+          // for English: split into words and map each word to tokens via the seg dict
+          std::vector<std::string> words = split(hotword, ' ');
+          for (const auto &word : words) {
+            std::vector<string> tokens = seg_dict->GetTokensByWord(word);
+            chars.insert(chars.end(), tokens.begin(), tokens.end());
+          }
+        }
+        std::vector<int32_t> hw_vector(max_hotword_len, 0);
+        int vector_len = std::min(max_hotword_len, (int)chars.size());
+        // bound by vector_len (not chars.size()) so writes stay inside hw_vector
+        for (int i = 0; i < vector_len; i++) {
+          std::cout << chars[i] << " ";
+          hw_vector[i] = vocab->GetIdByToken(chars[i]);
+        }
+        std::cout << std::endl;
+        lengths.push_back(vector_len);
+        hotword_matrix.insert(hotword_matrix.end(), hw_vector.begin(), hw_vector.end());
+      }
+    }
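+    // always append a "blank" entry so the compiler receives at least one row,
+    // even when no hotwords are given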
+    std::vector<int32_t> blank_vec(max_hotword_len, 0);
+    blank_vec[0] = 1;
+    hotword_matrix.insert(hotword_matrix.end(), blank_vec.begin(), blank_vec.end());
+    lengths.push_back(1);
+
+#ifdef _WIN_X86
+        Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
+#else
+        Ort::MemoryInfo m_memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
+#endif
+
+    const int64_t input_shape_[2] = {hotword_size, max_hotword_len};
+    Ort::Value onnx_hotword = Ort::Value::CreateTensor<int32_t>(m_memoryInfo,
+        hotword_matrix.data(),
+        hotword_size * max_hotword_len,
+        input_shape_,
+        2);
+    LOG(INFO) << "clas shape " << hotword_size << " " << max_hotword_len << std::endl;
+    
+    std::vector<Ort::Value> input_onnx;
+    input_onnx.emplace_back(std::move(onnx_hotword));
+
+    std::vector<std::vector<float>> result;
+    try {
+        auto outputTensor = hw_m_session->Run(Ort::RunOptions{nullptr}, hw_m_szInputNames.data(), input_onnx.data(), input_onnx.size(), hw_m_szOutputNames.data(), hw_m_szOutputNames.size());
+        std::vector<int64_t> outputShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();
+
+        int64_t outputCount = std::accumulate(outputShape.begin(), outputShape.end(), 1, std::multiplies<int64_t>());
+        float* floatData = outputTensor[0].GetTensorMutableData<float>(); // shape [max_hotword_len, hotword_size, dim]
+        // get embedding by real hotword length
+        assert(outputShape[0] == max_hotword_len);
+        assert(outputShape[1] == hotword_size);
+        embedding_dim = outputShape[2];
+
+        for (int j = 0; j < hotword_size; j++)
+        {
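+            // output rows are laid out [token_step][hotword]: row (lengths[j]-1)
+            // holds the final-token embedding of hotword j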
+            int start_pos = hotword_size * (lengths[j] - 1) * embedding_dim + j * embedding_dim;
+            std::vector<float> embedding;
+            embedding.insert(embedding.begin(), floatData + start_pos, floatData + start_pos + embedding_dim);
+            result.push_back(embedding);
+        }
+    }
+    catch (std::exception const &e)
+    {
+        LOG(ERROR)<<e.what();
+    }
+    //PrintMat(result, "clas_embedding_output");
+    return result;
+}
+
 string Paraformer::Rescoring()
 {
     LOG(ERROR)<<"Not Imp!!!!!!";

--
Gitblit v1.9.1