From 7ab2e5cf22bbb31808bcacf84c054c710e4e6a93 Mon Sep 17 00:00:00 2001
From: Yabin Li <wucong.lyb@alibaba-inc.com>
Date: Mon, 24 Apr 2023 16:19:17 +0800
Subject: [PATCH] Merge pull request #400 from alibaba-damo-academy/dev_knf

---
 funasr/runtime/onnxruntime/src/util.cpp |   28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/funasr/runtime/onnxruntime/src/util.cpp b/funasr/runtime/onnxruntime/src/util.cpp
index 5a72c72..c5c27af 100644
--- a/funasr/runtime/onnxruntime/src/util.cpp
+++ b/funasr/runtime/onnxruntime/src/util.cpp
@@ -1,7 +1,7 @@
 
 #include "precomp.h"
 
-float *loadparams(const char *filename)
+float *LoadParams(const char *filename)
 {
 
     FILE *fp;
@@ -10,20 +10,20 @@
     uint32_t nFileLen = ftell(fp);
     fseek(fp, 0, SEEK_SET);
 
-    float *params_addr = (float *)aligned_malloc(32, nFileLen);
+    float *params_addr = (float *)AlignedMalloc(32, nFileLen);
     int n = fread(params_addr, 1, nFileLen, fp);
     fclose(fp);
 
     return params_addr;
 }
 
-int val_align(int val, int align)
+int ValAlign(int val, int align)
 {
     float tmp = ceil((float)val / (float)align) * (float)align;
     return (int)tmp;
 }
 
-void disp_params(float *din, int size)
+void DispParams(float *din, int size)
 {
     int i;
     for (i = 0; i < size; i++) {
@@ -39,7 +39,7 @@
     fclose(fp);
 }
 
-void basic_norm(Tensor<float> *&din, float norm)
+void BasicNorm(Tensor<float> *&din, float norm)
 {
 
     int Tmax = din->size[2];
@@ -59,7 +59,7 @@
     }
 }
 
-void findmax(float *din, int len, float &max_val, int &max_idx)
+void FindMax(float *din, int len, float &max_val, int &max_idx)
 {
     int i;
     max_val = -INFINITY;
@@ -72,7 +72,7 @@
     }
 }
 
-string pathAppend(const string &p1, const string &p2)
+string PathAppend(const string &p1, const string &p2)
 {
 
     char sep = '/';
@@ -89,7 +89,7 @@
         return (p1 + p2);
 }
 
-void relu(Tensor<float> *din)
+void Relu(Tensor<float> *din)
 {
     int i;
     for (i = 0; i < din->buff_size; i++) {
@@ -98,7 +98,7 @@
     }
 }
 
-void swish(Tensor<float> *din)
+void Swish(Tensor<float> *din)
 {
     int i;
     for (i = 0; i < din->buff_size; i++) {
@@ -107,7 +107,7 @@
     }
 }
 
-void sigmoid(Tensor<float> *din)
+void Sigmoid(Tensor<float> *din)
 {
     int i;
     for (i = 0; i < din->buff_size; i++) {
@@ -116,7 +116,7 @@
     }
 }
 
-void doubleswish(Tensor<float> *din)
+void DoubleSwish(Tensor<float> *din)
 {
     int i;
     for (i = 0; i < din->buff_size; i++) {
@@ -125,7 +125,7 @@
     }
 }
 
-void softmax(float *din, int mask, int len)
+void Softmax(float *din, int mask, int len)
 {
     float *tmp = (float *)malloc(mask * sizeof(float));
     int i;
@@ -149,7 +149,7 @@
     }
 }
 
-void log_softmax(float *din, int len)
+void LogSoftmax(float *din, int len)
 {
     float *tmp = (float *)malloc(len * sizeof(float));
     int i;
@@ -164,7 +164,7 @@
     free(tmp);
 }
 
-void glu(Tensor<float> *din, Tensor<float> *dout)
+void Glu(Tensor<float> *din, Tensor<float> *dout)
 {
     int mm = din->buff_size / 1024;
     int i, j;

--
Gitblit v1.9.1