| | |
| | | |
| | | #include "precomp.h" |
| | | |
| | | float *loadparams(const char *filename) |
| | | float *LoadParams(const char *filename) |
| | | { |
| | | |
| | | FILE *fp; |
| | |
| | | uint32_t nFileLen = ftell(fp); |
| | | fseek(fp, 0, SEEK_SET); |
| | | |
| | | float *params_addr = (float *)aligned_malloc(32, nFileLen); |
| | | float *params_addr = (float *)AlignedMalloc(32, nFileLen); |
| | | int n = fread(params_addr, 1, nFileLen, fp); |
| | | fclose(fp); |
| | | |
| | | return params_addr; |
| | | } |
| | | |
// Rounds `val` up to the next multiple of `align` (ceiling semantics).
// Replaces the float ceil() implementation, which silently loses precision
// once |val| exceeds 2^24 (float mantissa width); this integer version is
// exact for all representable inputs and matches ceil() for negative values
// too. `align` must be non-zero (division by zero, as before, is UB).
int ValAlign(int val, int align)
{
    int q = val / align;
    int r = val % align;
    // Bump the quotient only when there is a remainder pointing toward
    // +infinity — C's '/' truncates toward zero, ceil() rounds up.
    if (r != 0 && ((r > 0) == (align > 0))) {
        q += 1;
    }
    return q * align;
}
| | | |
// DispParams: debug helper that (presumably) prints `size` float values
// from `din` — TODO confirm once the body is recovered.
// NOTE(review): this block is garbled — two signature lines survive from a
// rename diff (disp_params -> DispParams), the loop body is missing, and
// `fclose(fp)` references a FILE* that is never declared here (probably
// spliced in from a neighboring I/O function during extraction). Recover
// the real body from version control before building.
void disp_params(float *din, int size)
void DispParams(float *din, int size)
{
int i;
for (i = 0; i < size; i++) {

fclose(fp);
}
| | | |
// BasicNorm: normalizes `din` in place using `norm`; reads the third
// dimension (size[2] — presumably the time axis, hence Tmax) — TODO confirm.
// NOTE(review): truncated block — two signature lines survive from a rename
// diff (basic_norm -> BasicNorm) and the loop body between Tmax and the two
// closing braces is missing. Recover from version control before building.
void basic_norm(Tensor<float> *&din, float norm)
void BasicNorm(Tensor<float> *&din, float norm)
{

int Tmax = din->size[2];

}
}
| | | |
// Finds the maximum value in din[0..len) and its index, returned through
// the reference out-parameters. On ties the first occurrence wins.
// NOTE(review): the scan loop was lost in extraction; reconstructed from
// the signature and the visible -INFINITY seed. max_idx is additionally
// seeded to -1 so len <= 0 yields a well-defined (-inf, -1) result instead
// of leaving max_idx uninitialized.
void FindMax(float *din, int len, float &max_val, int &max_idx)
{
    max_val = -INFINITY;
    max_idx = -1;
    for (int i = 0; i < len; i++) {
        if (din[i] > max_val) {
            max_val = din[i];
            max_idx = i;
        }
    }
}
| | | |
// Joins two path components, inserting '/' when `p1` lacks a trailing
// separator; when `p1` already ends in '/' (or is empty) the parts are
// concatenated directly.
// NOTE(review): the middle of this function was lost in extraction;
// reconstructed the conventional join from the visible `sep` declaration
// and fall-through return. The empty-p1 guard also avoids the
// out-of-bounds p1[p1.length()-1] read the usual form of this idiom has —
// confirm against version control.
string PathAppend(const string &p1, const string &p2)
{
    char sep = '/';

    if (!p1.empty() && p1[p1.length() - 1] != sep) {
        return p1 + sep + p2;
    }
    return (p1 + p2);
}
| | | |
| | | void relu(Tensor<float> *din) |
| | | void Relu(Tensor<float> *din) |
| | | { |
| | | int i; |
| | | for (i = 0; i < din->buff_size; i++) { |
| | |
| | | } |
| | | } |
| | | |
| | | void swish(Tensor<float> *din) |
| | | void Swish(Tensor<float> *din) |
| | | { |
| | | int i; |
| | | for (i = 0; i < din->buff_size; i++) { |
| | |
| | | } |
| | | } |
| | | |
| | | void sigmoid(Tensor<float> *din) |
| | | void Sigmoid(Tensor<float> *din) |
| | | { |
| | | int i; |
| | | for (i = 0; i < din->buff_size; i++) { |
| | |
| | | } |
| | | } |
| | | |
| | | void doubleswish(Tensor<float> *din) |
| | | void DoubleSwish(Tensor<float> *din) |
| | | { |
| | | int i; |
| | | for (i = 0; i < din->buff_size; i++) { |
| | |
| | | } |
| | | } |
| | | |
// Softmax over (apparently) the first `mask` elements of `din`, with `len`
// presumably the padded/total length — TODO confirm once the body is
// recovered.
// NOTE(review): truncated block — two signature lines survive from a rename
// diff (softmax -> Softmax) and the computation between the declarations
// and the closing braces is missing. As shown, `tmp` is never freed and
// malloc is not NULL-checked; verify both against the real body in version
// control.
void softmax(float *din, int mask, int len)
void Softmax(float *din, int mask, int len)
{
float *tmp = (float *)malloc(mask * sizeof(float));
int i;

}
}
| | | |
// Log-softmax over `len` elements of `din`, presumably in place with `tmp`
// as scratch space — TODO confirm once the body is recovered.
// NOTE(review): truncated block — two signature lines survive from a rename
// diff (log_softmax -> LogSoftmax) and the computation is missing; only the
// scratch allocation and its free remain. malloc is not NULL-checked.
// Recover the body from version control before building.
void log_softmax(float *din, int len)
void LogSoftmax(float *din, int len)
{
float *tmp = (float *)malloc(len * sizeof(float));
int i;

free(tmp);
}
| | | |
| | | void glu(Tensor<float> *din, Tensor<float> *dout) |
| | | void Glu(Tensor<float> *din, Tensor<float> *dout) |
| | | { |
| | | int mm = din->buff_size / 1024; |
| | | int i, j; |