From 8a8d60d5786510ec7b1dd4f622e848de8a15f8a8 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Mon, 15 Jan 2024 16:36:51 +0800
Subject: [PATCH] replace NULL for onnxruntime/src
---
runtime/onnxruntime/src/encode_converter.cpp | 14 ++--
runtime/onnxruntime/src/alignedmem.cpp | 4
runtime/onnxruntime/src/encode_converter.h | 6 +-
runtime/onnxruntime/src/audio.cpp | 92 +++++++++++++++---------------
runtime/onnxruntime/src/funasrruntime.cpp | 10 +-
runtime/onnxruntime/src/tokenizer.cpp | 4
runtime/onnxruntime/src/bias-lm.h | 8 +-
7 files changed, 69 insertions(+), 69 deletions(-)
diff --git a/runtime/onnxruntime/src/alignedmem.cpp b/runtime/onnxruntime/src/alignedmem.cpp
index 9c7d323..b72c623 100644
--- a/runtime/onnxruntime/src/alignedmem.cpp
+++ b/runtime/onnxruntime/src/alignedmem.cpp
@@ -6,8 +6,8 @@
void *p1; // original block
void **p2; // aligned block
int offset = alignment - 1 + sizeof(void *);
- if ((p1 = (void *)malloc(required_bytes + offset)) == NULL) {
- return NULL;
+ if ((p1 = (void *)malloc(required_bytes + offset)) == nullptr) {
+ return nullptr;
}
p2 = (void **)(((size_t)(p1) + offset) & ~(alignment - 1));
p2[-1] = p1;
diff --git a/runtime/onnxruntime/src/audio.cpp b/runtime/onnxruntime/src/audio.cpp
index a2c19dc..40ea871 100644
--- a/runtime/onnxruntime/src/audio.cpp
+++ b/runtime/onnxruntime/src/audio.cpp
@@ -160,7 +160,7 @@
len = end - start;
}
AudioFrame::~AudioFrame(){
- if(data != NULL){
+ if(data != nullptr){
free(data);
}
}
@@ -195,37 +195,37 @@
Audio::Audio(int data_type) : dest_sample_rate(MODEL_SAMPLE_RATE), data_type(data_type)
{
- speech_buff = NULL;
- speech_data = NULL;
+ speech_buff = nullptr;
+ speech_data = nullptr;
align_size = 1360;
seg_sample = dest_sample_rate / 1000;
}
Audio::Audio(int model_sample_rate, int data_type) : dest_sample_rate(model_sample_rate), data_type(data_type)
{
- speech_buff = NULL;
- speech_data = NULL;
+ speech_buff = nullptr;
+ speech_data = nullptr;
align_size = 1360;
seg_sample = dest_sample_rate / 1000;
}
Audio::Audio(int model_sample_rate, int data_type, int size) : dest_sample_rate(model_sample_rate), data_type(data_type)
{
- speech_buff = NULL;
- speech_data = NULL;
+ speech_buff = nullptr;
+ speech_data = nullptr;
align_size = (float)size;
seg_sample = dest_sample_rate / 1000;
}
Audio::~Audio()
{
- if (speech_buff != NULL) {
+ if (speech_buff != nullptr) {
free(speech_buff);
}
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
- if (speech_char != NULL) {
+ if (speech_char != nullptr) {
free(speech_char);
}
ClearQueue(frame_queue);
@@ -269,7 +269,7 @@
resampler->Resample(waveform, n, true, &samples);
//reset speech_data
speech_len = samples.size();
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
speech_data = (float*)malloc(sizeof(float) * speech_len);
@@ -283,21 +283,21 @@
#else
// from file
AVFormatContext* formatContext = avformat_alloc_context();
- if (avformat_open_input(&formatContext, filename, NULL, NULL) != 0) {
+ if (avformat_open_input(&formatContext, filename, nullptr, nullptr) != 0) {
LOG(ERROR) << "Error: Could not open input file.";
avformat_close_input(&formatContext);
avformat_free_context(formatContext);
return false;
}
- if (avformat_find_stream_info(formatContext, NULL) < 0) {
+ if (avformat_find_stream_info(formatContext, nullptr) < 0) {
LOG(ERROR) << "Error: Could not open input file.";
avformat_close_input(&formatContext);
avformat_free_context(formatContext);
return false;
}
- const AVCodec* codec = NULL;
- AVCodecParameters* codecParameters = NULL;
+ const AVCodec* codec = nullptr;
+ AVCodecParameters* codecParameters = nullptr;
int audioStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
if (audioStreamIndex >= 0) {
codecParameters = formatContext->streams[audioStreamIndex]->codecpar;
@@ -321,7 +321,7 @@
avcodec_free_context(&codecContext);
return false;
}
- if (avcodec_open2(codecContext, codec, NULL) < 0) {
+ if (avcodec_open2(codecContext, codec, nullptr) < 0) {
LOG(ERROR) << "Error: Could not open audio decoder.";
avformat_close_input(&formatContext);
avformat_free_context(formatContext);
@@ -400,10 +400,10 @@
av_packet_free(&packet);
av_frame_free(&frame);
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
- if (speech_char != NULL) {
+ if (speech_char != nullptr) {
free(speech_char);
}
offset = 0;
@@ -460,7 +460,7 @@
}
AVFormatContext* formatContext = avformat_alloc_context();
formatContext->pb = avio_ctx;
- if (avformat_open_input(&formatContext, "", NULL, NULL) != 0) {
+ if (avformat_open_input(&formatContext, "", nullptr, nullptr) != 0) {
LOG(ERROR) << "Error: Could not open input file.";
avio_context_free(&avio_ctx);
avformat_close_input(&formatContext);
@@ -468,15 +468,15 @@
return false;
}
- if (avformat_find_stream_info(formatContext, NULL) < 0) {
+ if (avformat_find_stream_info(formatContext, nullptr) < 0) {
LOG(ERROR) << "Error: Could not find stream information.";
avio_context_free(&avio_ctx);
avformat_close_input(&formatContext);
avformat_free_context(formatContext);
return false;
}
- const AVCodec* codec = NULL;
- AVCodecParameters* codecParameters = NULL;
+ const AVCodec* codec = nullptr;
+ AVCodecParameters* codecParameters = nullptr;
int audioStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
if (audioStreamIndex >= 0) {
codecParameters = formatContext->streams[audioStreamIndex]->codecpar;
@@ -497,7 +497,7 @@
avcodec_free_context(&codecContext);
return false;
}
- if (avcodec_open2(codecContext, codec, NULL) < 0) {
+ if (avcodec_open2(codecContext, codec, nullptr) < 0) {
LOG(ERROR) << "Error: Could not open audio decoder.";
avio_context_free(&avio_ctx);
avformat_close_input(&formatContext);
@@ -582,7 +582,7 @@
av_packet_free(&packet);
av_frame_free(&frame);
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
@@ -613,10 +613,10 @@
bool Audio::LoadWav(const char *filename, int32_t* sampling_rate, bool resample)
{
WaveHeader header;
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
- if (speech_buff != NULL) {
+ if (speech_buff != nullptr) {
free(speech_buff);
}
@@ -688,7 +688,7 @@
bool Audio::LoadWav2Char(const char *filename, int32_t* sampling_rate)
{
WaveHeader header;
- if (speech_char != NULL) {
+ if (speech_char != nullptr) {
free(speech_char);
}
offset = 0;
@@ -727,10 +727,10 @@
bool Audio::LoadWav(const char* buf, int n_file_len, int32_t* sampling_rate)
{
WaveHeader header;
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
- if (speech_buff != NULL) {
+ if (speech_buff != nullptr) {
free(speech_buff);
}
@@ -772,7 +772,7 @@
bool Audio::LoadPcmwav(const char* buf, int n_buf_len, int32_t* sampling_rate)
{
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
@@ -805,7 +805,7 @@
bool Audio::LoadPcmwavOnline(const char* buf, int n_buf_len, int32_t* sampling_rate)
{
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
@@ -842,10 +842,10 @@
bool Audio::LoadPcmwav(const char* filename, int32_t* sampling_rate, bool resample)
{
- if (speech_data != NULL) {
+ if (speech_data != nullptr) {
free(speech_data);
}
- if (speech_buff != NULL) {
+ if (speech_buff != nullptr) {
free(speech_buff);
}
offset = 0;
@@ -897,7 +897,7 @@
bool Audio::LoadPcmwav2Char(const char* filename, int32_t* sampling_rate)
{
- if (speech_char != NULL) {
+ if (speech_char != nullptr) {
free(speech_char);
}
offset = 0;
@@ -924,7 +924,7 @@
bool Audio::LoadOthers2Char(const char* filename)
{
- if (speech_char != NULL) {
+ if (speech_char != nullptr) {
free(speech_char);
}
@@ -1048,7 +1048,7 @@
frame_queue.pop();
int sp_len = frame->GetLen();
delete frame;
- frame = NULL;
+ frame = nullptr;
std::vector<float> pcm_data(speech_data, speech_data+sp_len);
vector<std::vector<int>> vad_segments = (offline_stream->vad_handle)->Infer(pcm_data);
@@ -1060,7 +1060,7 @@
frame->SetStart(start);
frame->SetEnd(end);
frame_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
}
}
@@ -1072,7 +1072,7 @@
frame_queue.pop();
int sp_len = frame->GetLen();
delete frame;
- frame = NULL;
+ frame = nullptr;
std::vector<float> pcm_data(speech_data, speech_data+sp_len);
vad_segments = vad_obj->Infer(pcm_data, input_finished);
@@ -1087,7 +1087,7 @@
frame_queue.pop();
int sp_len = frame->GetLen();
delete frame;
- frame = NULL;
+ frame = nullptr;
std::vector<float> pcm_data(speech_data, speech_data+sp_len);
vector<std::vector<int>> vad_segments = vad_obj->Infer(pcm_data, input_finished);
@@ -1108,7 +1108,7 @@
frame->data = (float*)malloc(sizeof(float) * step);
memcpy(frame->data, all_samples.data()+start-offset, step*sizeof(float));
asr_online_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
speech_start += step/seg_sample;
}
}
@@ -1136,7 +1136,7 @@
frame->data = (float*)malloc(sizeof(float) * (end-start));
memcpy(frame->data, all_samples.data()+start-offset, (end-start)*sizeof(float));
asr_online_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
}
if(asr_mode != ASR_ONLINE){
@@ -1147,7 +1147,7 @@
frame->data = (float*)malloc(sizeof(float) * (end-start));
memcpy(frame->data, all_samples.data()+start-offset, (end-start)*sizeof(float));
asr_offline_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
}
speech_start = -1;
@@ -1170,7 +1170,7 @@
frame->data = (float*)malloc(sizeof(float) * step);
memcpy(frame->data, all_samples.data()+start-offset, step*sizeof(float));
asr_online_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
speech_start += step/seg_sample;
}
}
@@ -1195,7 +1195,7 @@
frame->data = (float*)malloc(sizeof(float) * (end-offline_start));
memcpy(frame->data, all_samples.data()+offline_start-offset, (end-offline_start)*sizeof(float));
asr_offline_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
}
if(asr_mode != ASR_OFFLINE){
@@ -1213,7 +1213,7 @@
frame->data = (float*)malloc(sizeof(float) * step);
memcpy(frame->data, all_samples.data()+start-offset+sample_offset, step*sizeof(float));
asr_online_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
}
}else{
frame = new AudioFrame(0);
@@ -1221,7 +1221,7 @@
frame->global_start = speech_start; // in this case start >= end
frame->global_end = speech_end_i;
asr_online_queue.push(frame);
- frame = NULL;
+ frame = nullptr;
}
}
speech_start = -1;
diff --git a/runtime/onnxruntime/src/bias-lm.h b/runtime/onnxruntime/src/bias-lm.h
index 957197a..ddaf3e4 100644
--- a/runtime/onnxruntime/src/bias-lm.h
+++ b/runtime/onnxruntime/src/bias-lm.h
@@ -48,7 +48,7 @@
std::vector<std::vector<int>> split_id_vec;
struct timeval start, end;
- gettimeofday(&start, NULL);
+ gettimeofday(&start, nullptr);
LoadCfgFromYaml(cfg_file.c_str(), opt_);
while (getline(ifs_hws, line)) {
@@ -86,7 +86,7 @@
BuildGraph(split_id_vec, custom_weight);
ifs_hws.close();
- gettimeofday(&end, NULL);
+ gettimeofday(&end, nullptr);
long seconds = (end.tv_sec - start.tv_sec);
long modle_init_micros = ((seconds * 1000000) + end.tv_usec) - (start.tv_usec);
LOG(INFO) << "Build bias lm takes " << (double)modle_init_micros / 1000000 << " s";
@@ -99,7 +99,7 @@
std::vector<std::vector<int>> split_id_vec;
struct timeval start, end;
- gettimeofday(&start, NULL);
+ gettimeofday(&start, nullptr);
opt_.incre_bias_ = inc_bias;
for (const pair<string, int>& kv : hws_map) {
float score = 1.0f;
@@ -128,7 +128,7 @@
}
BuildGraph(split_id_vec, custom_weight);
- gettimeofday(&end, NULL);
+ gettimeofday(&end, nullptr);
long seconds = (end.tv_sec - start.tv_sec);
long modle_init_micros = ((seconds * 1000000) + end.tv_usec) - (start.tv_usec);
LOG(INFO) << "Build bias lm takes " << (double)modle_init_micros / 1000000 << " s";
diff --git a/runtime/onnxruntime/src/encode_converter.cpp b/runtime/onnxruntime/src/encode_converter.cpp
index 6c1097e..2ba6109 100644
--- a/runtime/onnxruntime/src/encode_converter.cpp
+++ b/runtime/onnxruntime/src/encode_converter.cpp
@@ -441,7 +441,7 @@
}
bool EncodeConverter::IsAllChineseCharactor(const U8CHAR_T* pu8, size_t ilen) {
- if (pu8 == NULL || ilen <= 0) {
+ if (pu8 == nullptr || ilen <= 0) {
return false;
}
@@ -458,7 +458,7 @@
}
bool EncodeConverter::HasAlpha(const U8CHAR_T* pu8, size_t ilen) {
- if (pu8 == NULL || ilen <= 0) {
+ if (pu8 == nullptr || ilen <= 0) {
return false;
}
for (size_t i = 0; i < ilen; i++) {
@@ -471,7 +471,7 @@
bool EncodeConverter::IsAllAlpha(const U8CHAR_T* pu8, size_t ilen) {
- if (pu8 == NULL || ilen <= 0) {
+ if (pu8 == nullptr || ilen <= 0) {
return false;
}
for (size_t i = 0; i < ilen; i++) {
@@ -483,7 +483,7 @@
}
bool EncodeConverter::IsAllAlphaAndPunct(const U8CHAR_T* pu8, size_t ilen) {
- if (pu8 == NULL || ilen <= 0) {
+ if (pu8 == nullptr || ilen <= 0) {
return false;
}
bool flag1 = HasAlpha(pu8, ilen);
@@ -500,7 +500,7 @@
}
bool EncodeConverter::IsAllAlphaAndDigit(const U8CHAR_T* pu8, size_t ilen) {
- if (pu8 == NULL || ilen <= 0) {
+ if (pu8 == nullptr || ilen <= 0) {
return false;
}
bool flag1 = HasAlpha(pu8, ilen);
@@ -516,7 +516,7 @@
return true;
}
bool EncodeConverter::IsAllAlphaAndDigitAndBlank(const U8CHAR_T* pu8, size_t ilen) {
- if (pu8 == NULL || ilen <= 0) {
+ if (pu8 == nullptr || ilen <= 0) {
return false;
}
for (size_t i = 0; i < ilen; i++) {
@@ -529,7 +529,7 @@
bool EncodeConverter::NeedAddTailBlank(std::string str) {
U8CHAR_T *pu8 = (U8CHAR_T*)str.data();
size_t ilen = str.size();
- if (pu8 == NULL || ilen <= 0) {
+ if (pu8 == nullptr || ilen <= 0) {
return false;
}
if (IsAllAlpha(pu8, ilen) || IsAllAlphaAndPunct(pu8, ilen) || IsAllAlphaAndDigit(pu8, ilen)) {
diff --git a/runtime/onnxruntime/src/encode_converter.h b/runtime/onnxruntime/src/encode_converter.h
index f8d3b23..a135eb6 100644
--- a/runtime/onnxruntime/src/encode_converter.h
+++ b/runtime/onnxruntime/src/encode_converter.h
@@ -88,15 +88,15 @@
#ifdef _MSC_VER
// convert to the local ansi page
static std::string UTF8ToLocaleAnsi(const std::string& strUTF8) {
- int len = MultiByteToWideChar(CP_UTF8, 0, strUTF8.c_str(), -1, NULL, 0);
+ int len = MultiByteToWideChar(CP_UTF8, 0, strUTF8.c_str(), -1, nullptr, 0);
unsigned short*wszGBK = new unsigned short[len + 1];
memset(wszGBK, 0, len * 2 + 2);
MultiByteToWideChar(CP_UTF8, 0, (LPCCH)strUTF8.c_str(), -1, (LPWSTR)wszGBK, len);
- len = WideCharToMultiByte(CP_ACP, 0, (LPCWCH)wszGBK, -1, NULL, 0, NULL, NULL);
+ len = WideCharToMultiByte(CP_ACP, 0, (LPCWCH)wszGBK, -1, nullptr, 0, nullptr, nullptr);
char *szGBK = new char[len + 1];
memset(szGBK, 0, len + 1);
- WideCharToMultiByte(CP_ACP, 0, (LPCWCH)wszGBK, -1, szGBK, len, NULL, NULL);
+ WideCharToMultiByte(CP_ACP, 0, (LPCWCH)wszGBK, -1, szGBK, len, nullptr, nullptr);
std::string strTemp(szGBK);
delete[]szGBK;
delete[]wszGBK;
diff --git a/runtime/onnxruntime/src/funasrruntime.cpp b/runtime/onnxruntime/src/funasrruntime.cpp
index fdaf69d..0ca4ded 100644
--- a/runtime/onnxruntime/src/funasrruntime.cpp
+++ b/runtime/onnxruntime/src/funasrruntime.cpp
@@ -480,7 +480,7 @@
audio->Split(vad_online_handle, chunk_len, input_finished, mode);
- funasr::AudioFrame* frame = NULL;
+ funasr::AudioFrame* frame = nullptr;
while(audio->FetchChunck(frame) > 0){
string msg = ((funasr::ParaformerOnline*)asr_online_handle)->Forward(frame->data, frame->len, frame->is_final);
if(mode == ASR_ONLINE){
@@ -504,9 +504,9 @@
}else if(mode == ASR_TWO_PASS){
p_result->msg += msg;
}
- if(frame != NULL){
+ if(frame != nullptr){
delete frame;
- frame = NULL;
+ frame = nullptr;
}
}
@@ -561,9 +561,9 @@
if (!(p_result->stamp).empty()){
p_result->stamp_sents = funasr::TimestampSentence(p_result->tpass_msg, p_result->stamp);
}
- if(frame != NULL){
+ if(frame != nullptr){
delete frame;
- frame = NULL;
+ frame = nullptr;
}
}
diff --git a/runtime/onnxruntime/src/tokenizer.cpp b/runtime/onnxruntime/src/tokenizer.cpp
index f56601a..7618282 100644
--- a/runtime/onnxruntime/src/tokenizer.cpp
+++ b/runtime/onnxruntime/src/tokenizer.cpp
@@ -53,8 +53,8 @@
SetJiebaRes(jieba_dict_trie_, jieba_model_);
}else {
- jieba_dict_trie_ = NULL;
- jieba_model_ = NULL;
+ jieba_dict_trie_ = nullptr;
+ jieba_model_ = nullptr;
}
}
--
Gitblit v1.9.1