From c542eacb0aadcbc49c63db40429fca4e08f807a4 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 21 Jul 2023 10:27:35 +0800
Subject: [PATCH] Merge branch 'main' of github.com:alibaba-damo-academy/FunASR add
---
funasr/runtime/onnxruntime/src/audio.cpp | 554 ++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 508 insertions(+), 46 deletions(-)
diff --git a/funasr/runtime/onnxruntime/src/audio.cpp b/funasr/runtime/onnxruntime/src/audio.cpp
index 635c330..85633b7 100644
--- a/funasr/runtime/onnxruntime/src/audio.cpp
+++ b/funasr/runtime/onnxruntime/src/audio.cpp
@@ -9,8 +9,18 @@
#include "audio.h"
#include "precomp.h"
+extern "C" {
+#include <libavutil/opt.h>
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/channel_layout.h>
+#include <libavutil/samplefmt.h>
+#include <libswresample/swresample.h>
+}
+
using namespace std;
+namespace funasr {
// see http://soundfile.sapp.org/doc/WaveFormat/
// Note: We assume little endian here
struct WaveHeader {
@@ -128,33 +138,32 @@
start = 0;
};
AudioFrame::~AudioFrame(){};
-int AudioFrame::set_start(int val)
+int AudioFrame::SetStart(int val)
{
start = val < 0 ? 0 : val;
return start;
};
-int AudioFrame::set_end(int val)
+int AudioFrame::SetEnd(int val)
{
end = val;
len = end - start;
return end;
};
-int AudioFrame::get_start()
+int AudioFrame::GetStart()
{
return start;
};
-int AudioFrame::get_len()
+int AudioFrame::GetLen()
{
return len;
};
-int AudioFrame::disp()
+int AudioFrame::Disp()
{
- printf("not imp!!!!\n");
-
+ LOG(ERROR) << "Not imp!!!!";
return 0;
};
@@ -176,42 +185,37 @@
{
if (speech_buff != NULL) {
free(speech_buff);
-
}
-
if (speech_data != NULL) {
-
free(speech_data);
}
+ if (speech_char != NULL) {
+ free(speech_char);
+ }
}
-void Audio::disp()
+void Audio::Disp()
{
- printf("Audio time is %f s. len is %d\n", (float)speech_len / MODEL_SAMPLE_RATE,
- speech_len);
+ LOG(INFO) << "Audio time is " << (float)speech_len / MODEL_SAMPLE_RATE << " s. len is " << speech_len;
}
-float Audio::get_time_len()
+float Audio::GetTimeLen()
{
return (float)speech_len / MODEL_SAMPLE_RATE;
}
-void Audio::wavResample(int32_t sampling_rate, const float *waveform,
+void Audio::WavResample(int32_t sampling_rate, const float *waveform,
int32_t n)
{
- printf(
- "Creating a resampler:\n"
- " in_sample_rate: %d\n"
- " output_sample_rate: %d\n",
- sampling_rate, static_cast<int32_t>(MODEL_SAMPLE_RATE));
+ LOG(INFO) << "Creating a resampler:\n"
+ << " in_sample_rate: "<< sampling_rate << "\n"
+ << " output_sample_rate: " << static_cast<int32_t>(MODEL_SAMPLE_RATE);
float min_freq =
std::min<int32_t>(sampling_rate, MODEL_SAMPLE_RATE);
float lowpass_cutoff = 0.99 * 0.5 * min_freq;
int32_t lowpass_filter_width = 6;
- //FIXME
- //auto resampler = new LinearResample(
- // sampling_rate, model_sample_rate, lowpass_cutoff, lowpass_filter_width);
+
auto resampler = std::make_unique<LinearResample>(
sampling_rate, MODEL_SAMPLE_RATE, lowpass_cutoff, lowpass_filter_width);
std::vector<float> samples;
@@ -226,7 +230,335 @@
copy(samples.begin(), samples.end(), speech_data);
}
-bool Audio::loadwav(const char *filename, int32_t* sampling_rate)
+bool Audio::FfmpegLoad(const char *filename){
+ // from file
+ AVFormatContext* formatContext = avformat_alloc_context();
+ if (avformat_open_input(&formatContext, filename, NULL, NULL) != 0) {
+ printf("Error: Could not open input file.");
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ return false;
+ }
+
+ if (avformat_find_stream_info(formatContext, NULL) < 0) {
+ printf("Error: Could not find stream information.");
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ return false;
+ }
+ const AVCodec* codec = NULL;
+ AVCodecParameters* codecParameters = NULL;
+ int audioStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
+ if (audioStreamIndex >= 0) {
+ codecParameters = formatContext->streams[audioStreamIndex]->codecpar;
+ }
+ AVCodecContext* codecContext = avcodec_alloc_context3(codec);
+ if (!codecContext) {
+ fprintf(stderr, "Failed to allocate codec context\n");
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ return false;
+ }
+ if (avcodec_parameters_to_context(codecContext, codecParameters) != 0) {
+ printf("Error: Could not copy codec parameters to codec context.");
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ return false;
+ }
+ if (avcodec_open2(codecContext, codec, NULL) < 0) {
+ printf("Error: Could not open audio decoder.");
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ return false;
+ }
+ SwrContext *swr_ctx = swr_alloc_set_opts(
+ nullptr, // allocate a new context
+ AV_CH_LAYOUT_MONO, // output channel layout (stereo)
+ AV_SAMPLE_FMT_S16, // output sample format (signed 16-bit)
+ 16000, // output sample rate (same as input)
+ av_get_default_channel_layout(codecContext->channels), // input channel layout
+ codecContext->sample_fmt, // input sample format
+ codecContext->sample_rate, // input sample rate
+ 0, // logging level
+ nullptr // parent context
+ );
+ if (swr_ctx == nullptr) {
+ std::cerr << "Could not initialize resampler" << std::endl;
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ return false;
+ }
+ if (swr_init(swr_ctx) != 0) {
+ std::cerr << "Could not initialize resampler" << std::endl;
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ swr_free(&swr_ctx);
+ return false;
+ }
+
+ // to pcm
+ AVPacket* packet = av_packet_alloc();
+ AVFrame* frame = av_frame_alloc();
+ std::vector<uint8_t> resampled_buffers;
+ while (av_read_frame(formatContext, packet) >= 0) {
+ if (packet->stream_index == audioStreamIndex) {
+ if (avcodec_send_packet(codecContext, packet) >= 0) {
+ while (avcodec_receive_frame(codecContext, frame) >= 0) {
+ // Resample audio if necessary
+ std::vector<uint8_t> resampled_buffer;
+ int in_samples = frame->nb_samples;
+ uint8_t **in_data = frame->extended_data;
+ int out_samples = av_rescale_rnd(in_samples,
+ 16000,
+ codecContext->sample_rate,
+ AV_ROUND_DOWN);
+
+ int resampled_size = out_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
+ if (resampled_buffer.size() < resampled_size) {
+ resampled_buffer.resize(resampled_size);
+ }
+ uint8_t *resampled_data = resampled_buffer.data();
+ int ret = swr_convert(
+ swr_ctx,
+ &resampled_data, // output buffer
+ resampled_size, // output buffer size
+ (const uint8_t **)(frame->data), //(const uint8_t **)(frame->extended_data)
+ in_samples // input buffer size
+ );
+ if (ret < 0) {
+ std::cerr << "Error resampling audio" << std::endl;
+ break;
+ }
+ std::copy(resampled_buffer.begin(), resampled_buffer.end(), std::back_inserter(resampled_buffers));
+ }
+ }
+ }
+ av_packet_unref(packet);
+ }
+
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ swr_free(&swr_ctx);
+ av_packet_free(&packet);
+ av_frame_free(&frame);
+
+ if (speech_data != NULL) {
+ free(speech_data);
+ }
+ if (speech_buff != NULL) {
+ free(speech_buff);
+ }
+ offset = 0;
+
+ speech_len = (resampled_buffers.size()) / 2;
+ speech_buff = (int16_t*)malloc(sizeof(int16_t) * speech_len);
+ if (speech_buff)
+ {
+ memset(speech_buff, 0, sizeof(int16_t) * speech_len);
+ memcpy((void*)speech_buff, (const void*)resampled_buffers.data(), speech_len * sizeof(int16_t));
+
+ speech_data = (float*)malloc(sizeof(float) * speech_len);
+ memset(speech_data, 0, sizeof(float) * speech_len);
+
+ float scale = 1;
+ if (data_type == 1) {
+ scale = 32768;
+ }
+ for (int32_t i = 0; i != speech_len; ++i) {
+ speech_data[i] = (float)speech_buff[i] / scale;
+ }
+
+ AudioFrame* frame = new AudioFrame(speech_len);
+ frame_queue.push(frame);
+
+ return true;
+ }
+ else
+ return false;
+
+}
+
+bool Audio::FfmpegLoad(const char* buf, int n_file_len){
+ // from buf
+ char* buf_copy = (char *)malloc(n_file_len);
+ memcpy(buf_copy, buf, n_file_len);
+
+ AVIOContext* avio_ctx = avio_alloc_context(
+ (unsigned char*)buf_copy, // buffer
+ n_file_len, // buffer size
+ 0, // write flag (0 for read-only)
+ nullptr, // opaque pointer (not used here)
+ nullptr, // read callback (not used here)
+ nullptr, // write callback (not used here)
+ nullptr // seek callback (not used here)
+ );
+ AVFormatContext* formatContext = avformat_alloc_context();
+ formatContext->pb = avio_ctx;
+ if (avformat_open_input(&formatContext, "", NULL, NULL) != 0) {
+ printf("Error: Could not open input file.");
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ return false;
+ }
+
+ if (avformat_find_stream_info(formatContext, NULL) < 0) {
+ printf("Error: Could not find stream information.");
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ return false;
+ }
+ const AVCodec* codec = NULL;
+ AVCodecParameters* codecParameters = NULL;
+ int audioStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
+ if (audioStreamIndex >= 0) {
+ codecParameters = formatContext->streams[audioStreamIndex]->codecpar;
+ }
+ AVCodecContext* codecContext = avcodec_alloc_context3(codec);
+ if (!codecContext) {
+ fprintf(stderr, "Failed to allocate codec context\n");
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ return false;
+ }
+ if (avcodec_parameters_to_context(codecContext, codecParameters) != 0) {
+ printf("Error: Could not copy codec parameters to codec context.");
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ return false;
+ }
+ if (avcodec_open2(codecContext, codec, NULL) < 0) {
+ printf("Error: Could not open audio decoder.");
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ return false;
+ }
+ SwrContext *swr_ctx = swr_alloc_set_opts(
+ nullptr, // allocate a new context
+ AV_CH_LAYOUT_MONO, // output channel layout (stereo)
+ AV_SAMPLE_FMT_S16, // output sample format (signed 16-bit)
+ 16000, // output sample rate (same as input)
+ av_get_default_channel_layout(codecContext->channels), // input channel layout
+ codecContext->sample_fmt, // input sample format
+ codecContext->sample_rate, // input sample rate
+ 0, // logging level
+ nullptr // parent context
+ );
+ if (swr_ctx == nullptr) {
+ std::cerr << "Could not initialize resampler" << std::endl;
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ return false;
+ }
+ if (swr_init(swr_ctx) != 0) {
+ std::cerr << "Could not initialize resampler" << std::endl;
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ swr_free(&swr_ctx);
+ return false;
+ }
+
+ // to pcm
+ AVPacket* packet = av_packet_alloc();
+ AVFrame* frame = av_frame_alloc();
+ std::vector<uint8_t> resampled_buffers;
+ while (av_read_frame(formatContext, packet) >= 0) {
+ if (packet->stream_index == audioStreamIndex) {
+ if (avcodec_send_packet(codecContext, packet) >= 0) {
+ while (avcodec_receive_frame(codecContext, frame) >= 0) {
+ // Resample audio if necessary
+ std::vector<uint8_t> resampled_buffer;
+ int in_samples = frame->nb_samples;
+ uint8_t **in_data = frame->extended_data;
+ int out_samples = av_rescale_rnd(in_samples,
+ 16000,
+ codecContext->sample_rate,
+ AV_ROUND_DOWN);
+
+ int resampled_size = out_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
+ if (resampled_buffer.size() < resampled_size) {
+ resampled_buffer.resize(resampled_size);
+ }
+ uint8_t *resampled_data = resampled_buffer.data();
+ int ret = swr_convert(
+ swr_ctx,
+ &resampled_data, // output buffer
+ resampled_size, // output buffer size
+ (const uint8_t **)(frame->data), //(const uint8_t **)(frame->extended_data)
+ in_samples // input buffer size
+ );
+ if (ret < 0) {
+ std::cerr << "Error resampling audio" << std::endl;
+ break;
+ }
+ std::copy(resampled_buffer.begin(), resampled_buffer.end(), std::back_inserter(resampled_buffers));
+ }
+ }
+ }
+ av_packet_unref(packet);
+ }
+
+ avio_context_free(&avio_ctx);
+ avformat_close_input(&formatContext);
+ avformat_free_context(formatContext);
+ avcodec_free_context(&codecContext);
+ swr_free(&swr_ctx);
+ av_packet_free(&packet);
+ av_frame_free(&frame);
+
+ if (speech_data != NULL) {
+ free(speech_data);
+ }
+ if (speech_buff != NULL) {
+ free(speech_buff);
+ }
+ offset = 0;
+
+ speech_len = (resampled_buffers.size()) / 2;
+ speech_buff = (int16_t*)malloc(sizeof(int16_t) * speech_len);
+ if (speech_buff)
+ {
+ memset(speech_buff, 0, sizeof(int16_t) * speech_len);
+ memcpy((void*)speech_buff, (const void*)resampled_buffers.data(), speech_len * sizeof(int16_t));
+
+ speech_data = (float*)malloc(sizeof(float) * speech_len);
+ memset(speech_data, 0, sizeof(float) * speech_len);
+
+ float scale = 1;
+ if (data_type == 1) {
+ scale = 32768;
+ }
+ for (int32_t i = 0; i != speech_len; ++i) {
+ speech_data[i] = (float)speech_buff[i] / scale;
+ }
+
+ AudioFrame* frame = new AudioFrame(speech_len);
+ frame_queue.push(frame);
+
+ return true;
+ }
+ else
+ return false;
+
+}
+
+
+bool Audio::LoadWav(const char *filename, int32_t* sampling_rate)
{
WaveHeader header;
if (speech_data != NULL) {
@@ -240,7 +572,25 @@
std::ifstream is(filename, std::ifstream::binary);
is.read(reinterpret_cast<char *>(&header), sizeof(header));
if(!is){
- fprintf(stderr, "Failed to read %s\n", filename);
+ LOG(ERROR) << "Failed to read " << filename;
+ return false;
+ }
+
+ if (!header.Validate()) {
+ return false;
+ }
+
+ header.SeekToDataChunk(is);
+ if (!is) {
+ return false;
+ }
+
+ if (!header.Validate()) {
+ return false;
+ }
+
+ header.SeekToDataChunk(is);
+ if (!is) {
return false;
}
@@ -255,7 +605,7 @@
memset(speech_buff, 0, sizeof(int16_t) * speech_len);
is.read(reinterpret_cast<char *>(speech_buff), header.subchunk2_size);
if (!is) {
- fprintf(stderr, "Failed to read %s\n", filename);
+ LOG(ERROR) << "Failed to read " << filename;
return false;
}
speech_data = (float*)malloc(sizeof(float) * speech_len);
@@ -271,7 +621,7 @@
//resample
if(*sampling_rate != MODEL_SAMPLE_RATE){
- wavResample(*sampling_rate, speech_data, speech_len);
+ WavResample(*sampling_rate, speech_data, speech_len);
}
AudioFrame* frame = new AudioFrame(speech_len);
@@ -283,8 +633,47 @@
return false;
}
-bool Audio::loadwav(const char* buf, int nFileLen, int32_t* sampling_rate)
+bool Audio::LoadWav2Char(const char *filename, int32_t* sampling_rate)
{
+ WaveHeader header;
+ if (speech_char != NULL) {
+ free(speech_char);
+ }
+ offset = 0;
+ std::ifstream is(filename, std::ifstream::binary);
+ is.read(reinterpret_cast<char *>(&header), sizeof(header));
+ if(!is){
+ LOG(ERROR) << "Failed to read " << filename;
+ return false;
+ }
+ if (!header.Validate()) {
+ return false;
+ }
+ header.SeekToDataChunk(is);
+ if (!is) {
+ return false;
+ }
+ if (!header.Validate()) {
+ return false;
+ }
+ header.SeekToDataChunk(is);
+ if (!is) {
+ return false;
+ }
+
+ *sampling_rate = header.sample_rate;
+ // header.subchunk2_size contains the number of bytes in the data.
+ // As we assume each sample contains two bytes, so it is divided by 2 here
+ speech_len = header.subchunk2_size / 2;
+ speech_char = (char *)malloc(header.subchunk2_size);
+ memset(speech_char, 0, header.subchunk2_size);
+ is.read(speech_char, header.subchunk2_size);
+
+ return true;
+}
+
+bool Audio::LoadWav(const char* buf, int n_file_len, int32_t* sampling_rate)
+{
WaveHeader header;
if (speech_data != NULL) {
free(speech_data);
@@ -318,7 +707,7 @@
//resample
if(*sampling_rate != MODEL_SAMPLE_RATE){
- wavResample(*sampling_rate, speech_data, speech_len);
+ WavResample(*sampling_rate, speech_data, speech_len);
}
AudioFrame* frame = new AudioFrame(speech_len);
@@ -330,7 +719,7 @@
return false;
}
-bool Audio::loadpcmwav(const char* buf, int nBufLen, int32_t* sampling_rate)
+bool Audio::LoadPcmwav(const char* buf, int n_buf_len, int32_t* sampling_rate)
{
if (speech_data != NULL) {
free(speech_data);
@@ -340,7 +729,7 @@
}
offset = 0;
- speech_len = nBufLen / 2;
+ speech_len = n_buf_len / 2;
speech_buff = (int16_t*)malloc(sizeof(int16_t) * speech_len);
if (speech_buff)
{
@@ -361,7 +750,7 @@
//resample
if(*sampling_rate != MODEL_SAMPLE_RATE){
- wavResample(*sampling_rate, speech_data, speech_len);
+ WavResample(*sampling_rate, speech_data, speech_len);
}
AudioFrame* frame = new AudioFrame(speech_len);
@@ -373,7 +762,7 @@
return false;
}
-bool Audio::loadpcmwav(const char* filename, int32_t* sampling_rate)
+bool Audio::LoadPcmwav(const char* filename, int32_t* sampling_rate)
{
if (speech_data != NULL) {
free(speech_data);
@@ -386,12 +775,15 @@
FILE* fp;
fp = fopen(filename, "rb");
if (fp == nullptr)
+ {
+ LOG(ERROR) << "Failed to read " << filename;
return false;
+ }
fseek(fp, 0, SEEK_END);
- uint32_t nFileLen = ftell(fp);
+ uint32_t n_file_len = ftell(fp);
fseek(fp, 0, SEEK_SET);
- speech_len = (nFileLen) / 2;
+ speech_len = (n_file_len) / 2;
speech_buff = (int16_t*)malloc(sizeof(int16_t) * speech_len);
if (speech_buff)
{
@@ -412,7 +804,7 @@
//resample
if(*sampling_rate != MODEL_SAMPLE_RATE){
- wavResample(*sampling_rate, speech_data, speech_len);
+ WavResample(*sampling_rate, speech_data, speech_len);
}
AudioFrame* frame = new AudioFrame(speech_len);
@@ -425,7 +817,60 @@
}
-int Audio::fetch_chunck(float *&dout, int len)
+bool Audio::LoadPcmwav2Char(const char* filename, int32_t* sampling_rate)
+{
+ if (speech_char != NULL) {
+ free(speech_char);
+ }
+ offset = 0;
+
+ FILE* fp;
+ fp = fopen(filename, "rb");
+ if (fp == nullptr)
+ {
+ LOG(ERROR) << "Failed to read " << filename;
+ return false;
+ }
+ fseek(fp, 0, SEEK_END);
+ uint32_t n_file_len = ftell(fp);
+ fseek(fp, 0, SEEK_SET);
+
+ speech_len = (n_file_len) / 2;
+ speech_char = (char *)malloc(n_file_len);
+ memset(speech_char, 0, n_file_len);
+ fread(speech_char, sizeof(int16_t), n_file_len/2, fp);
+ fclose(fp);
+
+ return true;
+}
+
+bool Audio::LoadOthers2Char(const char* filename)
+{
+ if (speech_char != NULL) {
+ free(speech_char);
+ }
+
+ FILE* fp;
+ fp = fopen(filename, "rb");
+ if (fp == nullptr)
+ {
+ LOG(ERROR) << "Failed to read " << filename;
+ return false;
+ }
+ fseek(fp, 0, SEEK_END);
+ uint32_t n_file_len = ftell(fp);
+ fseek(fp, 0, SEEK_SET);
+
+ speech_len = n_file_len;
+ speech_char = (char *)malloc(n_file_len);
+ memset(speech_char, 0, n_file_len);
+ fread(speech_char, 1, n_file_len, fp);
+ fclose(fp);
+
+ return true;
+}
+
+int Audio::FetchChunck(float *&dout, int len)
{
if (offset >= speech_align_len) {
dout = NULL;
@@ -446,14 +891,14 @@
}
}
-int Audio::fetch(float *&dout, int &len, int &flag)
+int Audio::Fetch(float *&dout, int &len, int &flag)
{
if (frame_queue.size() > 0) {
AudioFrame *frame = frame_queue.front();
frame_queue.pop();
- dout = speech_data + frame->get_start();
- len = frame->get_len();
+ dout = speech_data + frame->GetStart();
+ len = frame->GetLen();
delete frame;
flag = S_END;
return 1;
@@ -462,7 +907,7 @@
}
}
-void Audio::padding()
+void Audio::Padding()
{
float num_samples = speech_len;
float frame_length = 400;
@@ -499,27 +944,44 @@
delete frame;
}
-void Audio::split(Model* pRecogObj)
+void Audio::Split(OfflineStream* offline_stream)
{
AudioFrame *frame;
frame = frame_queue.front();
frame_queue.pop();
- int sp_len = frame->get_len();
+ int sp_len = frame->GetLen();
delete frame;
frame = NULL;
std::vector<float> pcm_data(speech_data, speech_data+sp_len);
- vector<std::vector<int>> vad_segments = pRecogObj->vad_seg(pcm_data);
+ vector<std::vector<int>> vad_segments = (offline_stream->vad_handle)->Infer(pcm_data);
int seg_sample = MODEL_SAMPLE_RATE/1000;
for(vector<int> segment:vad_segments)
{
frame = new AudioFrame();
int start = segment[0]*seg_sample;
int end = segment[1]*seg_sample;
- frame->set_start(start);
- frame->set_end(end);
+ frame->SetStart(start);
+ frame->SetEnd(end);
frame_queue.push(frame);
frame = NULL;
}
}
+
+
+void Audio::Split(VadModel* vad_obj, vector<std::vector<int>>& vad_segments, bool input_finished)
+{
+ AudioFrame *frame;
+
+ frame = frame_queue.front();
+ frame_queue.pop();
+ int sp_len = frame->GetLen();
+ delete frame;
+ frame = NULL;
+
+ std::vector<float> pcm_data(speech_data, speech_data+sp_len);
+ vad_segments = vad_obj->Infer(pcm_data, input_finished);
+}
+
+} // namespace funasr
\ No newline at end of file
--
Gitblit v1.9.1