From 7fe37e0352ca6f8b5937bcda7263a26529723715 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Wed, 10 May 2023 19:17:04 +0800
Subject: [PATCH] Merge pull request #491 from alibaba-damo-academy/main
---
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/README.md | 2
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh | 103 +++++++
funasr/runtime/onnxruntime/readme.md | 51 ++-
funasr/runtime/websocket/websocketsrv.cpp | 4
funasr/runtime/onnxruntime/src/funasr-onnx-offline-rtf.cpp | 32 +
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py | 2
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/README.md | 2
funasr/runtime/onnxruntime/src/funasrruntime.cpp | 201 +++++++--------
funasr/runtime/onnxruntime/src/vad-model.cpp | 8
funasr/runtime/onnxruntime/src/funasr-onnx-offline-vad.cpp | 31 +
funasr/runtime/grpc/paraformer-server.cc | 2
funasr/runtime/onnxruntime/src/fsmn-vad.cpp | 5
funasr/runtime/onnxruntime/src/funasr-onnx-offline-punc.cpp | 2
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.sh | 103 +++++++
funasr/runtime/onnxruntime/src/util.h | 1
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.sh | 103 +++++++
/dev/null | 1
funasr/runtime/onnxruntime/src/funasr-onnx-offline.cpp | 31 +
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md | 2
funasr/runtime/onnxruntime/src/util.cpp | 9
funasr/runtime/onnxruntime/include/funasrruntime.h | 28 +
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py | 2
funasr/runtime/onnxruntime/include/vad-model.h | 4
funasr/runtime/onnxruntime/src/fsmn-vad.h | 2
24 files changed, 543 insertions(+), 188 deletions(-)
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
index 92088a2..bb55ab5 120000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
@@ -1 +1 @@
-../TEMPLATE/README.md
\ No newline at end of file
+../../TEMPLATE/README.md
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh
deleted file mode 120000
index 0b3b38b..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh
+++ /dev/null
@@ -1 +0,0 @@
-../TEMPLATE/infer.sh
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh
new file mode 100644
index 0000000..ef49d7a
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=64
+gpu_inference=true # whether to perform gpu decoding
+gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+njob=64 # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
+
+. utils/parse_options.sh || exit 1;
+
+if [ "${gpu_inference}" == "true" ]; then
+ nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+ nj=$njob
+ batch_size=1
+ gpuid_list=""
+ for JOB in $(seq ${nj}); do
+ gpuid_list=$gpuid_list"-1,"
+ done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+ split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ -n "${checkpoint_dir}" ]; then
+ python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+ model=${checkpoint_dir}/${model}
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+ echo "Decoding ..."
+ gpuid_list_array=(${gpuid_list//,/ })
+ for JOB in $(seq ${nj}); do
+ {
+ id=$((JOB-1))
+ gpuid=${gpuid_list_array[$id]}
+ mkdir -p ${output_dir}/output.$JOB
+ python infer.py \
+ --model ${model} \
+ --audio_in ${output_dir}/split/wav.$JOB.scp \
+ --output_dir ${output_dir}/output.$JOB \
+ --batch_size ${batch_size} \
+ --gpuid ${gpuid}
+ }&
+ done
+ wait
+
+ mkdir -p ${output_dir}/1best_recog
+ for f in token score text; do
+ if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+ for i in $(seq "${nj}"); do
+ cat "${output_dir}/output.${i}/1best_recog/${f}"
+ done | sort -k1 >"${output_dir}/1best_recog/${f}"
+ fi
+ done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+ echo "Computing WER ..."
+ cp ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
+ cp ${data_dir}/text ${output_dir}/1best_recog/text.ref
+ python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+ tail -n 3 ${output_dir}/1best_recog/text.cer
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
+ echo "SpeechIO TIOBE textnorm"
+ echo "$0 --> Normalizing REF text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${data_dir}/text \
+ ${output_dir}/1best_recog/ref.txt
+
+ echo "$0 --> Normalizing HYP text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${output_dir}/1best_recog/text.proc \
+ ${output_dir}/1best_recog/rec.txt
+ grep -v $'\t$' ${output_dir}/1best_recog/rec.txt > ${output_dir}/1best_recog/rec_non_empty.txt
+
+ echo "$0 --> computing WER/CER and alignment ..."
+ ./utils/error_rate_zh \
+ --tokenizer char \
+ --ref ${output_dir}/1best_recog/ref.txt \
+ --hyp ${output_dir}/1best_recog/rec_non_empty.txt \
+ ${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
+ rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
+fi
+
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/README.md b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/README.md
index 92088a2..bb55ab5 120000
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/README.md
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/README.md
@@ -1 +1 @@
-../TEMPLATE/README.md
\ No newline at end of file
+../../TEMPLATE/README.md
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
index f05fbbb..128fc31 120000
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
@@ -1 +1 @@
-../TEMPLATE/infer.py
\ No newline at end of file
+../../TEMPLATE/infer.py
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.sh
deleted file mode 120000
index 0b3b38b..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.sh
+++ /dev/null
@@ -1 +0,0 @@
-../TEMPLATE/infer.sh
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.sh
new file mode 100644
index 0000000..207bbdf
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=64
+gpu_inference=true # whether to perform gpu decoding
+gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+njob=64 # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
+
+. utils/parse_options.sh || exit 1;
+
+if [ "${gpu_inference}" == "true" ]; then
+ nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+ nj=$njob
+ batch_size=1
+ gpuid_list=""
+ for JOB in $(seq ${nj}); do
+ gpuid_list=$gpuid_list"-1,"
+ done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+ split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ -n "${checkpoint_dir}" ]; then
+ python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+ model=${checkpoint_dir}/${model}
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+ echo "Decoding ..."
+ gpuid_list_array=(${gpuid_list//,/ })
+ for JOB in $(seq ${nj}); do
+ {
+ id=$((JOB-1))
+ gpuid=${gpuid_list_array[$id]}
+ mkdir -p ${output_dir}/output.$JOB
+ python infer.py \
+ --model ${model} \
+ --audio_in ${output_dir}/split/wav.$JOB.scp \
+ --output_dir ${output_dir}/output.$JOB \
+ --batch_size ${batch_size} \
+ --gpuid ${gpuid}
+ }&
+ done
+ wait
+
+ mkdir -p ${output_dir}/1best_recog
+ for f in token score text; do
+ if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+ for i in $(seq "${nj}"); do
+ cat "${output_dir}/output.${i}/1best_recog/${f}"
+ done | sort -k1 >"${output_dir}/1best_recog/${f}"
+ fi
+ done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+ echo "Computing WER ..."
+ cp ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
+ cp ${data_dir}/text ${output_dir}/1best_recog/text.ref
+ python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+ tail -n 3 ${output_dir}/1best_recog/text.cer
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
+ echo "SpeechIO TIOBE textnorm"
+ echo "$0 --> Normalizing REF text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${data_dir}/text \
+ ${output_dir}/1best_recog/ref.txt
+
+ echo "$0 --> Normalizing HYP text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${output_dir}/1best_recog/text.proc \
+ ${output_dir}/1best_recog/rec.txt
+ grep -v $'\t$' ${output_dir}/1best_recog/rec.txt > ${output_dir}/1best_recog/rec_non_empty.txt
+
+ echo "$0 --> computing WER/CER and alignment ..."
+ ./utils/error_rate_zh \
+ --tokenizer char \
+ --ref ${output_dir}/1best_recog/ref.txt \
+ --hyp ${output_dir}/1best_recog/rec_non_empty.txt \
+ ${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
+ rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
+fi
+
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/README.md b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/README.md
index 92088a2..bb55ab5 120000
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/README.md
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/README.md
@@ -1 +1 @@
-../TEMPLATE/README.md
\ No newline at end of file
+../../TEMPLATE/README.md
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py
index f05fbbb..128fc31 120000
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py
@@ -1 +1 @@
-../TEMPLATE/infer.py
\ No newline at end of file
+../../TEMPLATE/infer.py
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.sh
deleted file mode 120000
index 0b3b38b..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.sh
+++ /dev/null
@@ -1 +0,0 @@
-../TEMPLATE/infer.sh
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.sh
new file mode 100644
index 0000000..4b59bc1
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=64
+gpu_inference=true # whether to perform gpu decoding
+gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+njob=64 # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
+
+. utils/parse_options.sh || exit 1;
+
+if [ "${gpu_inference}" == "true" ]; then
+ nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+ nj=$njob
+ batch_size=1
+ gpuid_list=""
+ for JOB in $(seq ${nj}); do
+ gpuid_list=$gpuid_list"-1,"
+ done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+ split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ -n "${checkpoint_dir}" ]; then
+ python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+ model=${checkpoint_dir}/${model}
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+ echo "Decoding ..."
+ gpuid_list_array=(${gpuid_list//,/ })
+ for JOB in $(seq ${nj}); do
+ {
+ id=$((JOB-1))
+ gpuid=${gpuid_list_array[$id]}
+ mkdir -p ${output_dir}/output.$JOB
+ python infer.py \
+ --model ${model} \
+ --audio_in ${output_dir}/split/wav.$JOB.scp \
+ --output_dir ${output_dir}/output.$JOB \
+ --batch_size ${batch_size} \
+ --gpuid ${gpuid}
+ }&
+ done
+ wait
+
+ mkdir -p ${output_dir}/1best_recog
+ for f in token score text; do
+ if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+ for i in $(seq "${nj}"); do
+ cat "${output_dir}/output.${i}/1best_recog/${f}"
+ done | sort -k1 >"${output_dir}/1best_recog/${f}"
+ fi
+ done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+ echo "Computing WER ..."
+ cp ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
+ cp ${data_dir}/text ${output_dir}/1best_recog/text.ref
+ python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+ tail -n 3 ${output_dir}/1best_recog/text.cer
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
+ echo "SpeechIO TIOBE textnorm"
+ echo "$0 --> Normalizing REF text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${data_dir}/text \
+ ${output_dir}/1best_recog/ref.txt
+
+ echo "$0 --> Normalizing HYP text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${output_dir}/1best_recog/text.proc \
+ ${output_dir}/1best_recog/rec.txt
+ grep -v $'\t$' ${output_dir}/1best_recog/rec.txt > ${output_dir}/1best_recog/rec_non_empty.txt
+
+ echo "$0 --> computing WER/CER and alignment ..."
+ ./utils/error_rate_zh \
+ --tokenizer char \
+ --ref ${output_dir}/1best_recog/ref.txt \
+ --hyp ${output_dir}/1best_recog/rec_non_empty.txt \
+ ${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
+ rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
+fi
+
diff --git a/funasr/runtime/grpc/paraformer-server.cc b/funasr/runtime/grpc/paraformer-server.cc
index 3bc011a..734dadc 100644
--- a/funasr/runtime/grpc/paraformer-server.cc
+++ b/funasr/runtime/grpc/paraformer-server.cc
@@ -137,7 +137,7 @@
stream->Write(res);
}
else {
- FUNASR_RESULT Result= FunOfflineRecogPCMBuffer(AsrHanlde, tmp_data.c_str(), data_len_int, 16000, RASR_NONE, NULL);
+ FUNASR_RESULT Result= FunOfflineInferBuffer(AsrHanlde, tmp_data.c_str(), data_len_int, RASR_NONE, NULL, 16000);
std::string asr_result = ((FUNASR_RECOG_RESULT*)Result)->msg;
auto end_time = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
diff --git a/funasr/runtime/onnxruntime/include/funasrruntime.h b/funasr/runtime/onnxruntime/include/funasrruntime.h
index 75be80e..5cfdb47 100644
--- a/funasr/runtime/onnxruntime/include/funasrruntime.h
+++ b/funasr/runtime/onnxruntime/include/funasrruntime.h
@@ -46,15 +46,20 @@
FUNASR_MODEL_PARAFORMER = 3,
}FUNASR_MODEL_TYPE;
+typedef enum
+{
+ FSMN_VAD_OFFLINE=0,
+ FSMN_VAD_ONLINE = 1,
+}FSMN_VAD_MODE;
+
typedef void (* QM_CALLBACK)(int cur_step, int n_total); // n_total: total steps; cur_step: Current Step.
// ASR
_FUNASRAPI FUNASR_HANDLE FunASRInit(std::map<std::string, std::string>& model_path, int thread_num);
-
-_FUNASRAPI FUNASR_RESULT FunASRRecogBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FUNASR_MODE mode, QM_CALLBACK fn_callback);
-_FUNASRAPI FUNASR_RESULT FunASRRecogPCMBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, int sampling_rate, FUNASR_MODE mode, QM_CALLBACK fn_callback);
-_FUNASRAPI FUNASR_RESULT FunASRRecogPCMFile(FUNASR_HANDLE handle, const char* sz_filename, int sampling_rate, FUNASR_MODE mode, QM_CALLBACK fn_callback);
-_FUNASRAPI FUNASR_RESULT FunASRRecogFile(FUNASR_HANDLE handle, const char* sz_wavfile, FUNASR_MODE mode, QM_CALLBACK fn_callback);
+// buffer
+_FUNASRAPI FUNASR_RESULT FunASRInferBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate=16000);
+// file, support wav & pcm
+_FUNASRAPI FUNASR_RESULT FunASRInfer(FUNASR_HANDLE handle, const char* sz_filename, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate=16000);
_FUNASRAPI const char* FunASRGetResult(FUNASR_RESULT result,int n_index);
_FUNASRAPI const int FunASRGetRetNumber(FUNASR_RESULT result);
@@ -63,9 +68,12 @@
_FUNASRAPI const float FunASRGetRetSnippetTime(FUNASR_RESULT result);
// VAD
-_FUNASRAPI FUNASR_HANDLE FsmnVadInit(std::map<std::string, std::string>& model_path, int thread_num);
+_FUNASRAPI FUNASR_HANDLE FsmnVadInit(std::map<std::string, std::string>& model_path, int thread_num, FSMN_VAD_MODE mode=FSMN_VAD_OFFLINE);
+// buffer
+_FUNASRAPI FUNASR_RESULT FsmnVadInferBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FSMN_VAD_MODE mode, QM_CALLBACK fn_callback, int sampling_rate=16000);
+// file, support wav & pcm
+_FUNASRAPI FUNASR_RESULT FsmnVadInfer(FUNASR_HANDLE handle, const char* sz_filename, FSMN_VAD_MODE mode, QM_CALLBACK fn_callback, int sampling_rate=16000);
-_FUNASRAPI FUNASR_RESULT FsmnVadWavFile(FUNASR_HANDLE handle, const char* sz_wavfile, FUNASR_MODE mode, QM_CALLBACK fn_callback);
_FUNASRAPI std::vector<std::vector<int>>* FsmnVadGetResult(FUNASR_RESULT result,int n_index);
_FUNASRAPI void FsmnVadFreeResult(FUNASR_RESULT result);
_FUNASRAPI void FsmnVadUninit(FUNASR_HANDLE handle);
@@ -78,8 +86,10 @@
//OfflineStream
_FUNASRAPI FUNASR_HANDLE FunOfflineInit(std::map<std::string, std::string>& model_path, int thread_num);
-_FUNASRAPI FUNASR_RESULT FunOfflineRecogFile(FUNASR_HANDLE handle, const char* sz_wavfile, FUNASR_MODE mode, QM_CALLBACK fn_callback);
-_FUNASRAPI FUNASR_RESULT FunOfflineRecogPCMBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, int sampling_rate, FUNASR_MODE mode, QM_CALLBACK fn_callback);
+// buffer
+_FUNASRAPI FUNASR_RESULT FunOfflineInferBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate=16000);
+// file, support wav & pcm
+_FUNASRAPI FUNASR_RESULT FunOfflineInfer(FUNASR_HANDLE handle, const char* sz_filename, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate=16000);
_FUNASRAPI void FunOfflineUninit(FUNASR_HANDLE handle);
#ifdef __cplusplus
diff --git a/funasr/runtime/onnxruntime/include/vad-model.h b/funasr/runtime/onnxruntime/include/vad-model.h
index 2a8d6e4..e37bd97 100644
--- a/funasr/runtime/onnxruntime/include/vad-model.h
+++ b/funasr/runtime/onnxruntime/include/vad-model.h
@@ -16,7 +16,7 @@
virtual void LoadConfigFromYaml(const char* filename)=0;
virtual void FbankKaldi(float sample_rate, std::vector<std::vector<float>> &vad_feats,
const std::vector<float> &waves)=0;
- virtual std::vector<std::vector<float>> &LfrCmvn(std::vector<std::vector<float>> &vad_feats)=0;
+ virtual void LfrCmvn(std::vector<std::vector<float>> &vad_feats)=0;
virtual void Forward(
const std::vector<std::vector<float>> &chunk_feats,
std::vector<std::vector<float>> *out_prob)=0;
@@ -24,6 +24,6 @@
virtual void InitCache()=0;
};
-VadModel *CreateVadModel(std::map<std::string, std::string>& model_path, int thread_num);
+VadModel *CreateVadModel(std::map<std::string, std::string>& model_path, int thread_num, int mode);
} // namespace funasr
#endif
diff --git a/funasr/runtime/onnxruntime/readme.md b/funasr/runtime/onnxruntime/readme.md
index 5b42c30..8b5d68a 100644
--- a/funasr/runtime/onnxruntime/readme.md
+++ b/funasr/runtime/onnxruntime/readme.md
@@ -43,11 +43,10 @@
### funasr-onnx-offline
```shell
-./funasr-onnx-offline [--wav-scp <string>] [--wav-path <string>]
- [--punc-quant <string>] [--punc-dir <string>]
- [--vad-quant <string>] [--vad-dir <string>]
- [--quantize <string>] --model-dir <string>
- [--] [--version] [-h]
+./funasr-onnx-offline --model-dir <string> [--quantize <string>]
+ [--vad-dir <string>] [--vad-quant <string>]
+ [--punc-dir <string>] [--punc-quant <string>]
+ --wav-path <string> [--] [--version] [-h]
Where:
--model-dir <string>
(required) the asr model path, which contains model.onnx, config.yaml, am.mvn
@@ -64,12 +63,13 @@
--punc-quant <string>
false (Default), load the model of model.onnx in punc_dir. If set true, load the model of model_quant.onnx in punc_dir
- --wav-scp <string>
- wave scp path
--wav-path <string>
- wave file path
+ (required) the input could be:
+ wav_path, e.g.: asr_example.wav;
+ pcm_path, e.g.: asr_example.pcm;
+ wav.scp, kaldi style wav list (wav_id \t wav_path)
- Required: --model-dir <string>
+ Required: --model-dir <string> --wav-path <string>
If use vad, please add: --vad-dir <string>
If use punc, please add: --punc-dir <string>
@@ -84,20 +84,20 @@
### funasr-onnx-offline-vad
```shell
-./funasr-onnx-offline-vad [--wav-scp <string>] [--wav-path <string>]
- [--quantize <string>] --model-dir <string>
- [--] [--version] [-h]
+./funasr-onnx-offline-vad --model-dir <string> [--quantize <string>]
+ --wav-path <string> [--] [--version] [-h]
Where:
--model-dir <string>
(required) the vad model path, which contains model.onnx, vad.yaml, vad.mvn
--quantize <string>
false (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir
- --wav-scp <string>
- wave scp path
--wav-path <string>
- wave file path
+ (required) the input could be:
+ wav_path, e.g.: asr_example.wav;
+ pcm_path, e.g.: asr_example.pcm;
+ wav.scp, kaldi style wav list (wav_id \t wav_path)
- Required: --model-dir <string>
+ Required: --model-dir <string> --wav-path <string>
For example:
./funasr-onnx-offline-vad \
@@ -107,17 +107,17 @@
### funasr-onnx-offline-punc
```shell
-./funasr-onnx-offline-punc [--txt-path <string>] [--quantize <string>]
- --model-dir <string> [--] [--version] [-h]
+./funasr-onnx-offline-punc --model-dir <string> [--quantize <string>]
+ --txt-path <string> [--] [--version] [-h]
Where:
--model-dir <string>
(required) the punc model path, which contains model.onnx, punc.yaml
--quantize <string>
false (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir
--txt-path <string>
- txt file path, one sentence per line
+ (required) txt file path, one sentence per line
- Required: --model-dir <string>
+ Required: --model-dir <string> --txt-path <string>
For example:
./funasr-onnx-offline-punc \
@@ -126,8 +126,8 @@
```
### funasr-onnx-offline-rtf
```shell
-./funasr-onnx-offline-rtf --thread-num <int32_t> --wav-scp <string>
- [--quantize <string>] --model-dir <string>
+./funasr-onnx-offline-rtf --model-dir <string> [--quantize <string>]
+ --wav-path <string> --thread-num <int32_t>
[--] [--version] [-h]
Where:
--thread-num <int32_t>
@@ -136,8 +136,11 @@
(required) the model path, which contains model.onnx, config.yaml, am.mvn
--quantize <string>
false (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir
- --wav-scp <string>
- (required) wave scp path
+ --wav-path <string>
+ (required) the input could be:
+ wav_path, e.g.: asr_example.wav;
+ pcm_path, e.g.: asr_example.pcm;
+ wav.scp, kaldi style wav list (wav_id \t wav_path)
For example:
./funasr-onnx-offline-rtf \
diff --git a/funasr/runtime/onnxruntime/src/fsmn-vad.cpp b/funasr/runtime/onnxruntime/src/fsmn-vad.cpp
index 0a646f0..f061534 100644
--- a/funasr/runtime/onnxruntime/src/fsmn-vad.cpp
+++ b/funasr/runtime/onnxruntime/src/fsmn-vad.cpp
@@ -225,7 +225,7 @@
}
}
-std::vector<std::vector<float>> &FsmnVad::LfrCmvn(std::vector<std::vector<float>> &vad_feats) {
+void FsmnVad::LfrCmvn(std::vector<std::vector<float>> &vad_feats) {
std::vector<std::vector<float>> out_feats;
int T = vad_feats.size();
@@ -264,7 +264,6 @@
}
}
vad_feats = out_feats;
- return vad_feats;
}
std::vector<std::vector<int>>
@@ -272,7 +271,7 @@
std::vector<std::vector<float>> vad_feats;
std::vector<std::vector<float>> vad_probs;
FbankKaldi(vad_sample_rate_, vad_feats, waves);
- vad_feats = LfrCmvn(vad_feats);
+ LfrCmvn(vad_feats);
Forward(vad_feats, &vad_probs);
E2EVadModel vad_scorer = E2EVadModel();
diff --git a/funasr/runtime/onnxruntime/src/fsmn-vad.h b/funasr/runtime/onnxruntime/src/fsmn-vad.h
index 7a6707c..3d183f8 100644
--- a/funasr/runtime/onnxruntime/src/fsmn-vad.h
+++ b/funasr/runtime/onnxruntime/src/fsmn-vad.h
@@ -36,7 +36,7 @@
void FbankKaldi(float sample_rate, std::vector<std::vector<float>> &vad_feats,
const std::vector<float> &waves);
- std::vector<std::vector<float>> &LfrCmvn(std::vector<std::vector<float>> &vad_feats);
+ void LfrCmvn(std::vector<std::vector<float>> &vad_feats);
void Forward(
const std::vector<std::vector<float>> &chunk_feats,
diff --git a/funasr/runtime/onnxruntime/src/funasr-onnx-offline-punc.cpp b/funasr/runtime/onnxruntime/src/funasr-onnx-offline-punc.cpp
index a8ee9a9..e18c27e 100644
--- a/funasr/runtime/onnxruntime/src/funasr-onnx-offline-punc.cpp
+++ b/funasr/runtime/onnxruntime/src/funasr-onnx-offline-punc.cpp
@@ -36,7 +36,7 @@
TCLAP::CmdLine cmd("funasr-onnx-offline-punc", ' ', "1.0");
TCLAP::ValueArg<std::string> model_dir("", MODEL_DIR, "the punc model path, which contains model.onnx, punc.yaml", true, "", "string");
TCLAP::ValueArg<std::string> quantize("", QUANTIZE, "false (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir", false, "false", "string");
- TCLAP::ValueArg<std::string> txt_path("", TXT_PATH, "txt file path, one sentence per line", false, "", "string");
+ TCLAP::ValueArg<std::string> txt_path("", TXT_PATH, "txt file path, one sentence per line", true, "", "string");
cmd.add(model_dir);
cmd.add(quantize);
diff --git a/funasr/runtime/onnxruntime/src/funasr-onnx-offline-rtf.cpp b/funasr/runtime/onnxruntime/src/funasr-onnx-offline-rtf.cpp
index 76624e7..6ba65c6 100644
--- a/funasr/runtime/onnxruntime/src/funasr-onnx-offline-rtf.cpp
+++ b/funasr/runtime/onnxruntime/src/funasr-onnx-offline-rtf.cpp
@@ -39,7 +39,7 @@
// warm up
for (size_t i = 0; i < 1; i++)
{
- FUNASR_RESULT result=FunASRRecogFile(asr_handle, wav_list[0].c_str(), RASR_NONE, NULL);
+ FUNASR_RESULT result=FunASRInfer(asr_handle, wav_list[0].c_str(), RASR_NONE, NULL, 16000);
}
while (true) {
@@ -50,7 +50,7 @@
}
gettimeofday(&start, NULL);
- FUNASR_RESULT result=FunASRRecogFile(asr_handle, wav_list[i].c_str(), RASR_NONE, NULL);
+ FUNASR_RESULT result=FunASRInfer(asr_handle, wav_list[i].c_str(), RASR_NONE, NULL, 16000);
gettimeofday(&end, NULL);
seconds = (end.tv_sec - start.tv_sec);
@@ -77,6 +77,15 @@
}
}
+bool is_target_file(const std::string& filename, const std::string target) {
+ std::size_t pos = filename.find_last_of(".");
+ if (pos == std::string::npos) {
+ return false;
+ }
+ std::string extension = filename.substr(pos + 1);
+ return (extension == target);
+}
+
void GetValue(TCLAP::ValueArg<std::string>& value_arg, string key, std::map<std::string, std::string>& model_path)
{
if (value_arg.isSet()){
@@ -94,19 +103,19 @@
TCLAP::ValueArg<std::string> model_dir("", MODEL_DIR, "the model path, which contains model.onnx, config.yaml, am.mvn", true, "", "string");
TCLAP::ValueArg<std::string> quantize("", QUANTIZE, "false (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir", false, "false", "string");
- TCLAP::ValueArg<std::string> wav_scp("", WAV_SCP, "wave scp path", true, "", "string");
+ TCLAP::ValueArg<std::string> wav_path("", WAV_PATH, "the input could be: wav_path, e.g.: asr_example.wav; pcm_path, e.g.: asr_example.pcm; wav.scp, kaldi style wav list (wav_id \t wav_path)", true, "", "string");
TCLAP::ValueArg<std::int32_t> thread_num("", THREAD_NUM, "multi-thread num for rtf", true, 0, "int32_t");
cmd.add(model_dir);
cmd.add(quantize);
- cmd.add(wav_scp);
+ cmd.add(wav_path);
cmd.add(thread_num);
cmd.parse(argc, argv);
std::map<std::string, std::string> model_path;
GetValue(model_dir, MODEL_DIR, model_path);
GetValue(quantize, QUANTIZE, model_path);
- GetValue(wav_scp, WAV_SCP, model_path);
+ GetValue(wav_path, WAV_PATH, model_path);
struct timeval start, end;
gettimeofday(&start, NULL);
@@ -125,10 +134,14 @@
// read wav_scp
vector<string> wav_list;
- if(model_path.find(WAV_SCP)!=model_path.end()){
- ifstream in(model_path.at(WAV_SCP));
+ string wav_path_ = model_path.at(WAV_PATH);
+ if(is_target_file(wav_path_, "wav") || is_target_file(wav_path_, "pcm")){
+ wav_list.emplace_back(wav_path_);
+ }
+ else if(is_target_file(wav_path_, "scp")){
+ ifstream in(wav_path_);
if (!in.is_open()) {
- LOG(ERROR) << "Failed to open file: " << model_path.at(WAV_SCP);
+ LOG(ERROR) << "Failed to open file: " << wav_path_;
return 0;
}
string line;
@@ -140,6 +153,9 @@
wav_list.emplace_back(column2);
}
in.close();
+ }else{
+ LOG(ERROR)<<"Please check the wav extension!";
+ exit(-1);
}
// 多线程测试 (multi-thread test)
diff --git a/funasr/runtime/onnxruntime/src/funasr-onnx-offline-vad.cpp b/funasr/runtime/onnxruntime/src/funasr-onnx-offline-vad.cpp
index 37513ae..0f606c6 100644
--- a/funasr/runtime/onnxruntime/src/funasr-onnx-offline-vad.cpp
+++ b/funasr/runtime/onnxruntime/src/funasr-onnx-offline-vad.cpp
@@ -21,6 +21,15 @@
using namespace std;
+bool is_target_file(const std::string& filename, const std::string target) {
+ std::size_t pos = filename.find_last_of(".");
+ if (pos == std::string::npos) {
+ return false;
+ }
+ std::string extension = filename.substr(pos + 1);
+ return (extension == target);
+}
+
void GetValue(TCLAP::ValueArg<std::string>& value_arg, string key, std::map<std::string, std::string>& model_path)
{
if (value_arg.isSet()){
@@ -58,20 +67,17 @@
TCLAP::ValueArg<std::string> model_dir("", MODEL_DIR, "the vad model path, which contains model.onnx, vad.yaml, vad.mvn", true, "", "string");
TCLAP::ValueArg<std::string> quantize("", QUANTIZE, "false (Default), load the model of model.onnx in model_dir. If set true, load the model of model_quant.onnx in model_dir", false, "false", "string");
- TCLAP::ValueArg<std::string> wav_path("", WAV_PATH, "wave file path", false, "", "string");
- TCLAP::ValueArg<std::string> wav_scp("", WAV_SCP, "wave scp path", false, "", "string");
+ TCLAP::ValueArg<std::string> wav_path("", WAV_PATH, "the input could be: wav_path, e.g.: asr_example.wav; pcm_path, e.g.: asr_example.pcm; wav.scp, kaldi style wav list (wav_id \t wav_path)", true, "", "string");
cmd.add(model_dir);
cmd.add(quantize);
cmd.add(wav_path);
- cmd.add(wav_scp);
cmd.parse(argc, argv);
std::map<std::string, std::string> model_path;
GetValue(model_dir, MODEL_DIR, model_path);
GetValue(quantize, QUANTIZE, model_path);
GetValue(wav_path, WAV_PATH, model_path);
- GetValue(wav_scp, WAV_SCP, model_path);
struct timeval start, end;
gettimeofday(&start, NULL);
@@ -89,14 +95,14 @@
long modle_init_micros = ((seconds * 1000000) + end.tv_usec) - (start.tv_usec);
LOG(INFO) << "Model initialization takes " << (double)modle_init_micros / 1000000 << " s";
- // read wav_path and wav_scp
+ // read wav_path
vector<string> wav_list;
-
- if(model_path.find(WAV_PATH)!=model_path.end()){
- wav_list.emplace_back(model_path.at(WAV_PATH));
+ string wav_path_ = model_path.at(WAV_PATH);
+ if(is_target_file(wav_path_, "wav") || is_target_file(wav_path_, "pcm")){
+ wav_list.emplace_back(wav_path_);
}
- if(model_path.find(WAV_SCP)!=model_path.end()){
- ifstream in(model_path.at(WAV_SCP));
+ else if(is_target_file(wav_path_, "scp")){
+ ifstream in(wav_path_);
if (!in.is_open()) {
- LOG(ERROR) << "Failed to open file: " << model_path.at(WAV_SCP) ;
+ LOG(ERROR) << "Failed to open file: " << wav_path_;
return 0;
@@ -110,13 +116,16 @@
wav_list.emplace_back(column2);
}
in.close();
+ }else{
+ LOG(ERROR)<<"Please check the wav extension!";
+ exit(-1);
}
float snippet_time = 0.0f;
long taking_micros = 0;
for(auto& wav_file : wav_list){
gettimeofday(&start, NULL);
- FUNASR_RESULT result=FsmnVadWavFile(vad_hanlde, wav_file.c_str(), RASR_NONE, NULL);
+ FUNASR_RESULT result=FsmnVadInfer(vad_hanlde, wav_file.c_str(), FSMN_VAD_OFFLINE, NULL, 16000);
gettimeofday(&end, NULL);
seconds = (end.tv_sec - start.tv_sec);
taking_micros += ((seconds * 1000000) + end.tv_usec) - (start.tv_usec);
diff --git a/funasr/runtime/onnxruntime/src/funasr-onnx-offline.cpp b/funasr/runtime/onnxruntime/src/funasr-onnx-offline.cpp
index 343039d..3472925 100644
--- a/funasr/runtime/onnxruntime/src/funasr-onnx-offline.cpp
+++ b/funasr/runtime/onnxruntime/src/funasr-onnx-offline.cpp
@@ -20,6 +20,15 @@
using namespace std;
+bool is_target_file(const std::string& filename, const std::string target) {
+ std::size_t pos = filename.find_last_of(".");
+ if (pos == std::string::npos) {
+ return false;
+ }
+ std::string extension = filename.substr(pos + 1);
+ return (extension == target);
+}
+
void GetValue(TCLAP::ValueArg<std::string>& value_arg, string key, std::map<std::string, std::string>& model_path)
{
if (value_arg.isSet()){
@@ -41,8 +50,7 @@
TCLAP::ValueArg<std::string> punc_dir("", PUNC_DIR, "the punc model path, which contains model.onnx, punc.yaml", false, "", "string");
TCLAP::ValueArg<std::string> punc_quant("", PUNC_QUANT, "false (Default), load the model of model.onnx in punc_dir. If set true, load the model of model_quant.onnx in punc_dir", false, "false", "string");
- TCLAP::ValueArg<std::string> wav_path("", WAV_PATH, "wave file path", false, "", "string");
- TCLAP::ValueArg<std::string> wav_scp("", WAV_SCP, "wave scp path", false, "", "string");
+ TCLAP::ValueArg<std::string> wav_path("", WAV_PATH, "the input could be: wav_path, e.g.: asr_example.wav; pcm_path, e.g.: asr_example.pcm; wav.scp, kaldi style wav list (wav_id \t wav_path)", true, "", "string");
cmd.add(model_dir);
cmd.add(quantize);
@@ -51,7 +59,6 @@
cmd.add(punc_dir);
cmd.add(punc_quant);
cmd.add(wav_path);
- cmd.add(wav_scp);
cmd.parse(argc, argv);
std::map<std::string, std::string> model_path;
@@ -62,7 +69,6 @@
GetValue(punc_dir, PUNC_DIR, model_path);
GetValue(punc_quant, PUNC_QUANT, model_path);
GetValue(wav_path, WAV_PATH, model_path);
- GetValue(wav_scp, WAV_SCP, model_path);
struct timeval start, end;
gettimeofday(&start, NULL);
@@ -80,14 +86,14 @@
long modle_init_micros = ((seconds * 1000000) + end.tv_usec) - (start.tv_usec);
LOG(INFO) << "Model initialization takes " << (double)modle_init_micros / 1000000 << " s";
- // read wav_path and wav_scp
+ // read wav_path
vector<string> wav_list;
-
- if(model_path.find(WAV_PATH)!=model_path.end()){
- wav_list.emplace_back(model_path.at(WAV_PATH));
+ string wav_path_ = model_path.at(WAV_PATH);
+ if(is_target_file(wav_path_, "wav") || is_target_file(wav_path_, "pcm")){
+ wav_list.emplace_back(wav_path_);
}
- if(model_path.find(WAV_SCP)!=model_path.end()){
- ifstream in(model_path.at(WAV_SCP));
+ else if(is_target_file(wav_path_, "scp")){
+ ifstream in(wav_path_);
if (!in.is_open()) {
- LOG(ERROR) << "Failed to open file: " << model_path.at(WAV_SCP) ;
+ LOG(ERROR) << "Failed to open file: " << wav_path_;
return 0;
@@ -101,13 +107,16 @@
wav_list.emplace_back(column2);
}
in.close();
+ }else{
+ LOG(ERROR)<<"Please check the wav extension!";
+ exit(-1);
}
float snippet_time = 0.0f;
long taking_micros = 0;
for(auto& wav_file : wav_list){
gettimeofday(&start, NULL);
- FUNASR_RESULT result=FunOfflineRecogFile(asr_hanlde, wav_file.c_str(), RASR_NONE, NULL);
+ FUNASR_RESULT result=FunOfflineInfer(asr_hanlde, wav_file.c_str(), RASR_NONE, NULL, 16000);
gettimeofday(&end, NULL);
seconds = (end.tv_sec - start.tv_sec);
taking_micros += ((seconds * 1000000) + end.tv_usec) - (start.tv_usec);
diff --git a/funasr/runtime/onnxruntime/src/funasrruntime.cpp b/funasr/runtime/onnxruntime/src/funasrruntime.cpp
index 893ba70..adef504 100644
--- a/funasr/runtime/onnxruntime/src/funasrruntime.cpp
+++ b/funasr/runtime/onnxruntime/src/funasrruntime.cpp
@@ -11,9 +11,9 @@
return mm;
}
- _FUNASRAPI FUNASR_HANDLE FsmnVadInit(std::map<std::string, std::string>& model_path, int thread_num)
+ _FUNASRAPI FUNASR_HANDLE FsmnVadInit(std::map<std::string, std::string>& model_path, int thread_num, FSMN_VAD_MODE mode)
{
- funasr::VadModel* mm = funasr::CreateVadModel(model_path, thread_num);
+ funasr::VadModel* mm = funasr::CreateVadModel(model_path, thread_num, mode);
return mm;
}
@@ -30,36 +30,7 @@
}
// APIs for ASR Infer
- _FUNASRAPI FUNASR_RESULT FunASRRecogBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FUNASR_MODE mode, QM_CALLBACK fn_callback)
- {
- funasr::Model* recog_obj = (funasr::Model*)handle;
- if (!recog_obj)
- return nullptr;
-
- int32_t sampling_rate = -1;
- funasr::Audio audio(1);
- if (!audio.LoadWav(sz_buf, n_len, &sampling_rate))
- return nullptr;
-
- float* buff;
- int len;
- int flag=0;
- funasr::FUNASR_RECOG_RESULT* p_result = new funasr::FUNASR_RECOG_RESULT;
- p_result->snippet_time = audio.GetTimeLen();
- int n_step = 0;
- int n_total = audio.GetQueueSize();
- while (audio.Fetch(buff, len, flag) > 0) {
- string msg = recog_obj->Forward(buff, len, flag);
- p_result->msg += msg;
- n_step++;
- if (fn_callback)
- fn_callback(n_step, n_total);
- }
-
- return p_result;
- }
-
- _FUNASRAPI FUNASR_RESULT FunASRRecogPCMBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, int sampling_rate, FUNASR_MODE mode, QM_CALLBACK fn_callback)
+ _FUNASRAPI FUNASR_RESULT FunASRInferBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate)
{
funasr::Model* recog_obj = (funasr::Model*)handle;
if (!recog_obj)
@@ -87,23 +58,32 @@
return p_result;
}
- _FUNASRAPI FUNASR_RESULT FunASRRecogPCMFile(FUNASR_HANDLE handle, const char* sz_filename, int sampling_rate, FUNASR_MODE mode, QM_CALLBACK fn_callback)
+ _FUNASRAPI FUNASR_RESULT FunASRInfer(FUNASR_HANDLE handle, const char* sz_filename, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate)
{
funasr::Model* recog_obj = (funasr::Model*)handle;
if (!recog_obj)
return nullptr;
funasr::Audio audio(1);
- if (!audio.LoadPcmwav(sz_filename, &sampling_rate))
- return nullptr;
+ if(funasr::is_target_file(sz_filename, "wav")){
+ int32_t sampling_rate_ = -1;
+ if(!audio.LoadWav(sz_filename, &sampling_rate_))
+ return nullptr;
+ }else if(funasr::is_target_file(sz_filename, "pcm")){
+ if (!audio.LoadPcmwav(sz_filename, &sampling_rate))
+ return nullptr;
+ }else{
+ LOG(ERROR)<<"Wrong wav extension";
+ return nullptr;
+ }
float* buff;
int len;
int flag = 0;
- funasr::FUNASR_RECOG_RESULT* p_result = new funasr::FUNASR_RECOG_RESULT;
- p_result->snippet_time = audio.GetTimeLen();
int n_step = 0;
int n_total = audio.GetQueueSize();
+ funasr::FUNASR_RECOG_RESULT* p_result = new funasr::FUNASR_RECOG_RESULT;
+ p_result->snippet_time = audio.GetTimeLen();
while (audio.Fetch(buff, len, flag) > 0) {
string msg = recog_obj->Forward(buff, len, flag);
p_result->msg += msg;
@@ -115,46 +95,45 @@
return p_result;
}
- _FUNASRAPI FUNASR_RESULT FunASRRecogFile(FUNASR_HANDLE handle, const char* sz_wavfile, FUNASR_MODE mode, QM_CALLBACK fn_callback)
- {
- funasr::Model* recog_obj = (funasr::Model*)handle;
- if (!recog_obj)
- return nullptr;
-
- int32_t sampling_rate = -1;
- funasr::Audio audio(1);
- if(!audio.LoadWav(sz_wavfile, &sampling_rate))
- return nullptr;
-
- float* buff;
- int len;
- int flag = 0;
- int n_step = 0;
- int n_total = audio.GetQueueSize();
- funasr::FUNASR_RECOG_RESULT* p_result = new funasr::FUNASR_RECOG_RESULT;
- p_result->snippet_time = audio.GetTimeLen();
- while (audio.Fetch(buff, len, flag) > 0) {
- string msg = recog_obj->Forward(buff, len, flag);
- p_result->msg+= msg;
- n_step++;
- if (fn_callback)
- fn_callback(n_step, n_total);
- }
-
- return p_result;
- }
-
// APIs for VAD Infer
- _FUNASRAPI FUNASR_RESULT FsmnVadWavFile(FUNASR_HANDLE handle, const char* sz_wavfile, FUNASR_MODE mode, QM_CALLBACK fn_callback)
+ _FUNASRAPI FUNASR_RESULT FsmnVadInferBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FSMN_VAD_MODE mode, QM_CALLBACK fn_callback, int sampling_rate)
{
funasr::VadModel* vad_obj = (funasr::VadModel*)handle;
if (!vad_obj)
return nullptr;
-
- int32_t sampling_rate = -1;
+
funasr::Audio audio(1);
- if(!audio.LoadWav(sz_wavfile, &sampling_rate))
+ if (!audio.LoadPcmwav(sz_buf, n_len, &sampling_rate))
return nullptr;
+
+ funasr::FUNASR_VAD_RESULT* p_result = new funasr::FUNASR_VAD_RESULT;
+ p_result->snippet_time = audio.GetTimeLen();
+
+ vector<std::vector<int>> vad_segments;
+ audio.Split(vad_obj, vad_segments);
+ p_result->segments = new vector<std::vector<int>>(vad_segments);
+
+ return p_result;
+ }
+
+ _FUNASRAPI FUNASR_RESULT FsmnVadInfer(FUNASR_HANDLE handle, const char* sz_filename, FSMN_VAD_MODE mode, QM_CALLBACK fn_callback, int sampling_rate)
+ {
+ funasr::VadModel* vad_obj = (funasr::VadModel*)handle;
+ if (!vad_obj)
+ return nullptr;
+
+ funasr::Audio audio(1);
+ if(funasr::is_target_file(sz_filename, "wav")){
+ int32_t sampling_rate_ = -1;
+ if(!audio.LoadWav(sz_filename, &sampling_rate_))
+ return nullptr;
+ }else if(funasr::is_target_file(sz_filename, "pcm")){
+ if (!audio.LoadPcmwav(sz_filename, &sampling_rate))
+ return nullptr;
+ }else{
+ LOG(ERROR)<<"Wrong wav extension";
+ return nullptr;
+ }
funasr::FUNASR_VAD_RESULT* p_result = new funasr::FUNASR_VAD_RESULT;
p_result->snippet_time = audio.GetTimeLen();
@@ -178,43 +157,7 @@
}
// APIs for Offline-stream Infer
- _FUNASRAPI FUNASR_RESULT FunOfflineRecogFile(FUNASR_HANDLE handle, const char* sz_wavfile, FUNASR_MODE mode, QM_CALLBACK fn_callback)
- {
- funasr::OfflineStream* offline_stream = (funasr::OfflineStream*)handle;
- if (!offline_stream)
- return nullptr;
-
- int32_t sampling_rate = -1;
- funasr::Audio audio(1);
- if(!audio.LoadWav(sz_wavfile, &sampling_rate))
- return nullptr;
- if(offline_stream->UseVad()){
- audio.Split(offline_stream);
- }
-
- float* buff;
- int len;
- int flag = 0;
- int n_step = 0;
- int n_total = audio.GetQueueSize();
- funasr::FUNASR_RECOG_RESULT* p_result = new funasr::FUNASR_RECOG_RESULT;
- p_result->snippet_time = audio.GetTimeLen();
- while (audio.Fetch(buff, len, flag) > 0) {
- string msg = (offline_stream->asr_handle)->Forward(buff, len, flag);
- p_result->msg+= msg;
- n_step++;
- if (fn_callback)
- fn_callback(n_step, n_total);
- }
- if(offline_stream->UsePunc()){
- string punc_res = (offline_stream->punc_handle)->AddPunc((p_result->msg).c_str());
- p_result->msg = punc_res;
- }
-
- return p_result;
- }
-
- _FUNASRAPI FUNASR_RESULT FunOfflineRecogPCMBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, int sampling_rate, FUNASR_MODE mode, QM_CALLBACK fn_callback)
+ _FUNASRAPI FUNASR_RESULT FunOfflineInferBuffer(FUNASR_HANDLE handle, const char* sz_buf, int n_len, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate)
{
funasr::OfflineStream* offline_stream = (funasr::OfflineStream*)handle;
if (!offline_stream)
@@ -249,6 +192,50 @@
return p_result;
}
+ _FUNASRAPI FUNASR_RESULT FunOfflineInfer(FUNASR_HANDLE handle, const char* sz_filename, FUNASR_MODE mode, QM_CALLBACK fn_callback, int sampling_rate)
+ {
+ funasr::OfflineStream* offline_stream = (funasr::OfflineStream*)handle;
+ if (!offline_stream)
+ return nullptr;
+
+ funasr::Audio audio(1);
+ if(funasr::is_target_file(sz_filename, "wav")){
+ int32_t sampling_rate_ = -1;
+ if(!audio.LoadWav(sz_filename, &sampling_rate_))
+ return nullptr;
+ }else if(funasr::is_target_file(sz_filename, "pcm")){
+ if (!audio.LoadPcmwav(sz_filename, &sampling_rate))
+ return nullptr;
+ }else{
+ LOG(ERROR)<<"Wrong wav extension";
+ return nullptr;
+ }
+ if(offline_stream->UseVad()){
+ audio.Split(offline_stream);
+ }
+
+ float* buff;
+ int len;
+ int flag = 0;
+ int n_step = 0;
+ int n_total = audio.GetQueueSize();
+ funasr::FUNASR_RECOG_RESULT* p_result = new funasr::FUNASR_RECOG_RESULT;
+ p_result->snippet_time = audio.GetTimeLen();
+ while (audio.Fetch(buff, len, flag) > 0) {
+ string msg = (offline_stream->asr_handle)->Forward(buff, len, flag);
+ p_result->msg+= msg;
+ n_step++;
+ if (fn_callback)
+ fn_callback(n_step, n_total);
+ }
+ if(offline_stream->UsePunc()){
+ string punc_res = (offline_stream->punc_handle)->AddPunc((p_result->msg).c_str());
+ p_result->msg = punc_res;
+ }
+
+ return p_result;
+ }
+
_FUNASRAPI const int FunASRGetRetNumber(FUNASR_RESULT result)
{
if (!result)
diff --git a/funasr/runtime/onnxruntime/src/util.cpp b/funasr/runtime/onnxruntime/src/util.cpp
index d29c5c0..755913c 100644
--- a/funasr/runtime/onnxruntime/src/util.cpp
+++ b/funasr/runtime/onnxruntime/src/util.cpp
@@ -180,4 +180,13 @@
}
}
+bool is_target_file(const std::string& filename, const std::string target) {
+ std::size_t pos = filename.find_last_of(".");
+ if (pos == std::string::npos) {
+ return false;
+ }
+ std::string extension = filename.substr(pos + 1);
+ return (extension == target);
+}
+
} // namespace funasr
\ No newline at end of file
diff --git a/funasr/runtime/onnxruntime/src/util.h b/funasr/runtime/onnxruntime/src/util.h
index 95ef458..8823a32 100644
--- a/funasr/runtime/onnxruntime/src/util.h
+++ b/funasr/runtime/onnxruntime/src/util.h
@@ -25,6 +25,7 @@
extern void Glu(Tensor<float> *din, Tensor<float> *dout);
string PathAppend(const string &p1, const string &p2);
+bool is_target_file(const std::string& filename, const std::string target);
} // namespace funasr
#endif
diff --git a/funasr/runtime/onnxruntime/src/vad-model.cpp b/funasr/runtime/onnxruntime/src/vad-model.cpp
index 764db00..336758f 100644
--- a/funasr/runtime/onnxruntime/src/vad-model.cpp
+++ b/funasr/runtime/onnxruntime/src/vad-model.cpp
@@ -1,10 +1,14 @@
#include "precomp.h"
namespace funasr {
-VadModel *CreateVadModel(std::map<std::string, std::string>& model_path, int thread_num)
+VadModel *CreateVadModel(std::map<std::string, std::string>& model_path, int thread_num, int mode)
{
VadModel *mm;
- mm = new FsmnVad();
+ if(mode == FSMN_VAD_OFFLINE){
+ mm = new FsmnVad();
+ }else{
+ LOG(ERROR)<<"Online fsmn vad not imp!"; return nullptr;
+ }
string vad_model_path;
string vad_cmvn_path;
diff --git a/funasr/runtime/websocket/websocketsrv.cpp b/funasr/runtime/websocket/websocketsrv.cpp
index 9e56667..1a6adbf 100644
--- a/funasr/runtime/websocket/websocketsrv.cpp
+++ b/funasr/runtime/websocket/websocketsrv.cpp
@@ -25,8 +25,8 @@
if (!buffer.empty()) {
// fout.write(buffer.data(), buffer.size());
// feed data to asr engine
- FUNASR_RESULT Result = FunOfflineRecogPCMBuffer(
- asr_hanlde, buffer.data(), buffer.size(), 16000, RASR_NONE, NULL);
+ FUNASR_RESULT Result = FunOfflineInferBuffer(
+ asr_hanlde, buffer.data(), buffer.size(), RASR_NONE, NULL, 16000);
std::string asr_result =
((FUNASR_RECOG_RESULT*)Result)->msg; // get decode result
--
Gitblit v1.9.1