From 429995f4d007adba9099196a868c65a5b6cf14a5 Mon Sep 17 00:00:00 2001
From: aky15 <ankeyu.aky@11.17.44.249>
Date: Wed, 17 May 2023 17:38:49 +0800
Subject: [PATCH] Merge branch 'dev_infer' of https://github.com/alibaba-damo-academy/FunASR into dev_infer
---
egs/aishell/data2vec_transformer_finetune/run.sh | 1
egs/aishell2/transformer/conf/train_asr_transformer.yaml | 1
egs/aishell/paraformerbert/run.sh | 2
egs/aishell2/paraformerbert/local/extract_embeds.sh | 2
egs/aishell2/paraformerbert/run.sh | 2
egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer_sv.py | 4
egs/aishell/conformer/run.sh | 1
egs/aishell/paraformer/conf/train_asr_paraformer_conformer_12e_6d_2048_256.yaml | 2
egs/aishell/transformer/run.sh | 1
egs_modelscope/speaker_diarization/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/infer.py | 8 +
funasr/bin/train.py | 6 +
egs/aishell2/data2vec_pretrain/run.sh | 3
egs/aishell2/conformer/conf/train_asr_conformer.yaml | 1
egs/aishell2/paraformer/conf/train_asr_paraformer_conformer_20e_1280_320_6d_1280_320.yaml | 1
funasr/bin/sv_inference_launch.py | 3
egs/aishell2/data2vec_pretrain/conf/train_pretrain_transformer.yaml | 4
egs/aishell/paraformerbert/local/extract_embeds.sh | 2
funasr/models/encoder/sanm_encoder.py | 12 --
funasr/bin/diar_inference_launch.py | 3
egs/aishell/conformer/conf/train_asr_conformer.yaml | 4
egs/aishell2/conformer/run.sh | 3
egs/alimeeting/sa-asr/conf/train_sa_asr_conformer.yaml | 1
egs/librispeech/conformer/run.sh | 2
egs/aishell/data2vec_paraformer_finetune/conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml | 2
egs/librispeech_100h/conformer/run.sh | 2
egs/aishell/transformer/conf/train_asr_transformer.yaml | 2
funasr/bin/asr_infer.py | 17 ---
funasr/utils/prepare_data.py | 183 +++++++++++++++++++----------------
egs/aishell/data2vec_paraformer_finetune/run.sh | 1
egs/aishell/data2vec_transformer_finetune/conf/train_asr_transformer_12e_6d_3072_768.yaml | 2
egs/aishell2/transformer/run.sh | 1
funasr/models/predictor/cif.py | 7
egs/aishell/paraformer/run.sh | 1
funasr/bin/sv_infer.py | 1
egs/aishell2/paraformer/run.sh | 1
egs_modelscope/speaker_diarization/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/infer.py | 8 +
36 files changed, 163 insertions(+), 134 deletions(-)
diff --git a/egs/aishell/conformer/conf/train_asr_conformer.yaml b/egs/aishell/conformer/conf/train_asr_conformer.yaml
index ea2353e..4814ee7 100644
--- a/egs/aishell/conformer/conf/train_asr_conformer.yaml
+++ b/egs/aishell/conformer/conf/train_asr_conformer.yaml
@@ -83,6 +83,8 @@
num_time_mask: 2
dataset_conf:
+ data_names: speech,text
+ data_types: sound,text
shuffle: True
shuffle_conf:
shuffle_size: 2048
@@ -93,4 +95,4 @@
num_workers: 8
log_interval: 50
-normalize: None
+normalize: None
\ No newline at end of file
diff --git a/egs/aishell/conformer/run.sh b/egs/aishell/conformer/run.sh
index fa52c60..09105dd 100755
--- a/egs/aishell/conformer/run.sh
+++ b/egs/aishell/conformer/run.sh
@@ -135,6 +135,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--resume true \
diff --git a/egs/aishell/data2vec_paraformer_finetune/conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml b/egs/aishell/data2vec_paraformer_finetune/conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml
index 287b088..1e1acee 100644
--- a/egs/aishell/data2vec_paraformer_finetune/conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml
+++ b/egs/aishell/data2vec_paraformer_finetune/conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml
@@ -105,6 +105,8 @@
r_order: 1
dataset_conf:
+ data_names: speech,text
+ data_types: sound,text
shuffle: True
shuffle_conf:
shuffle_size: 2048
diff --git a/egs/aishell/data2vec_paraformer_finetune/run.sh b/egs/aishell/data2vec_paraformer_finetune/run.sh
index fd7222f..bf6d231 100755
--- a/egs/aishell/data2vec_paraformer_finetune/run.sh
+++ b/egs/aishell/data2vec_paraformer_finetune/run.sh
@@ -139,6 +139,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--init_param ${init_param} \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--resume true \
diff --git a/egs/aishell/data2vec_transformer_finetune/conf/train_asr_transformer_12e_6d_3072_768.yaml b/egs/aishell/data2vec_transformer_finetune/conf/train_asr_transformer_12e_6d_3072_768.yaml
index ad3ad2e..32a7b5b 100644
--- a/egs/aishell/data2vec_transformer_finetune/conf/train_asr_transformer_12e_6d_3072_768.yaml
+++ b/egs/aishell/data2vec_transformer_finetune/conf/train_asr_transformer_12e_6d_3072_768.yaml
@@ -96,6 +96,8 @@
num_time_mask: 2
dataset_conf:
+ data_names: speech,text
+ data_types: sound,text
shuffle: True
shuffle_conf:
shuffle_size: 2048
diff --git a/egs/aishell/data2vec_transformer_finetune/run.sh b/egs/aishell/data2vec_transformer_finetune/run.sh
index 7a28e3b..666b18c 100755
--- a/egs/aishell/data2vec_transformer_finetune/run.sh
+++ b/egs/aishell/data2vec_transformer_finetune/run.sh
@@ -139,6 +139,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--init_param ${init_param} \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
diff --git a/egs/aishell/paraformer/conf/train_asr_paraformer_conformer_12e_6d_2048_256.yaml b/egs/aishell/paraformer/conf/train_asr_paraformer_conformer_12e_6d_2048_256.yaml
index bac8d04..6a14b7f 100644
--- a/egs/aishell/paraformer/conf/train_asr_paraformer_conformer_12e_6d_2048_256.yaml
+++ b/egs/aishell/paraformer/conf/train_asr_paraformer_conformer_12e_6d_2048_256.yaml
@@ -93,6 +93,8 @@
tail_threshold: 0.45
dataset_conf:
+ data_names: speech,text
+ data_types: sound,text
shuffle: True
shuffle_conf:
shuffle_size: 2048
diff --git a/egs/aishell/paraformer/run.sh b/egs/aishell/paraformer/run.sh
index bf23aa2..9723426 100755
--- a/egs/aishell/paraformer/run.sh
+++ b/egs/aishell/paraformer/run.sh
@@ -135,6 +135,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--resume true \
diff --git a/egs/aishell/paraformerbert/local/extract_embeds.sh b/egs/aishell/paraformerbert/local/extract_embeds.sh
index c07e528..ca0c878 100755
--- a/egs/aishell/paraformerbert/local/extract_embeds.sh
+++ b/egs/aishell/paraformerbert/local/extract_embeds.sh
@@ -54,6 +54,8 @@
cat ${local_records_dir}/embeds.${JOB}.shape || exit 1;
done > ${local_scp_dir_raw}/embeds.shape
fi
+
+ cp ${local_scp_dir_raw}/embeds.scp ${raw_dataset_path}/data/${data_set}/embeds.scp
done
echo "embeds is in: ${local_scp_dir_raw}"
diff --git a/egs/aishell/paraformerbert/run.sh b/egs/aishell/paraformerbert/run.sh
index 5ba9671..dec256d 100755
--- a/egs/aishell/paraformerbert/run.sh
+++ b/egs/aishell/paraformerbert/run.sh
@@ -146,7 +146,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
- --embed_path ${feats_dir}/data \
+    --data_file_names "wav.scp,text,embeds.scp" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--resume true \
diff --git a/egs/aishell/transformer/conf/train_asr_transformer.yaml b/egs/aishell/transformer/conf/train_asr_transformer.yaml
index 22e651b..b386565 100644
--- a/egs/aishell/transformer/conf/train_asr_transformer.yaml
+++ b/egs/aishell/transformer/conf/train_asr_transformer.yaml
@@ -73,6 +73,8 @@
warmup_steps: 25000
dataset_conf:
+ data_names: speech,text
+ data_types: sound,text
shuffle: True
shuffle_conf:
shuffle_size: 2048
diff --git a/egs/aishell/transformer/run.sh b/egs/aishell/transformer/run.sh
index 86088c3..6989606 100755
--- a/egs/aishell/transformer/run.sh
+++ b/egs/aishell/transformer/run.sh
@@ -135,6 +135,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--resume true \
diff --git a/egs/aishell2/conformer/conf/train_asr_conformer.yaml b/egs/aishell2/conformer/conf/train_asr_conformer.yaml
index 3fd034f..8183378 100644
--- a/egs/aishell2/conformer/conf/train_asr_conformer.yaml
+++ b/egs/aishell2/conformer/conf/train_asr_conformer.yaml
@@ -84,6 +84,7 @@
num_time_mask: 2
dataset_conf:
+ data_names: speech,text
data_types: sound,text
shuffle: True
shuffle_conf:
diff --git a/egs/aishell2/conformer/run.sh b/egs/aishell2/conformer/run.sh
index c3b4065..ae57431 100755
--- a/egs/aishell2/conformer/run.sh
+++ b/egs/aishell2/conformer/run.sh
@@ -103,8 +103,6 @@
utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/${train_set}/text | cut -f 2- -d" " | tr " " "\n" \
| sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
echo "<unk>" >> ${token_list}
- mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${train_set}
- mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}
fi
# LM Training Stage
@@ -139,6 +137,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--dataset_type $dataset_type \
diff --git a/egs/aishell2/data2vec_pretrain/conf/train_pretrain_transformer.yaml b/egs/aishell2/data2vec_pretrain/conf/train_pretrain_transformer.yaml
index b6e8808..767d8ba 100644
--- a/egs/aishell2/data2vec_pretrain/conf/train_pretrain_transformer.yaml
+++ b/egs/aishell2/data2vec_pretrain/conf/train_pretrain_transformer.yaml
@@ -72,8 +72,8 @@
# for dataset
dataset_conf:
batch_mode: clipping
- data_names: speech,none
- data_types: sound,none
+ data_names: speech
+ data_types: sound
shuffle: true
shuffle_conf:
shuffle_size: 12800
diff --git a/egs/aishell2/data2vec_pretrain/run.sh b/egs/aishell2/data2vec_pretrain/run.sh
index 250254f..9334a4b 100755
--- a/egs/aishell2/data2vec_pretrain/run.sh
+++ b/egs/aishell2/data2vec_pretrain/run.sh
@@ -82,8 +82,6 @@
utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/${train_set}/text | cut -f 2- -d" " | tr " " "\n" \
| sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
echo "<unk>" >> ${token_list}
- mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${train_set}
- mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}
fi
# Training Stage
@@ -110,6 +108,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--dataset_type $dataset_type \
diff --git a/egs/aishell2/paraformer/conf/train_asr_paraformer_conformer_20e_1280_320_6d_1280_320.yaml b/egs/aishell2/paraformer/conf/train_asr_paraformer_conformer_20e_1280_320_6d_1280_320.yaml
index 7fc9794..3ecf44e 100644
--- a/egs/aishell2/paraformer/conf/train_asr_paraformer_conformer_20e_1280_320_6d_1280_320.yaml
+++ b/egs/aishell2/paraformer/conf/train_asr_paraformer_conformer_20e_1280_320_6d_1280_320.yaml
@@ -94,6 +94,7 @@
r_order: 1
dataset_conf:
+ data_names: speech,text
data_types: sound,text
shuffle: True
shuffle_conf:
diff --git a/egs/aishell2/paraformer/run.sh b/egs/aishell2/paraformer/run.sh
index a8cde55..83e49d0 100755
--- a/egs/aishell2/paraformer/run.sh
+++ b/egs/aishell2/paraformer/run.sh
@@ -137,6 +137,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--dataset_type $dataset_type \
diff --git a/egs/aishell2/paraformerbert/local/extract_embeds.sh b/egs/aishell2/paraformerbert/local/extract_embeds.sh
index ee89924..d7dd4f2 100755
--- a/egs/aishell2/paraformerbert/local/extract_embeds.sh
+++ b/egs/aishell2/paraformerbert/local/extract_embeds.sh
@@ -54,6 +54,8 @@
cat ${local_records_dir}/embeds.${JOB}.shape || exit 1;
done > ${local_scp_dir_raw}/embeds.shape
fi
+
+ cp ${local_scp_dir_raw}/embeds.scp ${raw_dataset_path}/data/${data_set}/embeds.scp
done
echo "embeds is in: ${local_scp_dir_raw}"
diff --git a/egs/aishell2/paraformerbert/run.sh b/egs/aishell2/paraformerbert/run.sh
index 44aa357..4d2ffaf 100755
--- a/egs/aishell2/paraformerbert/run.sh
+++ b/egs/aishell2/paraformerbert/run.sh
@@ -147,7 +147,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
- --embed_path ${feats_dir}/data \
+    --data_file_names "wav.scp,text,embeds.scp" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--dataset_type $dataset_type \
diff --git a/egs/aishell2/transformer/conf/train_asr_transformer.yaml b/egs/aishell2/transformer/conf/train_asr_transformer.yaml
index 2d16c18..1b76e2a 100644
--- a/egs/aishell2/transformer/conf/train_asr_transformer.yaml
+++ b/egs/aishell2/transformer/conf/train_asr_transformer.yaml
@@ -78,6 +78,7 @@
num_time_mask: 2
dataset_conf:
+ data_names: speech,text
data_types: sound,text
shuffle: True
shuffle_conf:
diff --git a/egs/aishell2/transformer/run.sh b/egs/aishell2/transformer/run.sh
index b66c63d..6e5c82a 100755
--- a/egs/aishell2/transformer/run.sh
+++ b/egs/aishell2/transformer/run.sh
@@ -137,6 +137,7 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
--speed_perturb ${speed_perturb} \
--dataset_type $dataset_type \
diff --git a/egs/alimeeting/sa-asr/conf/train_sa_asr_conformer.yaml b/egs/alimeeting/sa-asr/conf/train_sa_asr_conformer.yaml
index 421d7df..aa48b2d 100644
--- a/egs/alimeeting/sa-asr/conf/train_sa_asr_conformer.yaml
+++ b/egs/alimeeting/sa-asr/conf/train_sa_asr_conformer.yaml
@@ -43,7 +43,6 @@
pooling_type: statistic
num_nodes_resnet1: 256
num_nodes_last_layer: 256
- batchnorm_momentum: 0.5
# decoder related
decoder: sa_decoder
diff --git a/egs/librispeech/conformer/run.sh b/egs/librispeech/conformer/run.sh
index 2e34cbf..b942dd2 100755
--- a/egs/librispeech/conformer/run.sh
+++ b/egs/librispeech/conformer/run.sh
@@ -55,7 +55,7 @@
inference_config=conf/decode_asr_transformer.yaml
#inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml
-inference_asr_model=valid.acc.ave_10best.pth
+inference_asr_model=valid.acc.ave_10best.pb
# you can set gpu num for decoding here
gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training stage by default
diff --git a/egs/librispeech_100h/conformer/run.sh b/egs/librispeech_100h/conformer/run.sh
index da7a597..d1a20bc 100755
--- a/egs/librispeech_100h/conformer/run.sh
+++ b/egs/librispeech_100h/conformer/run.sh
@@ -55,7 +55,7 @@
inference_config=conf/decode_asr_transformer.yaml
#inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml
-inference_asr_model=valid.acc.ave_10best.pth
+inference_asr_model=valid.acc.ave_10best.pb
# you can set gpu num for decoding here
gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training stage by default
diff --git a/egs_modelscope/speaker_diarization/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/infer.py b/egs_modelscope/speaker_diarization/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/infer.py
index 3db6f7d..9e80d2b 100644
--- a/egs_modelscope/speaker_diarization/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/infer.py
+++ b/egs_modelscope/speaker_diarization/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/infer.py
@@ -7,8 +7,9 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
-# 初始化推理 pipeline
-# 当以原始音频作为输入时使用配置文件 sond.yaml，并设置 mode 为sond_demo
+# initialize the inference pipeline
+# when using raw waveform files as input, use the config file `sond.yaml`
+# and set mode to `sond_demo`
inference_diar_pipline = pipeline(
mode="sond_demo",
num_workers=0,
@@ -19,7 +20,8 @@
sv_model_revision="master",
)
-# 以 audio_list 作为输入，其中第一个音频为待检测语音，后面的音频为不同说话人的声纹注册语音
+# use audio_list as the input, where the first audio is the recording to be diarized
+# and the following ones are enrollment utterances from different speakers
audio_list = [
"https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/record.wav",
"https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/spk_A.wav",
diff --git a/egs_modelscope/speaker_diarization/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/infer.py b/egs_modelscope/speaker_diarization/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/infer.py
index db10193..dc867b0 100644
--- a/egs_modelscope/speaker_diarization/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/infer.py
+++ b/egs_modelscope/speaker_diarization/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/infer.py
@@ -7,8 +7,9 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
-# 初始化推理 pipeline
-# 当以原始音频作为输入时使用配置文件 sond.yaml，并设置 mode 为sond_demo
+# initialize the inference pipeline
+# when using raw waveform files as input, use the config file `sond.yaml`
+# and set mode to `sond_demo`
inference_diar_pipline = pipeline(
mode="sond_demo",
num_workers=0,
@@ -19,7 +20,8 @@
sv_model_revision="master",
)
-# 以 audio_list 作为输入，其中第一个音频为待检测语音，后面的音频为不同说话人的声纹注册语音
+# use audio_list as the input, where the first audio is the recording to be diarized
+# and the following ones are enrollment utterances from different speakers
audio_list = [
"https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/speaker_diarization/record.wav",
"https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_data/speaker_diarization/spk1.wav",
diff --git a/egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer_sv.py b/egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer_sv.py
index c51313d..7a53827 100644
--- a/egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer_sv.py
+++ b/egs_modelscope/speaker_verification/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/infer_sv.py
@@ -7,13 +7,13 @@
model='damo/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch'
)
- # 两个语音为相同说话人
+ # the two utterances are from the same speaker
rec_result = inference_sv_pipline(audio_in=(
'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_enroll.wav',
'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_same.wav'))
print("Similarity", rec_result["scores"])
- # 两个语音为不同说话人
+ # the two utterances are from different speakers
rec_result = inference_sv_pipline(audio_in=(
'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_enroll.wav',
'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/sv_example_different.wav'))
diff --git a/funasr/bin/asr_infer.py b/funasr/bin/asr_infer.py
index f6c5504..03145f8 100644
--- a/funasr/bin/asr_infer.py
+++ b/funasr/bin/asr_infer.py
@@ -762,23 +762,6 @@
feats_len = speech_lengths
if feats.shape[1] != 0:
- if cache_en["is_final"]:
- if feats.shape[1] + cache_en["chunk_size"][2] < cache_en["chunk_size"][1]:
- cache_en["last_chunk"] = True
- else:
- # first chunk
- feats_chunk1 = feats[:, :cache_en["chunk_size"][1], :]
- feats_len = torch.tensor([feats_chunk1.shape[1]])
- results_chunk1 = self.infer(feats_chunk1, feats_len, cache)
-
- # last chunk
- cache_en["last_chunk"] = True
- feats_chunk2 = feats[:, -(feats.shape[1] + cache_en["chunk_size"][2] - cache_en["chunk_size"][1]):, :]
- feats_len = torch.tensor([feats_chunk2.shape[1]])
- results_chunk2 = self.infer(feats_chunk2, feats_len, cache)
-
- return [" ".join(results_chunk1 + results_chunk2)]
-
results = self.infer(feats, feats_len, cache)
return results
diff --git a/funasr/bin/diar_inference_launch.py b/funasr/bin/diar_inference_launch.py
index 69d37d6..e0d900e 100755
--- a/funasr/bin/diar_inference_launch.py
+++ b/funasr/bin/diar_inference_launch.py
@@ -38,7 +38,6 @@
from scipy.signal import medfilt
from funasr.utils.cli_utils import get_commandline_args
from funasr.tasks.diar import DiarTask
-from funasr.tasks.asr import ASRTask
from funasr.tasks.diar import EENDOLADiarTask
from funasr.torch_utils.device_funcs import to_device
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
@@ -187,7 +186,7 @@
raise TypeError("raw_inputs must be a list or tuple in [speech, profile1, profile2, ...] ")
else:
# 3. Build data-iterator
- loader = ASRTask.build_streaming_iterator(
+ loader = DiarTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
batch_size=batch_size,
diff --git a/funasr/bin/sv_infer.py b/funasr/bin/sv_infer.py
index 9761497..1517bfa 100755
--- a/funasr/bin/sv_infer.py
+++ b/funasr/bin/sv_infer.py
@@ -23,7 +23,6 @@
from funasr.utils.cli_utils import get_commandline_args
from funasr.tasks.sv import SVTask
-from funasr.tasks.asr import ASRTask
from funasr.torch_utils.device_funcs import to_device
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.utils import config_argparse
diff --git a/funasr/bin/sv_inference_launch.py b/funasr/bin/sv_inference_launch.py
index 8e00730..dbddd9f 100755
--- a/funasr/bin/sv_inference_launch.py
+++ b/funasr/bin/sv_inference_launch.py
@@ -34,7 +34,6 @@
from funasr.utils.cli_utils import get_commandline_args
from funasr.tasks.sv import SVTask
-from funasr.tasks.asr import ASRTask
from funasr.torch_utils.device_funcs import to_device
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.utils import config_argparse
@@ -115,7 +114,7 @@
data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
# 3. Build data-iterator
- loader = ASRTask.build_streaming_iterator(
+ loader = SVTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
batch_size=batch_size,
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index 53e5bde..0e95d77 100755
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -335,6 +335,12 @@
help="dev dataset",
)
parser.add_argument(
+ "--data_file_names",
+ type=str,
+ default="wav.scp,text",
+        help="comma-separated data file names matching dataset_conf.data_names, e.g. wav.scp,text",
+ )
+ parser.add_argument(
"--speed_perturb",
type=float,
nargs="+",
diff --git a/funasr/models/encoder/sanm_encoder.py b/funasr/models/encoder/sanm_encoder.py
index e071e57..da67586 100644
--- a/funasr/models/encoder/sanm_encoder.py
+++ b/funasr/models/encoder/sanm_encoder.py
@@ -355,18 +355,10 @@
def _add_overlap_chunk(self, feats: np.ndarray, cache: dict = {}):
if len(cache) == 0:
return feats
- # process last chunk
cache["feats"] = to_device(cache["feats"], device=feats.device)
overlap_feats = torch.cat((cache["feats"], feats), dim=1)
- if cache["is_final"]:
- cache["feats"] = overlap_feats[:, -cache["chunk_size"][0]:, :]
- if not cache["last_chunk"]:
- padding_length = sum(cache["chunk_size"]) - overlap_feats.shape[1]
- overlap_feats = overlap_feats.transpose(1, 2)
- overlap_feats = F.pad(overlap_feats, (0, padding_length))
- overlap_feats = overlap_feats.transpose(1, 2)
- else:
- cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
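+        # always cache the trailing chunk_size[0] + chunk_size[2] frames (left context plus lookahead) as the overlap for the next chunk; the final-chunk special case moved to the CIF predictor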
+ cache["feats"] = overlap_feats[:, -(cache["chunk_size"][0] + cache["chunk_size"][2]):, :]
return overlap_feats
def forward_chunk(self,
diff --git a/funasr/models/predictor/cif.py b/funasr/models/predictor/cif.py
index c59e245..3c363db 100644
--- a/funasr/models/predictor/cif.py
+++ b/funasr/models/predictor/cif.py
@@ -221,13 +221,16 @@
if cache is not None and "chunk_size" in cache:
alphas[:, :cache["chunk_size"][0]] = 0.0
- alphas[:, sum(cache["chunk_size"][:2]):] = 0.0
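+            # zero the lookahead alphas only for non-final chunks; on the final chunk those frames are real tail frames and may fire (chunk_size is assumed to be [left, current, lookahead])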
+ if "is_final" in cache and not cache["is_final"]:
+ alphas[:, sum(cache["chunk_size"][:2]):] = 0.0
if cache is not None and "cif_alphas" in cache and "cif_hidden" in cache:
cache["cif_hidden"] = to_device(cache["cif_hidden"], device=hidden.device)
cache["cif_alphas"] = to_device(cache["cif_alphas"], device=alphas.device)
hidden = torch.cat((cache["cif_hidden"], hidden), dim=1)
alphas = torch.cat((cache["cif_alphas"], alphas), dim=1)
- if cache is not None and "last_chunk" in cache and cache["last_chunk"]:
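+        # the tail-threshold hook now fires on is_final, replacing the last_chunk flag removed from asr_infer.py and sanm_encoder.py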
+ if cache is not None and "is_final" in cache and cache["is_final"]:
tail_hidden = torch.zeros((batch_size, 1, hidden_size), device=hidden.device)
tail_alphas = torch.tensor([[self.tail_threshold]], device=alphas.device)
tail_alphas = torch.tile(tail_alphas, (batch_size, 1))
diff --git a/funasr/utils/prepare_data.py b/funasr/utils/prepare_data.py
index 3f55170..36795b4 100644
--- a/funasr/utils/prepare_data.py
+++ b/funasr/utils/prepare_data.py
@@ -3,6 +3,7 @@
import shutil
from multiprocessing import Pool
+import kaldiio
import numpy as np
import torch.distributed as dist
import torchaudio
@@ -48,49 +49,84 @@
def calc_shape_core(root_path, args, idx):
- wav_scp_file = os.path.join(root_path, "wav.scp.{}".format(idx))
- shape_file = os.path.join(root_path, "speech_shape.{}".format(idx))
- with open(wav_scp_file) as f:
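+    # shapes are computed for the first entry of data_file_names only; each shape line is "sample_name n_frames,feature_dim" (or "sample_name n_tokens" for text)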
+ file_name = args.data_file_names.split(",")[0]
+ data_name = args.dataset_conf.get("data_names", "speech,text").split(",")[0]
+ scp_file = os.path.join(root_path, "{}.{}".format(file_name, idx))
+ shape_file = os.path.join(root_path, "{}_shape.{}".format(data_name, idx))
+ with open(scp_file) as f:
lines = f.readlines()
- frontend_conf = args.frontend_conf
- dataset_conf = args.dataset_conf
- speech_length_min = dataset_conf.speech_length_min if hasattr(dataset_conf, "speech_length_min") else -1
- speech_length_max = dataset_conf.speech_length_max if hasattr(dataset_conf, "speech_length_max") else -1
- with open(shape_file, "w") as f:
- for line in lines:
- sample_name, wav_path = line.strip().split()
- n_frames, feature_dim = wav2num_frame(wav_path, frontend_conf)
- write_flag = True
- if n_frames > 0 and speech_length_min > 0:
- write_flag = n_frames >= speech_length_min
- if n_frames > 0 and speech_length_max > 0:
- write_flag = n_frames <= speech_length_max
- if write_flag:
- f.write("{} {},{}\n".format(sample_name, str(int(np.ceil(n_frames))), str(int(feature_dim))))
+ data_type = args.dataset_conf.get("data_types", "sound,text").split(",")[0]
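+    # pick a shape strategy per data type: probe the waveform (sound), load the kaldi matrix (kaldi_ark), or count tokens (text)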
+ if data_type == "sound":
+ frontend_conf = args.frontend_conf
+ dataset_conf = args.dataset_conf
+        length_min = dataset_conf.get("{}_length_min".format(data_name), -1)
+        length_max = dataset_conf.get("{}_length_max".format(data_name), -1)
+ with open(shape_file, "w") as f:
+ for line in lines:
+ sample_name, wav_path = line.strip().split()
+ n_frames, feature_dim = wav2num_frame(wav_path, frontend_conf)
+ write_flag = True
+ if n_frames > 0 and length_min > 0:
+ write_flag = n_frames >= length_min
+ if n_frames > 0 and length_max > 0:
+ write_flag = n_frames <= length_max
+ if write_flag:
+ f.write("{} {},{}\n".format(sample_name, str(int(np.ceil(n_frames))), str(int(feature_dim))))
+ f.flush()
+ elif data_type == "kaldi_ark":
+ dataset_conf = args.dataset_conf
+        length_min = dataset_conf.get("{}_length_min".format(data_name), -1)
+        length_max = dataset_conf.get("{}_length_max".format(data_name), -1)
+ with open(shape_file, "w") as f:
+ for line in lines:
+ sample_name, feature_path = line.strip().split()
+ feature = kaldiio.load_mat(feature_path)
+                n_frames, feature_dim = feature.shape
+                write_flag = True
+ if n_frames > 0 and length_min > 0:
+ write_flag = n_frames >= length_min
+ if n_frames > 0 and length_max > 0:
+ write_flag = n_frames <= length_max
+ if write_flag:
+ f.write("{} {},{}\n".format(sample_name, str(int(np.ceil(n_frames))), str(int(feature_dim))))
+ f.flush()
+ elif data_type == "text":
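+        # for text, the shape is simply the whitespace-separated token count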
+ with open(shape_file, "w") as f:
+ for line in lines:
+ sample_name, text = line.strip().split(maxsplit=1)
+ n_tokens = len(text.split())
+ f.write("{} {}\n".format(sample_name, str(int(np.ceil(n_tokens)))))
f.flush()
+ else:
+ raise RuntimeError("Unsupported data_type: {}".format(data_type))
def calc_shape(args, dataset, nj=64):
- shape_path = os.path.join(args.data_dir, dataset, "speech_shape")
+ data_name = args.dataset_conf.get("data_names", "speech,text").split(",")[0]
+ shape_path = os.path.join(args.data_dir, dataset, "{}_shape".format(data_name))
if os.path.exists(shape_path):
logging.info('Shape file for small dataset already exists.')
return
- split_shape_path = os.path.join(args.data_dir, dataset, "shape_files")
+ split_shape_path = os.path.join(args.data_dir, dataset, "{}_shape_files".format(data_name))
if os.path.exists(split_shape_path):
shutil.rmtree(split_shape_path)
os.mkdir(split_shape_path)
# split
- wav_scp_file = os.path.join(args.data_dir, dataset, "wav.scp")
- with open(wav_scp_file) as f:
+ file_name = args.data_file_names.split(",")[0]
+ scp_file = os.path.join(args.data_dir, dataset, file_name)
+ with open(scp_file) as f:
lines = f.readlines()
num_lines = len(lines)
num_job_lines = num_lines // nj
start = 0
for i in range(nj):
end = start + num_job_lines
- file = os.path.join(split_shape_path, "wav.scp.{}".format(str(i + 1)))
+ file = os.path.join(split_shape_path, "{}.{}".format(file_name, str(i + 1)))
with open(file, "w") as f:
if i == nj - 1:
f.writelines(lines[start:])
@@ -108,15 +144,19 @@
# combine
with open(shape_path, "w") as f:
for i in range(nj):
- job_file = os.path.join(split_shape_path, "speech_shape.{}".format(str(i + 1)))
+ job_file = os.path.join(split_shape_path, "{}_shape.{}".format(data_name, str(i + 1)))
with open(job_file) as job_f:
lines = job_f.readlines()
f.writelines(lines)
logging.info('Generating shape files done.')
-def generate_data_list(data_dir, dataset, nj=64):
- list_file = os.path.join(data_dir, dataset, "data.list")
+def generate_data_list(args, data_dir, dataset, nj=64):
+ data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
+ file_names = args.data_file_names.split(",")
+ concat_data_name = "_".join(data_names)
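+    # e.g. data_names "speech,text" -> list file speech_text_data.list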
+ list_file = os.path.join(data_dir, dataset, "{}_data.list".format(concat_data_name))
if os.path.exists(list_file):
logging.info('Data list for large dataset already exists.')
return
@@ -125,85 +165,68 @@
shutil.rmtree(split_path)
os.mkdir(split_path)
- with open(os.path.join(data_dir, dataset, "wav.scp")) as f_wav:
- wav_lines = f_wav.readlines()
- with open(os.path.join(data_dir, dataset, "text")) as f_text:
- text_lines = f_text.readlines()
- num_lines = len(wav_lines)
+ data_lines_list = []
+ for file_name in file_names:
+ with open(os.path.join(data_dir, dataset, file_name)) as f:
+ lines = f.readlines()
+ data_lines_list.append(lines)
+ num_lines = len(data_lines_list[0])
num_job_lines = num_lines // nj
start = 0
for i in range(nj):
end = start + num_job_lines
split_path_nj = os.path.join(split_path, str(i + 1))
os.mkdir(split_path_nj)
- wav_file = os.path.join(split_path_nj, "wav.scp")
- text_file = os.path.join(split_path_nj, "text")
- with open(wav_file, "w") as fw, open(text_file, "w") as ft:
- if i == nj - 1:
- fw.writelines(wav_lines[start:])
- ft.writelines(text_lines[start:])
- else:
- fw.writelines(wav_lines[start:end])
- ft.writelines(text_lines[start:end])
+ for file_id, file_name in enumerate(file_names):
+ file = os.path.join(split_path_nj, file_name)
+ with open(file, "w") as f:
+ if i == nj - 1:
+ f.writelines(data_lines_list[file_id][start:])
+ else:
+ f.writelines(data_lines_list[file_id][start:end])
start = end
with open(list_file, "w") as f_data:
for i in range(nj):
- wav_path = os.path.join(split_path, str(i + 1), "wav.scp")
- text_path = os.path.join(split_path, str(i + 1), "text")
- f_data.write(wav_path + " " + text_path + "\n")
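+            # each line of the list file holds the space-separated per-split file paths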
+            path = ""
+            for file_name in file_names:
+                path = path + os.path.join(split_path, str(i + 1), file_name) + " "
+            f_data.write(path.rstrip() + "\n")
def prepare_data(args, distributed_option):
distributed = distributed_option.distributed
if not distributed or distributed_option.dist_rank == 0:
- filter_wav_text(args.data_dir, args.train_set)
- filter_wav_text(args.data_dir, args.valid_set)
+ if hasattr(args, "filter_input") and args.filter_input:
+ filter_wav_text(args.data_dir, args.train_set)
+ filter_wav_text(args.data_dir, args.valid_set)
if args.dataset_type == "small":
calc_shape(args, args.train_set)
calc_shape(args, args.valid_set)
if args.dataset_type == "large":
- generate_data_list(args.data_dir, args.train_set)
- generate_data_list(args.data_dir, args.valid_set)
+ generate_data_list(args, args.data_dir, args.train_set)
+ generate_data_list(args, args.data_dir, args.valid_set)
+ data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
+ data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
+ file_names = args.data_file_names.split(",")
+    logging.info("data_names: {}, data_types: {}, file_names: {}".format(data_names, data_types, file_names))
+    assert len(data_names) == len(data_types) == len(file_names), "data_file_names must align with dataset_conf data_names/data_types"
if args.dataset_type == "small":
- args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "speech_shape")]
- args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "speech_shape")]
- data_names = args.dataset_conf.get("data_names", "speech,text").split(",")
- data_types = args.dataset_conf.get("data_types", "sound,text").split(",")
- args.train_data_path_and_name_and_type = [
- ["{}/{}/wav.scp".format(args.data_dir, args.train_set), data_names[0], data_types[0]],
- ["{}/{}/text".format(args.data_dir, args.train_set), data_names[1], data_types[1]]
- ]
- args.valid_data_path_and_name_and_type = [
- ["{}/{}/wav.scp".format(args.data_dir, args.valid_set), data_names[0], data_types[0]],
- ["{}/{}/text".format(args.data_dir, args.valid_set), data_names[1], data_types[1]]
- ]
- if args.embed_path is not None:
+ args.train_shape_file = [os.path.join(args.data_dir, args.train_set, "{}_shape".format(data_names[0]))]
+ args.valid_shape_file = [os.path.join(args.data_dir, args.valid_set, "{}_shape".format(data_names[0]))]
+ args.train_data_path_and_name_and_type, args.valid_data_path_and_name_and_type = [], []
+ for file_name, data_name, data_type in zip(file_names, data_names, data_types):
args.train_data_path_and_name_and_type.append(
- [os.path.join(args.embed_path, "embeds", args.train_set, "embeds.scp"), "embed", "kaldi_ark"])
+ ["{}/{}/{}".format(args.data_dir, args.train_set, file_name), data_name, data_type])
args.valid_data_path_and_name_and_type.append(
- [os.path.join(args.embed_path, "embeds", args.valid_set, "embeds.scp"), "embed", "kaldi_ark"])
+ ["{}/{}/{}".format(args.data_dir, args.valid_set, file_name), data_name, data_type])
else:
- args.train_data_file = os.path.join(args.data_dir, args.train_set, "data.list")
- args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "data.list")
- if args.embed_path is not None:
- if not distributed or distributed_option.dist_rank == 0:
- for d in [args.train_set, args.valid_set]:
- file = os.path.join(args.data_dir, d, "data.list")
- with open(file) as f:
- lines = f.readlines()
- out_file = os.path.join(args.data_dir, d, "data_with_embed.list")
- with open(out_file, "w") as out_f:
- for line in lines:
- parts = line.strip().split()
- idx = parts[0].split("/")[-2]
- embed_file = os.path.join(args.embed_path, "embeds", args.valid_set, "ark",
- "embeds.{}.ark".format(idx))
- out_f.write(parts[0] + " " + parts[1] + " " + embed_file + "\n")
- args.train_data_file = os.path.join(args.data_dir, args.train_set, "data_with_embed.list")
- args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "data_with_embed.list")
+ concat_data_name = "_".join(data_names)
+ args.train_data_file = os.path.join(args.data_dir, args.train_set, "{}_data.list".format(concat_data_name))
+ args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "{}_data.list".format(concat_data_name))
if distributed:
dist.barrier()
--
Gitblit v1.9.1