From cf8e000a84e888495dcf30c4dbfecea1ee7ab4e2 Mon Sep 17 00:00:00 2001
From: jmwang66 <wangjiaming.wjm@alibaba-inc.com>
Date: Mon, 07 Aug 2023 16:13:37 +0800
Subject: [PATCH] Merge pull request #807 from alibaba-damo-academy/dev_wjm
---
egs/callhome/eend_ola/run.sh | 133 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 100 insertions(+), 33 deletions(-)
diff --git a/egs/callhome/eend_ola/run.sh b/egs/callhome/eend_ola/run.sh
index fb030c5..aa441bf 100644
--- a/egs/callhome/eend_ola/run.sh
+++ b/egs/callhome/eend_ola/run.sh
@@ -3,19 +3,23 @@
. ./path.sh || exit 1;
# machines configuration
-CUDA_VISIBLE_DEVICES="7"
+CUDA_VISIBLE_DEVICES="0"
gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
count=1
# general configuration
-simu_feats_dir="/nfs/wangjiaming.wjm/EEND_ARK_DATA/dump/simu_data/data"
-simu_feats_dir_chunk2000="/nfs/wangjiaming.wjm/EEND_ARK_DATA/dump/simu_data_chunk2000/data"
-callhome_feats_dir_chunk2000="/nfs/wangjiaming.wjm/EEND_ARK_DATA/dump/callhome_chunk2000/data"
+dump_cmd=utils/run.pl
+nj=64
+
+# feature configuration
+data_dir="./data"
+simu_feats_dir=$data_dir/ark_data/dump/simu_data/data
+simu_feats_dir_chunk2000=$data_dir/ark_data/dump/simu_data_chunk2000/data
+callhome_feats_dir_chunk2000=$data_dir/ark_data/dump/callhome_chunk2000/data
simu_train_dataset=train
simu_valid_dataset=dev
-callhome_train_dataset=callhome1_allspk
-callhome_valid_dataset=callhome2_allspk
-callhome2_wav_scp_file=wav.scp
+callhome_train_dataset=callhome1_spkall
+callhome_valid_dataset=callhome2_spkall
# model average
simu_average_2spkr_start=91
@@ -27,11 +31,11 @@
exp_dir="."
input_size=345
-stage=-1
-stop_stage=-1
+stage=1
+stop_stage=5
# exp tag
-tag="exp_fix"
+tag="exp1"
. local/parse_options.sh || exit 1;
@@ -52,23 +56,82 @@
# simulate mixture data for training and inference
if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
- echo "stage 0: Simulate mixture data for training and inference"
+ echo "stage -1: Simulate mixture data for training and inference"
echo "The detail can be found in https://github.com/hitachi-speech/EEND"
echo "Before running this step, you should download and compile kaldi and set KALDI_ROOT in this script and path.sh"
echo "This stage may take a long time, please waiting..."
KALDI_ROOT=
ln -s $KALDI_ROOT/egs/wsj/s5/steps steps
ln -s $KALDI_ROOT/egs/wsj/s5/utils utils
- . local/run_prepare_shared_eda.sh
+ local/run_prepare_shared_eda.sh
fi
-## Prepare data for training and inference
-#if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
-# echo "stage 0: Prepare data for training and inference"
-# echo "The detail can be found in https://github.com/hitachi-speech/EEND"
-# . ./local/
-#fi
-#
+# Prepare data for training and inference
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ echo "stage 0: Prepare data for training and inference"
+ simu_opts_num_speaker_array=(1 2 3 4)
+ simu_opts_sil_scale_array=(2 2 5 9)
+ simu_opts_num_train=100000
+
+ # for simulated data of chunk500 and chunk2000
+ for dset in swb_sre_cv swb_sre_tr; do
+ if [ "$dset" == "swb_sre_tr" ]; then
+ n_mixtures=${simu_opts_num_train}
+ dataset=train
+ else
+ n_mixtures=500
+ dataset=dev
+ fi
+ simu_data_dir=${dset}_ns"$(IFS="n"; echo "${simu_opts_num_speaker_array[*]}")"_beta"$(IFS="n"; echo "${simu_opts_sil_scale_array[*]}")"_${n_mixtures}
+ mkdir -p ${data_dir}/simu/data/${simu_data_dir}/.work
+ split_scps=
+ for n in $(seq $nj); do
+ split_scps="$split_scps ${data_dir}/simu/data/${simu_data_dir}/.work/wav.scp.$n"
+ done
+ utils/split_scp.pl "${data_dir}/simu/data/${simu_data_dir}/wav.scp" $split_scps || exit 1
+ python local/split.py ${data_dir}/simu/data/${simu_data_dir}
+ # for chunk_size=500
+ output_dir=${data_dir}/ark_data/dump/simu_data/$dataset
+ mkdir -p $output_dir/.logs
+ $dump_cmd --max-jobs-run $nj JOB=1:$nj $output_dir/.logs/dump.JOB.log \
+ python local/dump_feature.py \
+ --data_dir ${data_dir}/simu/data/${simu_data_dir}/.work \
+ --output_dir $output_dir \
+ --index JOB
+ mkdir -p ${data_dir}/ark_data/dump/simu_data/data/$dataset
+ cat ${data_dir}/ark_data/dump/simu_data/$dataset/feature.scp.* > ${data_dir}/ark_data/dump/simu_data/data/$dataset/feature.scp
+ cat ${data_dir}/ark_data/dump/simu_data/$dataset/label.scp.* > ${data_dir}/ark_data/dump/simu_data/data/$dataset/label.scp
+ paste -d" " ${data_dir}/ark_data/dump/simu_data/data/$dataset/feature.scp <(cut -f2 -d" " ${data_dir}/ark_data/dump/simu_data/data/$dataset/label.scp) > ${data_dir}/ark_data/dump/simu_data/data/$dataset/feats.scp
+ grep "ns2" ${data_dir}/ark_data/dump/simu_data/data/$dataset/feats.scp > ${data_dir}/ark_data/dump/simu_data/data/$dataset/feats_2spkr.scp
+ # for chunk_size=2000
+ output_dir=${data_dir}/ark_data/dump/simu_data_chunk2000/$dataset
+ mkdir -p $output_dir/.logs
+ $dump_cmd --max-jobs-run $nj JOB=1:$nj $output_dir/.logs/dump.JOB.log \
+ python local/dump_feature.py \
+ --data_dir ${data_dir}/simu/data/${simu_data_dir}/.work \
+ --output_dir $output_dir \
+ --index JOB \
+ --num_frames 2000
+ mkdir -p ${data_dir}/ark_data/dump/simu_data_chunk2000/data/$dataset
+ cat ${data_dir}/ark_data/dump/simu_data_chunk2000/$dataset/feature.scp.* > ${data_dir}/ark_data/dump/simu_data_chunk2000/data/$dataset/feature.scp
+ cat ${data_dir}/ark_data/dump/simu_data_chunk2000/$dataset/label.scp.* > ${data_dir}/ark_data/dump/simu_data_chunk2000/data/$dataset/label.scp
+ paste -d" " ${data_dir}/ark_data/dump/simu_data_chunk2000/data/$dataset/feature.scp <(cut -f2 -d" " ${data_dir}/ark_data/dump/simu_data_chunk2000/data/$dataset/label.scp) > ${data_dir}/ark_data/dump/simu_data_chunk2000/data/$dataset/feats.scp
+ done
+
+ # for callhome data
+ for dset in callhome1_spkall callhome2_spkall; do
+ find $data_dir/eval/$dset -maxdepth 1 -type f -exec cp {} {}.1 \;
+ output_dir=${data_dir}/ark_data/dump/callhome_chunk2000/$dset
+ mkdir -p $output_dir
+ python local/dump_feature.py \
+ --data_dir $data_dir/eval/$dset \
+ --output_dir $output_dir \
+ --index 1 \
+ --num_frames 2000
+ mkdir -p ${data_dir}/ark_data/dump/callhome_chunk2000/data/$dset
+ paste -d" " ${data_dir}/ark_data/dump/callhome_chunk2000/$dset/feature.scp.1 <(cut -f2 -d" " ${data_dir}/ark_data/dump/callhome_chunk2000/$dset/label.scp.1) > ${data_dir}/ark_data/dump/callhome_chunk2000/data/$dset/feats.scp
+ done
+fi
# Training on simulated two-speaker data
world_size=$gpu_num
@@ -159,10 +222,10 @@
python local/model_averaging.py ${exp_dir}/exp/${simu_allspkr_model_dir}/$simu_allspkr_ave_id.pb $models
fi
-# Training on simulated all-speaker data with chunk_size=2000
+# Training on simulated all-speaker data with chunk_size 2000
world_size=$gpu_num
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- echo "stage 3: Training on simulated all-speaker data with chunk_size=2000"
+ echo "stage 3: Training on simulated all-speaker data with chunk_size 2000"
mkdir -p ${exp_dir}/exp/${simu_allspkr_chunk2000_model_dir}
mkdir -p ${exp_dir}/exp/${simu_allspkr_chunk2000_model_dir}/log
INIT_FILE=${exp_dir}/exp/${simu_allspkr_chunk2000_model_dir}/ddp_init
@@ -200,11 +263,11 @@
wait
fi
-# Training on callhome all-speaker data with chunk_size=2000
+# Training on callhome all-speaker data with chunk_size 2000
world_size=$gpu_num
callhome_ave_id=avg${callhome_average_start}-${callhome_average_end}
if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
- echo "stage 4: Training on callhome all-speaker data with chunk_size=2000"
+ echo "stage 4: Training on callhome all-speaker data with chunk_size 2000"
mkdir -p ${exp_dir}/exp/${callhome_model_dir}
mkdir -p ${exp_dir}/exp/${callhome_model_dir}/log
INIT_FILE=${exp_dir}/exp/${callhome_model_dir}/ddp_init
@@ -245,13 +308,17 @@
python local/model_averaging.py ${exp_dir}/exp/${callhome_model_dir}/$callhome_ave_id.pb $models
fi
-## inference
-#if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
-# echo "Inference"
-# mkdir -p ${exp_dir}/exp/${callhome_model_dir}/inference/log
-# CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES python local/infer.py \
-# --config_file ${exp_dir}/exp/${callhome_model_dir}/config.yaml \
-# --model_file ${exp_dir}/exp/${callhome_model_dir}/$callhome_ave_id.pb \
-# --output_rttm_file ${exp_dir}/exp/${callhome_model_dir}/inference/rttm \
-# --wav_scp_file ${callhome_feats_dir_chunk2000}/${callhome_valid_dataset}/${callhome2_wav_scp_file} 1> ${exp_dir}/exp/${callhome_model_dir}/inference/log/infer.log 2>&1
-#fi
\ No newline at end of file
+# inference and compute DER
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+ echo "Inference"
+ mkdir -p ${exp_dir}/exp/${callhome_model_dir}/inference/log
+ CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES python local/infer.py \
+ --config_file ${exp_dir}/exp/${callhome_model_dir}/config.yaml \
+ --model_file ${exp_dir}/exp/${callhome_model_dir}/$callhome_ave_id.pb \
+ --output_rttm_file ${exp_dir}/exp/${callhome_model_dir}/inference/rttm \
+ --wav_scp_file $data_dir/eval/callhome2_spkall/wav.scp \
+ 1> ${exp_dir}/exp/${callhome_model_dir}/inference/log/infer.log 2>&1
+ md-eval.pl -c 0.25 \
+ -r ${data_dir}/eval/${callhome_valid_dataset}/rttm \
+ -s ${exp_dir}/exp/${callhome_model_dir}/inference/rttm > ${exp_dir}/exp/${callhome_model_dir}/inference/result_med11_collar0.25 2>/dev/null || exit
+fi
\ No newline at end of file
--
Gitblit v1.9.1