嘉渊
2023-07-21 e79c9a801e7e7458ce6894fa85178fa974dfd18b
egs/callhome/eend_ola/run_test.sh
@@ -8,6 +8,11 @@
# ---- general configuration --------------------------------------------------
count=1                     # NOTE(review): purpose not visible in this excerpt
dump_cmd="utils/run.pl"     # parallel job dispatcher (Kaldi-style run.pl)
nj=64                       # number of parallel jobs / scp splits
# ---- feature configuration --------------------------------------------------
# Root of the converted test-set data.
data_dir="/nfs/wangjiaming.wjm/EEND_DATA_sad30_snr10n15n20/convert_test/data"
# Dumped simulated-mixture features (default chunking).
simu_feats_dir="/nfs/wangjiaming.wjm/EEND_ARK_DATA/dump/simu_data/data"
# Dumped simulated-mixture features, chunk size 2000.
simu_feats_dir_chunk2000="/nfs/wangjiaming.wjm/EEND_ARK_DATA/dump/simu_data_chunk2000/data"
# Dumped CALLHOME features, chunk size 2000.
callhome_feats_dir_chunk2000="/nfs/wangjiaming.wjm/EEND_ARK_DATA/dump/callhome_chunk2000/data"
@@ -27,8 +32,8 @@
# Experiment output root and model input feature dimensionality.
exp_dir="."
input_size=345
# Pipeline stage control: run stages in [stage, stop_stage].
# (Removed the dead stage=5/stop_stage=5 pair that was immediately
#  overwritten by the assignments below — leftover diff residue.)
stage=0
stop_stage=0
# exp tag — used to name experiment output directories.
tag="exp1"
@@ -62,13 +67,45 @@
    local/run_prepare_shared_eda.sh
fi
## Prepare data for training and inference
## NOTE: the commented block below is the superseded manual procedure,
## kept for reference; the active implementation follows it.
#if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
#    echo "stage 0: Prepare data for training and inference"
#    echo "The detail can be found in https://github.com/hitachi-speech/EEND"
#    . ./local/
#fi
#
# Prepare data for training and inference
# Stage 0: build feats.scp indexes over the dumped simulated-mixture features.
if [ "${stage}" -le 0 ] && [ "${stop_stage}" -ge 0 ]; then
    echo "stage 0: Prepare data for training and inference"
    # Simulation options: speaker counts, per-count silence (beta) scales,
    # and the number of training mixtures.
    simu_opts_num_speaker_array=(1 2 3 4)
    simu_opts_sil_scale_array=(2 2 5 9)
    simu_opts_num_train=100000
    # for simulated data of chunk500 and chunk2000
    for dset in swb_sre_tr swb_sre_cv; do
        if [ "$dset" == "swb_sre_tr" ]; then
            n_mixtures=${simu_opts_num_train}
            dataset=train
        else
            n_mixtures=500
            dataset=dev
        fi
        # Directory name joins the arrays with "n",
        # e.g. swb_sre_tr_ns1n2n3n4_beta2n2n5n9_100000.
        # NOTE(review): only the commented-out dump steps below consume
        # ${simu_data_dir}; the active code does not — confirm it is still needed.
        simu_data_dir=${dset}_ns"$(IFS="n"; echo "${simu_opts_num_speaker_array[*]}")"_beta"$(IFS="n"; echo "${simu_opts_sil_scale_array[*]}")"_${n_mixtures}
        # Kept for reference: wav.scp splitting and feature dumping (disabled).
#        mkdir -p ${data_dir}/simu/data/${simu_data_dir}/.work
#        split_scps=
#        for n in $(seq $nj); do
#            split_scps="$split_scps ${data_dir}/simu/data/${simu_data_dir}/.work/wav.$n.scp"
#        done
#        utils/split_scp.pl "${data_dir}/simu/data/${simu_data_dir}/wav.scp" $split_scps || exit 1
#        python local/split.py ${data_dir}/simu/data/${simu_data_dir}
#        # for chunk_size=500
#        output_dir=${data_dir}/ark_data/dump/simu_data/$dataset
#        mkdir -p $output_dir/.logs
#        $dump_cmd --max-jobs-run $nj JOB=1:$nj $output_dir/.logs/dump.JOB.log \
#        python local/dump_feature.py \
#              --data_dir ${data_dir}/simu/data/${simu_data_dir}/.work \
#              --output_dir ${data_dir}/ark_data/dump/simu_data/$dataset \
#              --index JOB
        mkdir -p "${data_dir}/ark_data/dump/simu_data/data/${dataset}"
        # Generate feats.scp entries split into $nj parts; abort on failure,
        # consistent with the script's other critical steps.
        python local/gen_feats_scp.py \
              --root_path "${data_dir}/ark_data/dump/simu_data" \
              --out_path "${data_dir}/ark_data/dump/simu_data/data/${dataset}" \
              --split_num "$nj" || exit 1
    done
fi
# Training on simulated two-speaker data
# world_size: number of distributed training workers — one per GPU.
# NOTE(review): $gpu_num is defined outside this excerpt — confirm it is set
# (and non-empty) before this point.
world_size=$gpu_num
@@ -245,7 +282,7 @@
    python local/model_averaging.py ${exp_dir}/exp/${callhome_model_dir}/$callhome_ave_id.pb $models
fi
# inference and compute DER (diarization error rate)
# Stage 5: run inference on the CALLHOME validation set, then score the
# hypothesis RTTM against the reference with md-eval.pl.
# NOTE(review): the head of the inference command is outside this excerpt
# (lost between diff hunks) — only its trailing arguments are visible below.
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
    echo "Inference"
    # Directory for the inference log and output RTTM.
    mkdir -p ${exp_dir}/exp/${callhome_model_dir}/inference/log
@@ -255,4 +292,7 @@
        --output_rttm_file ${exp_dir}/exp/${callhome_model_dir}/inference/rttm \
        --wav_scp_file ${callhome_feats_dir_chunk2000}/${callhome_valid_dataset}/${callhome2_wav_scp_file} \
        1> ${exp_dir}/exp/${callhome_model_dir}/inference/log/infer.log 2>&1
    # Score DER with NIST md-eval (0.25 s collar); stderr is intentionally
    # discarded, and the script aborts if scoring fails.
    # NOTE(review): assumes md-eval.pl is on PATH — confirm.
    md-eval.pl -c 0.25 \
          -r ${callhome_feats_dir_chunk2000}/${callhome_valid_dataset}/rttm \
          -s ${exp_dir}/exp/${callhome_model_dir}/inference/rttm > ${exp_dir}/exp/${callhome_model_dir}/inference/result_med11_collar0.25 2>/dev/null || exit
fi