#!/usr/bin/env bash

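# machines configuration: GPUs used for decoding; stage 5 runs one GPU
# inference job per device id listed here.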
CUDA_VISIBLE_DEVICES="0,1"

# general configuration
# NOTE: the values below are assumed defaults for this excerpt (the full
# recipe sets them earlier); adjust them to your setup.
feats_dir="../DATA"   # data root holding data/<dset>/wav.scp and data/<train_set>/am.mvn
exp_dir="."           # experiment root; checkpoints live under ${exp_dir}/exp/${model_dir}
lang=zh
token_type=char
tag="exp1"
njob=1                # number of parallel decoding jobs when inference_device="cpu"

inference_device="cuda" #"cpu"
inference_checkpoint="model.pt"
inference_scp="wav.scp"
inference_batch_size=32

# data
raw_data=../raw_data

train_set=train       # assumed; the cmvn file is read from data/${train_set}/am.mvn
valid_set=dev
test_sets="dev test"

config=paraformer_conformer_12e_6d_2048_256.yaml
model_dir="baseline_$(basename "${config}" .yaml)_${lang}_${token_type}_${tag}"
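# With the defaults above, model_dir expands to:
# baseline_paraformer_conformer_12e_6d_2048_256_zh_char_exp1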

echo "stage 5: Inference"

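# Pick the decoding parallelism: with CUDA, one job per GPU id listed in
# CUDA_VISIBLE_DEVICES; with CPU, ${njob} jobs, each given the pseudo id -1
# so that no GPU is visible to the workers.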
if [ "${inference_device}" == "cuda" ]; then
    nj=$(echo "${CUDA_VISIBLE_DEVICES}" | awk -F "," '{print NF}')
else
    nj=${njob}
    inference_batch_size=1
    CUDA_VISIBLE_DEVICES=""
    for JOB in $(seq ${nj}); do
        CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}"-1,"
    done
fi
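
# Decode each test set: shard its key file across ${nj} jobs, run the shards
# in parallel, then wait for the whole set to finish.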
for dset in ${test_sets}; do

    inference_dir="${exp_dir}/exp/${model_dir}/${inference_checkpoint}/${dset}"
    _logdir="${inference_dir}/logdir"

    mkdir -p "${_logdir}"

    # split the dataset's key file into one shard per decoding job
    key_file=${feats_dir}/data/${dset}/${inference_scp}
    split_scps=
    for JOB in $(seq "${nj}"); do
        split_scps+=" ${_logdir}/keys.${JOB}.scp"
    done
    utils/split_scp.pl "${key_file}" ${split_scps}

    gpuid_list_array=(${CUDA_VISIBLE_DEVICES//,/ })   # one entry per job; "-1" entries mean CPU
    for JOB in $(seq ${nj}); do
        {
            id=$((JOB-1))
            gpuid=${gpuid_list_array[$id]}

            # pin this job to its device and decode its shard in the background
            export CUDA_VISIBLE_DEVICES=${gpuid}
            python ../../../funasr/bin/inference.py \
                --config-path="${exp_dir}/exp/${model_dir}" \
                --config-name="config.yaml" \
                ++init_param="${exp_dir}/exp/${model_dir}/${inference_checkpoint}" \
                ++frontend_conf.cmvn_file="${feats_dir}/data/${train_set}/am.mvn" \
                ++input="${_logdir}/keys.${JOB}.scp" \
                ++output_dir="${inference_dir}/${JOB}" \
                ++device="${inference_device}" \
                ++batch_size="${inference_batch_size}" &> "${_logdir}/log.${JOB}.txt"
        }&
    done
    wait
done
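
# Once a set has finished, the per-job outputs can be concatenated back into a
# single file. A minimal sketch, assuming inference.py writes its hypotheses to
# ${inference_dir}/${JOB}/1best_recog/text (the 1best_recog layout is an
# assumption; check what your checkpoint actually emits):
for dset in ${test_sets}; do
    inference_dir="${exp_dir}/exp/${model_dir}/${inference_checkpoint}/${dset}"
    mkdir -p "${inference_dir}/1best_recog"
    for JOB in $(seq "${nj}"); do
        if [ -f "${inference_dir}/${JOB}/1best_recog/text" ]; then
            cat "${inference_dir}/${JOB}/1best_recog/text"
        fi
    done | sort -k1 > "${inference_dir}/1best_recog/text"
done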