# ---------------------------------------------------------------------------
# General configuration.
# NOTE(review): recovered from a garbled extraction — "| | |" table residue
# stripped. ${model_dir} and ${current_time} are not defined in the visible
# portion of this script; presumably set earlier in the full file — TODO
# confirm before relying on log_file's path.
# ---------------------------------------------------------------------------
feats_dir="../DATA"   # feature output directory
exp_dir=$(pwd)        # experiment root (original set exp_dir="." then immediately overwrote it)
lang=zh               # language id
token_type=char       # tokenization unit
stage=0               # first pipeline stage to run

# Per-run training log file (model_dir / current_time expected from enclosing script).
log_file="${exp_dir}/exp/${model_dir}/train.log.txt.${current_time}"
echo "log_file: ${log_file}"

# ---------------------------------------------------------------------------
# Distributed training launch.
# NOTE(review): kept byte-for-byte — this span carries "| | |" extraction
# residue, and the torchrun invocation is truncated after "--nnodes 1": the
# entry script, --nproc_per_node (likely ${gpu_num}), and all remaining flags
# were lost and must be restored from the original before this can run.
# What the surviving lines show:
#   * exporting CUDA_VISIBLE_DEVICES to its own value is a no-op apart from
#     marking the variable exported for child processes (torchrun).
#   * gpu_num counts the comma-separated entries of CUDA_VISIBLE_DEVICES
#     via awk -F "," '{print NF}' — i.e. the number of visible GPUs.
# ---------------------------------------------------------------------------
| | | export CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES |
| | | gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') |
| | | torchrun \ |
| | | --nnodes 1 \ |
| | |
# ---------------------------------------------------------------------------
# Stage 5: inference / decoding.
# NOTE(review): kept byte-for-byte — this span carries "| | |" extraction
# residue and several lines were lost. Issues visible through the garbling:
#   * `if ${inference_device} == "cuda"; then` is not a valid shell test (it
#     would try to execute the variable's value as a command); the bracketed
#     form on the very next line is the correct variant — the pair looks like
#     a duplicated diff row, keep only the bracketed one when restoring.
#   * `++batch_size="${inference_batch_size}"` appears twice; the second
#     copy, ending in `&> ${_logdir}/log.${JOB}.txt`, is presumably the real
#     final argument line of the (missing) inference command — TODO confirm.
#   * the `for JOB in ...; do {` header matching the trailing `}&` / `done`,
#     the `fi` closing the device test, the CPU-branch nj assignment, the
#     inference command itself, and the `fi` closing the stage test are all
#     missing from this chunk — restore them from the original.
# What the surviving lines show:
#   * on CUDA, nj is set to the number of comma-separated devices in
#     CUDA_VISIBLE_DEVICES; otherwise inference_batch_size is forced to 1.
#   * each job reads keys from ${_logdir}/keys.${JOB}.scp, writes results to
#     ${inference_dir}/${JOB}, and logs to ${_logdir}/log.${JOB}.txt, with
#     jobs backgrounded via `}&` for parallel fan-out.
# ---------------------------------------------------------------------------
| | | if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then |
| | | echo "stage 5: Inference" |
| | | |
| | | if ${inference_device} == "cuda"; then |
| | | if [ ${inference_device} == "cuda" ]; then |
| | | nj=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') |
| | | else |
| | | inference_batch_size=1 |
| | |
| | | ++input="${_logdir}/keys.${JOB}.scp" \ |
| | | ++output_dir="${inference_dir}/${JOB}" \ |
| | | ++device="${inference_device}" \ |
| | | ++batch_size="${inference_batch_size}" |
| | | ++batch_size="${inference_batch_size}" &> ${_logdir}/log.${JOB}.txt |
| | | }& |
| | | |
| | | done |