| | |
# --- GPU decoding configuration ---
gpu_num=2
count=1
gpu_inference=true    # whether to perform GPU decoding; set false for CPU decoding
njob=1                # number of decoding jobs per GPU (conflict duplicate resolved: last value wins)
train_cmd=utils/run.pl
infer_cmd=utils/run.pl

# Number of GPUs = number of comma-separated ids in gpuid_list.
# NOTE(review): gpuid_list is expected to be set earlier in the script — confirm.
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')

if ${gpu_inference}; then
  # Total parallel jobs = GPUs x jobs-per-GPU ($(( )) replaces deprecated $[ ]).
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0    # was missing in the original else branch
fi
# Launch parallel ASR inference jobs (one log file per JOB id).
# Conflict duplicates resolved: a single --batch_size (64) and a single
# --gpuid_list (first GPU id only, each job selects its own device via --njob).
${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
  python -m funasr.bin.asr_inference_launch \
    --batch_size 64 \
    --ngpu "${_ngpu}" \
    --njob "${njob}" \
    --gpuid_list "${gpuid_list:0:1}" \
    --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
    --key_file "${_logdir}"/keys.JOB.scp \
    --asr_train_config "${asr_exp}"/config.yaml
| | |
# --- Paraformer model / decoding configuration ---
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"   # please do not modify the model revision
gpuid_list="0"            # set gpus, e.g., gpuid_list="0,1" (conflict duplicate resolved)
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')
njob=1                    # the number of jobs for each gpu
gpu_inference=true        # whether to perform gpu decoding; set false for cpu decoding

# Completed if/else: the original `if` was left unterminated.
if ${gpu_inference}; then
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0
fi
| | |
# --- GPU decoding configuration (deduplicated repeat) ---
gpu_num=2
count=1
gpu_inference=true    # whether to perform GPU decoding; set false for CPU decoding
njob=1                # number of decoding jobs per GPU (conflict duplicate resolved)
train_cmd=utils/run.pl
infer_cmd=utils/run.pl

# Number of GPUs = number of comma-separated ids in gpuid_list.
# NOTE(review): gpuid_list is expected to be set earlier in the script — confirm.
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')

if ${gpu_inference}; then
  # Total parallel jobs = GPUs x jobs-per-GPU ($(( )) replaces deprecated $[ ]).
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0    # was missing in the original else branch
fi
# Launch parallel ASR inference jobs (one log file per JOB id).
# Conflict duplicates resolved: single --batch_size (64), single --gpuid_list
# (first GPU id only; per-job device selection is handled via --njob).
${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
  python -m funasr.bin.asr_inference_launch \
    --batch_size 64 \
    --ngpu "${_ngpu}" \
    --njob "${njob}" \
    --gpuid_list "${gpuid_list:0:1}" \
    --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
    --key_file "${_logdir}"/keys.JOB.scp \
    --asr_train_config "${asr_exp}"/config.yaml
| | |
# --- Paraformer model / decoding configuration (deduplicated repeat) ---
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"   # please do not modify the model revision
gpuid_list="0"            # set gpus, e.g., gpuid_list="0,1" (conflict duplicate resolved)
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')
njob=1                    # the number of jobs for each gpu
gpu_inference=true        # whether to perform gpu decoding; set false for cpu decoding

# Completed if/else: the original `if` was left unterminated.
if ${gpu_inference}; then
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0
fi
| | |
# --- GPU decoding configuration (deduplicated repeat) ---
gpu_num=2
count=1
gpu_inference=true    # whether to perform GPU decoding; set false for CPU decoding
njob=1                # number of decoding jobs per GPU (conflict duplicate resolved)
train_cmd=utils/run.pl
infer_cmd=utils/run.pl

# Number of GPUs = number of comma-separated ids in gpuid_list.
# NOTE(review): gpuid_list is expected to be set earlier in the script — confirm.
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')

if ${gpu_inference}; then
  # Total parallel jobs = GPUs x jobs-per-GPU ($(( )) replaces deprecated $[ ]).
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0    # was missing in the original else branch
fi
# Launch parallel ASR inference jobs (one log file per JOB id).
# Conflict duplicates resolved: single --batch_size (64), single --gpuid_list
# (first GPU id only; per-job device selection is handled via --njob).
${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
  python -m funasr.bin.asr_inference_launch \
    --batch_size 64 \
    --ngpu "${_ngpu}" \
    --njob "${njob}" \
    --gpuid_list "${gpuid_list:0:1}" \
    --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
    --key_file "${_logdir}"/keys.JOB.scp \
    --asr_train_config "${asr_exp}"/config.yaml
| | |
# --- Inference configuration ---
model_revision="v1.0.4"   # please do not modify the model revision
data_dir=                 # wav list, ${data_dir}/wav.scp
exp_dir="exp"
gpuid_list="0"            # set gpus, e.g., gpuid_list="0,1" (conflict duplicate resolved)
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')
njob=1                    # number of jobs for each gpu (conflict duplicate resolved)
gpu_inference=true        # whether to perform gpu decoding; set false for cpu decoding
decode_cmd=utils/run.pl
| | |
# --- Fine-tuned model inference configuration ---
data_dir=            # wav list, ${data_dir}/wav.scp
finetune_model_name= # fine-tuning model name
finetune_exp_dir=    # fine-tuning model experiment result path
gpuid_list="0"       # set gpus, e.g., gpuid_list="0,1" (conflict duplicate resolved)
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')
njob=1               # number of jobs for each gpu (conflict duplicate resolved)
gpu_inference=true   # whether to perform gpu decoding; set false for cpu decoding
decode_cmd=utils/run.pl
| | |
# Choose job parallelism: GPUs x jobs-per-GPU when decoding on GPU,
# otherwise njob CPU jobs. The original fragment had an orphaned
# else/fi tail and a duplicated inference_nj assignment; completed here
# into a well-formed conditional ($(( )) replaces deprecated $[ ]).
if ${gpu_inference}; then
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0
fi
| | |
| | | |
| | | for i in $(seq ${inference_nj}); do |
| | | cat ${_logdir}/text.${i} |
| | | done | sort -k1 >${_dir}/text |
| | | done | sort -k1 >${_dir}/text |
| | |
# --- Paraformer model / decoding configuration (deduplicated repeat) ---
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"   # please do not modify the model revision
gpuid_list="0"            # set gpus, e.g., gpuid_list="0,1" (conflict duplicate resolved)
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')
njob=1                    # the number of jobs for each gpu
gpu_inference=true        # whether to perform gpu decoding; set false for cpu decoding

# Completed if/else: the original `if` was left unterminated.
if ${gpu_inference}; then
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0
fi
| | |
# --- Paraformer model / decoding configuration (deduplicated repeat) ---
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"   # please do not modify the model revision
gpuid_list="0"            # set gpus, e.g., gpuid_list="0,1" (conflict duplicate resolved)
ngpu=$(echo "${gpuid_list}" | awk -F "," '{print NF}')
njob=1                    # the number of jobs for each gpu
gpu_inference=true        # whether to perform gpu decoding; set false for cpu decoding

# Completed if/else: the original `if` was left unterminated.
if ${gpu_inference}; then
  inference_nj=$((ngpu * njob))
  _ngpu=1
else
  inference_nj=${njob}
  _ngpu=0
fi