lzr265946
2022-12-23 532c56b3c1deba9493acf968d743076c4f4a7f3c
update inference config
9个文件已修改
51 ■■■■ 已修改文件
egs_modelscope/aishell/paraformer/paraformer_large_finetune.sh 8 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/aishell/paraformer/paraformer_large_infer.sh 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/aishell2/paraformer/paraformer_large_finetune.sh 8 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/aishell2/paraformer/paraformer_large_infer.sh 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/common/modelscope_common_finetune.sh 8 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/common/modelscope_common_infer.sh 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/common/modelscope_common_infer_after_finetune.sh 7 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/speechio/paraformer/paraformer_large_infer.sh 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/wenetspeech/paraformer/paraformer_large_infer.sh 4 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
egs_modelscope/aishell/paraformer/paraformer_large_finetune.sh
@@ -7,7 +7,7 @@
gpu_num=2
count=1
gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
train_cmd=utils/run.pl
infer_cmd=utils/run.pl
@@ -84,7 +84,7 @@
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
if ${gpu_inference}; then
-    inference_nj=$[${ngpu}*${njob}]
+    inference_nj=$njob
    _ngpu=1
else
    inference_nj=$njob
@@ -237,10 +237,10 @@
        fi
        ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
            python -m funasr.bin.asr_inference_launch \
-                --batch_size 1 \
+                --batch_size 64 \
                --ngpu "${_ngpu}" \
                --njob ${njob} \
-                --gpuid_list ${gpuid_list} \
+                --gpuid_list ${gpuid_list:0:1} \
                --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
                --key_file "${_logdir}"/keys.JOB.scp \
                --asr_train_config "${asr_exp}"/config.yaml \
egs_modelscope/aishell/paraformer/paraformer_large_infer.sh
@@ -10,9 +10,9 @@
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"     # please do not modify the model revision
inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4  # the number of jobs for each gpu
+njob=1  # the number of jobs for each gpu
gpu_inference=true  # Whether to perform gpu decoding, set false for cpu decoding
if ${gpu_inference}; then
egs_modelscope/aishell2/paraformer/paraformer_large_finetune.sh
@@ -7,7 +7,7 @@
gpu_num=2
count=1
gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
train_cmd=utils/run.pl
infer_cmd=utils/run.pl
@@ -85,7 +85,7 @@
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
if ${gpu_inference}; then
-    inference_nj=$[${ngpu}*${njob}]
+    inference_nj=$njob
    _ngpu=1
else
    inference_nj=$njob
@@ -252,10 +252,10 @@
        fi
        ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
            python -m funasr.bin.asr_inference_launch \
-                --batch_size 1 \
+                --batch_size 64 \
                --ngpu "${_ngpu}" \
                --njob ${njob} \
-                --gpuid_list ${gpuid_list} \
+                --gpuid_list ${gpuid_list:0:1} \
                --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
                --key_file "${_logdir}"/keys.JOB.scp \
                --asr_train_config "${asr_exp}"/config.yaml \
egs_modelscope/aishell2/paraformer/paraformer_large_infer.sh
@@ -10,9 +10,9 @@
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"     # please do not modify the model revision
inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4  # the number of jobs for each gpu
+njob=1  # the number of jobs for each gpu
gpu_inference=true  # Whether to perform gpu decoding, set false for cpu decoding
if ${gpu_inference}; then
egs_modelscope/common/modelscope_common_finetune.sh
@@ -7,7 +7,7 @@
gpu_num=2
count=1
gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
train_cmd=utils/run.pl
infer_cmd=utils/run.pl
@@ -84,7 +84,7 @@
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
if ${gpu_inference}; then
-    inference_nj=$[${ngpu}*${njob}]
+    inference_nj=$njob
    _ngpu=1
else
    inference_nj=$njob
@@ -244,10 +244,10 @@
        fi
        ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
            python -m funasr.bin.asr_inference_launch \
-                --batch_size 1 \
+                --batch_size 64 \
                --ngpu "${_ngpu}" \
                --njob ${njob} \
-                --gpuid_list ${gpuid_list} \
+                --gpuid_list ${gpuid_list:0:1} \
                --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
                --key_file "${_logdir}"/keys.JOB.scp \
                --asr_train_config "${asr_exp}"/config.yaml \
egs_modelscope/common/modelscope_common_infer.sh
@@ -8,9 +8,9 @@
model_revision="v1.0.4"     # please do not modify the model revision
data_dir=  # wav list, ${data_dir}/wav.scp
exp_dir="exp"
-gpuid_list="0,1"
+gpuid_list="0"
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4
+njob=1
gpu_inference=true
decode_cmd=utils/run.pl
egs_modelscope/common/modelscope_common_infer_after_finetune.sh
@@ -8,9 +8,9 @@
data_dir=  # wav list, ${data_dir}/wav.scp
finetune_model_name=  # fine-tuning model name
finetune_exp_dir=  # fine-tuning model experiment result path
-gpuid_list="0,1"
+gpuid_list="0"
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4
+njob=1
gpu_inference=true
decode_cmd=utils/run.pl
@@ -20,7 +20,6 @@
    inference_nj=$[${ngpu}*${njob}]
    _ngpu=1
else
-    inference_nj=${njob}
     inference_nj=${njob}
    _ngpu=0
fi
@@ -63,4 +62,4 @@
    for i in $(seq ${inference_nj}); do
        cat ${_logdir}/text.${i}
-    done | sort -k1 >${_dir}/text
+    done | sort -k1 >${_dir}/text
egs_modelscope/speechio/paraformer/paraformer_large_infer.sh
@@ -10,9 +10,9 @@
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"     # please do not modify the model revision
inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4  # the number of jobs for each gpu
+njob=1  # the number of jobs for each gpu
gpu_inference=true  # Whether to perform gpu decoding, set false for cpu decoding
if ${gpu_inference}; then
egs_modelscope/wenetspeech/paraformer/paraformer_large_infer.sh
@@ -10,9 +10,9 @@
model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
model_revision="v1.0.4"     # please do not modify the model revision
inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4  # the number of jobs for each gpu
+njob=1  # the number of jobs for each gpu
gpu_inference=true  # Whether to perform gpu decoding, set false for cpu decoding
if ${gpu_inference}; then