From 2a66366be4c2715870e4859fd5a5db6e8a9dc00a Mon Sep 17 00:00:00 2001
From: chenmengzheAAA <123789350+chenmengzheAAA@users.noreply.github.com>
Date: Thu, 14 Sep 2023 19:00:17 +0800
Subject: [PATCH] Merge pull request #956 from alibaba-damo-academy/chenmengzheAAA-patch-4
---
egs/aishell/data2vec_transformer_finetune/run.sh | 58 ++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 18 deletions(-)
diff --git a/egs/aishell/data2vec_transformer_finetune/run.sh b/egs/aishell/data2vec_transformer_finetune/run.sh
index e040290..7b01a5f 100755
--- a/egs/aishell/data2vec_transformer_finetune/run.sh
+++ b/egs/aishell/data2vec_transformer_finetune/run.sh
@@ -8,7 +8,7 @@
count=1
gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
# for gpu decoding, inference_nj=ngpu*njob; for cpu decoding, inference_nj=njob
-njob=1
+njob=5
train_cmd=utils/run.pl
infer_cmd=utils/run.pl
@@ -20,15 +20,15 @@
type=sound
scp=wav.scp
speed_perturb="0.9 1.0 1.1"
-stage=3
-stop_stage=4
+stage=0
+stop_stage=5
# feature configuration
feats_dim=80
nj=64
# data
-raw_data=
+raw_data=../raw_data
data_url=www.openslr.org/resources/33
# exp tag
@@ -52,8 +52,8 @@
asr_config=conf/train_asr_transformer_12e_6d_3072_768.yaml
model_dir="baseline_$(basename "${asr_config}" .yaml)_${lang}_${token_type}_${tag}"
-inference_config=conf/decode_asr_transformer_noctc_1best.yaml
-inference_asr_model=valid.acc.ave_10best.pb
+inference_config=conf/decode_asr_transformer.yaml
+inference_asr_model=valid.cer_ctc.ave_10best.pb
# you can set gpu num for decoding here
gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training stage by default
@@ -88,14 +88,14 @@
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
echo "stage 1: Feature and CMVN Generation"
- utils/compute_cmvn.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} ${feats_dir}/data/${train_set}
+ utils/compute_cmvn.sh --fbankdir ${feats_dir}/data/${train_set} --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --config_file "$asr_config" --scale 1.0
fi
-token_list=${feats_dir}/data/${lang}_token_list/char/tokens.txt
+token_list=${feats_dir}/data/${lang}_token_list/$token_type/tokens.txt
echo "dictionary: ${token_list}"
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
echo "stage 2: Dictionary Preparation"
- mkdir -p ${feats_dir}/data/${lang}_token_list/char/
+ mkdir -p ${feats_dir}/data/${lang}_token_list/$token_type/
echo "make a dictionary"
echo "<blank>" > ${token_list}
@@ -106,11 +106,17 @@
echo "<unk>" >> ${token_list}
fi
-# Training Stage
+# LM Training Stage
world_size=$gpu_num # run on one machine
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- echo "stage 3: Training"
- python utils/download_model.py --model_name ${model_name} # download pretrained model on ModelScope
+ echo "stage 3: LM Training"
+fi
+
+# ASR Training Stage
+world_size=$gpu_num # run on one machine
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ echo "stage 4: ASR Training"
+ python utils/download_model.py --model_name ${model_name} # download pretrained model on ModelScope
mkdir -p ${exp_dir}/exp/${model_dir}
mkdir -p ${exp_dir}/exp/${model_dir}/log
INIT_FILE=${exp_dir}/exp/${model_dir}/ddp_init
@@ -128,13 +134,14 @@
--task_name asr \
--gpu_id $gpu_id \
--use_preprocessor true \
- --token_type char \
+ --token_type $token_type \
--token_list $token_list \
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --data_file_names "wav.scp,text" \
--init_param ${init_param} \
- --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
+ --cmvn_file ${feats_dir}/data/${train_set}/cmvn/am.mvn \
--speed_perturb ${speed_perturb} \
--resume true \
--output_dir ${exp_dir}/exp/${model_dir} \
@@ -151,8 +158,8 @@
fi
# Testing Stage
-if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
- echo "stage 4: Inference"
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+ echo "stage 5: Inference"
for dset in ${test_sets}; do
asr_exp=${exp_dir}/exp/${model_dir}
inference_tag="$(basename "${inference_config}" .yaml)"
@@ -184,12 +191,12 @@
--njob ${njob} \
--gpuid_list ${gpuid_list} \
--data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
- --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
+ --cmvn_file ${feats_dir}/data/${train_set}/cmvn/am.mvn \
--key_file "${_logdir}"/keys.JOB.scp \
--asr_train_config "${asr_exp}"/config.yaml \
--asr_model_file "${asr_exp}"/"${inference_asr_model}" \
--output_dir "${_logdir}"/output.JOB \
- --mode paraformer \
+ --mode asr \
${_opts}
for f in token token_int score text; do
@@ -205,4 +212,19 @@
tail -n 3 ${_dir}/text.cer > ${_dir}/text.cer.txt
cat ${_dir}/text.cer.txt
done
+fi
+
+# Prepare files for ModelScope fine-tuning and inference
+if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
+ echo "stage 6: ModelScope Preparation"
+ cp ${feats_dir}/data/${train_set}/cmvn/am.mvn ${exp_dir}/exp/${model_dir}/am.mvn
+ vocab_size=$(cat ${token_list} | wc -l)
+ python utils/gen_modelscope_configuration.py \
+ --am_model_name $inference_asr_model \
+ --mode asr \
+ --model_name data2vec_finetune_transformer \
+ --dataset aishell \
+ --output_dir $exp_dir/exp/$model_dir \
+ --vocab_size $vocab_size \
+ --tag $tag
fi
\ No newline at end of file
--
Gitblit v1.9.1