From 5fec3c9e58fceda85fa2daf7deec2492372dac8a Mon Sep 17 00:00:00 2001
From: Chong Zhang <iriszhangchong@gmail.com>
Date: Tue, 23 May 2023 17:01:47 +0800
Subject: [PATCH] Update egs/librispeech_100h/conformer/run.sh
---
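Notes: this change renumbers the recipe stages so that LM training (stage 3)
and ASR training (stage 4) are separate steps, with inference moving to
stage 5. It also enables 3-way speed perturbation (0.9/1.0/1.1), builds a
merged validation set from dev_clean and dev_other during data prep, passes
the train-set CMVN statistics into training, and decodes from
${feats_dir}/data rather than the dump directory.

A minimal usage sketch with the new stage numbering; the --stage/--stop_stage
command-line overrides are an assumption (they require a parse_options-style
helper in run.sh; otherwise edit the two variables at the top of the script):

    # full pipeline: data prep (stage 0) through inference (stage 5)
    # NOTE: the --stage/--stop_stage flags assume run.sh parses CLI overrides
    bash run.sh --stage 0 --stop_stage 5

    # re-decode only, using the beam-5 config selected in this patch
    bash run.sh --stage 5 --stop_stage 5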
egs/librispeech_100h/conformer/run.sh | 36 +++++++++++++++++++++++++-----------
 1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/egs/librispeech_100h/conformer/run.sh b/egs/librispeech_100h/conformer/run.sh
index 7d63125..f0db69c 100755
--- a/egs/librispeech_100h/conformer/run.sh
+++ b/egs/librispeech_100h/conformer/run.sh
@@ -19,8 +19,9 @@
token_type=bpe
type=sound
scp=wav.scp
-stage=3
-stop_stage=4
+speed_perturb="0.9 1.0 1.1"
+stage=0
+stop_stage=5
# feature configuration
feats_dim=80
@@ -52,9 +53,10 @@
asr_config=conf/train_asr_conformer.yaml
model_dir="baseline_$(basename "${asr_config}" .yaml)_${lang}_${token_type}_${tag}"
-inference_config=conf/decode_asr_transformer.yaml
-#inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml
-inference_asr_model=valid.acc.ave_10best.pth
+#inference_config=conf/decode_asr_transformer_ctc0.3_beam1.yaml
+inference_config=conf/decode_asr_transformer_ctc0.3_beam5.yaml
+#inference_config=conf/decode_asr_transformer_ctc0.3_beam20.yaml
+inference_asr_model=valid.acc.ave_10best.pb
# you can set gpu num for decoding here
gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training stage by default
@@ -82,6 +84,11 @@
for x in dev-clean dev-other test-clean test-other train-clean-100; do
local/data_prep.sh ${raw_data}/LibriSpeech/${x} ${feats_dir}/data/${x//-/_}
done
+ mkdir -p $feats_dir/data/$valid_set
+ dev_sets="dev_clean dev_other"
+ for file in wav.scp text; do
+ ( for f in $dev_sets; do cat $feats_dir/data/$f/$file; done ) | sort -k1 > $feats_dir/data/$valid_set/$file || exit 1;
+ done
fi
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
@@ -105,10 +112,16 @@
echo "<unk>" >> ${token_list}
fi
-# Training Stage
+# LM Training Stage
world_size=$gpu_num # run on one machine
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- echo "stage 3: Training"
+ echo "stage 3: LM Training"
+fi
+
+# ASR Training Stage
+world_size=$gpu_num # run on one machine
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ echo "stage 4: ASR Training"
mkdir -p ${exp_dir}/exp/${model_dir}
mkdir -p ${exp_dir}/exp/${model_dir}/log
INIT_FILE=${exp_dir}/exp/${model_dir}/ddp_init
@@ -133,10 +146,11 @@
--data_dir ${feats_dir}/data \
--train_set ${train_set} \
--valid_set ${valid_set} \
+ --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \
+ --speed_perturb ${speed_perturb} \
--resume true \
--output_dir ${exp_dir}/exp/${model_dir} \
--config $asr_config \
- --input_size $feats_dim \
--ngpu $gpu_num \
--num_worker_count $count \
--multiprocessing_distributed true \
@@ -150,8 +164,8 @@
fi
# Testing Stage
-if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
- echo "stage 4: Inference"
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+ echo "stage 5: Inference"
for dset in ${test_sets}; do
asr_exp=${exp_dir}/exp/${model_dir}
inference_tag="$(basename "${inference_config}" .yaml)"
@@ -162,7 +176,7 @@
exit 0
fi
mkdir -p "${_logdir}"
- _data="${feats_dir}/${dumpdir}/${dset}"
+ _data="${feats_dir}/data/${dset}"
key_file=${_data}/${scp}
num_scp_file="$(<${key_file} wc -l)"
_nj=$([ $inference_nj -le $num_scp_file ] && echo "$inference_nj" || echo "$num_scp_file")
--
Gitblit v1.9.1