From 8f5576c3eaf7ba89bfce269d2a4846004aee43db Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 20 Jun 2024 19:37:20 +0800
Subject: [PATCH] Dev gzf deepspeed (#1835)
---
examples/industrial_data_pretraining/paraformer_streaming/finetune.sh | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
index 54bc2d1..96ac79f 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
@@ -1,6 +1,7 @@
 # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
 # MIT License (https://opensource.org/licenses/MIT)
+workspace=`pwd`
 # which gpu to train or finetune
 export CUDA_VISIBLE_DEVICES="0,1"
@@ -41,25 +42,42 @@
 output_dir="./outputs"
 log_file="${output_dir}/log.txt"
+deepspeed_config=${workspace}/../../ds_stage1.json
 mkdir -p ${output_dir}
 echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
 ++model="${model_name_or_model_dir}" \
 ++train_data_set_list="${train_data}" \
 ++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=20000 \
+++dataset="AudioDataset" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
 ++dataset_conf.batch_type="token" \
 ++dataset_conf.num_workers=4 \
 ++train_conf.max_epoch=50 \
 ++train_conf.log_interval=1 \
-++train_conf.resume=false \
+++train_conf.resume=true \
 ++train_conf.validate_interval=2000 \
 ++train_conf.save_checkpoint_interval=2000 \
 ++train_conf.keep_nbest_models=20 \
+++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
 ++optim_conf.lr=0.0002 \
 ++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
--
Gitblit v1.9.1
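
Note on the DeepSpeed config referenced above: the patch points ++train_conf.deepspeed_config at a ds_stage1.json two directories above the example, but leaves ++train_conf.use_deepspeed=false, so the file is only consulted once that flag is flipped to true. Below is a minimal ZeRO stage-1 sketch using standard DeepSpeed keys; the actual ds_stage1.json shipped with FunASR may set different values.

    # Minimal ZeRO stage-1 DeepSpeed config sketch (standard DeepSpeed keys;
    # the ds_stage1.json shipped with FunASR may differ).
    cat > ds_stage1.json <<'EOF'
    {
      "train_micro_batch_size_per_gpu": 1,
      "gradient_accumulation_steps": 1,
      "gradient_clipping": 5.0,
      "zero_optimization": {
        "stage": 1
      },
      "fp16": {
        "enabled": true
      }
    }
    EOF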
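
The new DISTRIBUTED_ARGS block reads the torchrun rendezvous settings from the environment and falls back to single-node defaults; note that the script wires WORLD_SIZE and RANK to --nnodes and --node_rank, i.e. it interprets them as node count and node rank. A hypothetical two-node launch, with illustrative host and port values:

    # Node 0 (master); the address and port are illustrative.
    export WORLD_SIZE=2 RANK=0 MASTER_ADDR=10.0.0.1 MASTER_PORT=26669
    bash finetune.sh

    # Node 1: same command with RANK=1 and identical MASTER_ADDR/MASTER_PORT.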