From 8f5576c3eaf7ba89bfce269d2a4846004aee43db Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Thu, 20 Jun 2024 19:37:20 +0800
Subject: [PATCH] Dev gzf deepspeed (#1835)
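
Move the example DeepSpeed configs from
examples/industrial_data_pretraining/deepspeed/ to a shared
examples/deepspeed_conf/ directory. The finetune and demo scripts now
launch funasr/bin/train_ds.py through torchrun, taking rendezvous
settings from the environment (WORLD_SIZE, RANK, MASTER_ADDR,
MASTER_PORT) with single-node defaults, and expose
train_conf.use_deepspeed / train_conf.deepspeed_config toggles along
with refreshed dataset and trainer defaults. The obsolete
paraformer/train_from_local.sh example is removed.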
---
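Notes: the rewritten launchers read standard torchrun rendezvous
variables and fall back to single-node defaults when they are unset.
A minimal sketch of a two-node launch of one updated script, assuming
the environment below (host address and node count are illustrative,
not taken from this patch):

    # On node 0; repeat on node 1 with RANK=1.
    cd examples/industrial_data_pretraining/paraformer
    export WORLD_SIZE=2           # number of nodes, consumed as --nnodes
    export RANK=0                 # this node's rank, consumed as --node_rank
    export MASTER_ADDR=10.0.0.1   # rendezvous host (illustrative)
    export MASTER_PORT=26669      # rendezvous port (the script's default)
    bash finetune.sh

With nothing exported, the defaults reproduce the previous single-node
behavior (--nnodes 1 --node_rank 0 --master_addr 127.0.0.1).
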
 examples/deepspeed_conf/ds_stage1.json                                  |  0
 examples/deepspeed_conf/ds_stage2.json                                  |  0
 examples/deepspeed_conf/ds_stage3.json                                  |  0
 examples/deepspeed_conf/ds_z0_config.json                               |  0
 examples/deepspeed_conf/ds_z2_config.json                               |  0
 examples/deepspeed_conf/ds_z2_offload_config.json                       |  0
 examples/deepspeed_conf/ds_z3_config.json                               |  0
 examples/deepspeed_conf/ds_z3_offload_config.json                       |  0
 examples/industrial_data_pretraining/bicif_paraformer/finetune.sh       | 31 ++++-
 examples/industrial_data_pretraining/contextual_paraformer/finetune.sh  | 32 ++++-
 examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune.sh  | 20 ++
 examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh | 37 ++++-
 examples/industrial_data_pretraining/paraformer/finetune.sh             | 30 ++++-
 examples/industrial_data_pretraining/paraformer/train_from_local.sh     | 75 ----------
 examples/industrial_data_pretraining/paraformer_streaming/finetune.sh   | 30 ++++-
 examples/industrial_data_pretraining/seaco_paraformer/finetune.sh       | 31 ++++-
 examples/industrial_data_pretraining/sense_voice/finetune.sh            | 36 ++++-
 17 files changed, 192 insertions(+), 130 deletions(-)
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_stage1.json b/examples/deepspeed_conf/ds_stage1.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_stage1.json
rename to examples/deepspeed_conf/ds_stage1.json
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_stage2.json b/examples/deepspeed_conf/ds_stage2.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_stage2.json
rename to examples/deepspeed_conf/ds_stage2.json
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_stage3.json b/examples/deepspeed_conf/ds_stage3.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_stage3.json
rename to examples/deepspeed_conf/ds_stage3.json
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z0_config.json b/examples/deepspeed_conf/ds_z0_config.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_z0_config.json
rename to examples/deepspeed_conf/ds_z0_config.json
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z2_config.json b/examples/deepspeed_conf/ds_z2_config.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_z2_config.json
rename to examples/deepspeed_conf/ds_z2_config.json
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z2_offload_config.json b/examples/deepspeed_conf/ds_z2_offload_config.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_z2_offload_config.json
rename to examples/deepspeed_conf/ds_z2_offload_config.json
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z3_config.json b/examples/deepspeed_conf/ds_z3_config.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_z3_config.json
rename to examples/deepspeed_conf/ds_z3_config.json
diff --git a/examples/industrial_data_pretraining/deepspeed/ds_z3_offload_config.json b/examples/deepspeed_conf/ds_z3_offload_config.json
similarity index 100%
rename from examples/industrial_data_pretraining/deepspeed/ds_z3_offload_config.json
rename to examples/deepspeed_conf/ds_z3_offload_config.json
diff --git a/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh b/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
index 08f6f56..dc3149a 100644
--- a/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
@@ -1,6 +1,8 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+workspace=`pwd`
+
# method1, finetune from model hub
# which gpu to train or finetune
@@ -45,21 +47,38 @@
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
+
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
++model="${model_name_or_model_dir}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=20000 \
+++dataset="AudioDataset" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
++train_conf.max_epoch=50 \
++train_conf.log_interval=1 \
-++train_conf.resume=false \
+++train_conf.resume=true \
++train_conf.validate_interval=2000 \
++train_conf.save_checkpoint_interval=2000 \
++train_conf.keep_nbest_models=20 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh b/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
index 25c7330..f82a67d 100644
--- a/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
@@ -1,6 +1,8 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+workspace=`pwd`
+
# method1, finetune from model hub
# which gpu to train or finetune
@@ -46,22 +48,38 @@
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
+
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
++model="${model_name_or_model_dir}" \
-++model_revision="${model_revision}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=20000 \
+++dataset="AudioDatasetHotword" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
++train_conf.max_epoch=50 \
++train_conf.log_interval=1 \
-++train_conf.resume=false \
+++train_conf.resume=true \
++train_conf.validate_interval=2000 \
++train_conf.save_checkpoint_interval=2000 \
++train_conf.keep_nbest_models=20 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune.sh b/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune.sh
index 6b88e75..3ab7615 100644
--- a/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune.sh
+++ b/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune.sh
@@ -30,10 +30,20 @@
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
+
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
--config-path "${workspace}/conf" \
--config-name "${config}" \
++train_data_set_list="${train_data}" \
@@ -41,6 +51,8 @@
++dataset_conf.batch_size=4 \
++dataset_conf.num_workers=4 \
++train_conf.max_epoch=15 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0001 \
++init_param="${init_param}" \
++output_dir="${output_dir}" &> ${log_file} &
diff --git a/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh b/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh
index 306e23d..1762f39 100644
--- a/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh
+++ b/examples/industrial_data_pretraining/llm_asr/demo_train_or_finetune2.sh
@@ -30,18 +30,39 @@
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
+
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
--config-path "${workspace}/conf" \
--config-name "${config}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=1 \
-++dataset_conf.num_workers=0 \
-++train_conf.max_epoch=15 \
-++train_conf.save_checkpoint_interval=1000 \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
+++dataset_conf.batch_type="token" \
+++dataset_conf.num_workers=4 \
+++train_conf.max_epoch=50 \
+++train_conf.log_interval=1 \
+++train_conf.resume=true \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
+++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0001 \
++init_param="${init_param}" \
++output_dir="${output_dir}" &> ${log_file} &
diff --git a/examples/industrial_data_pretraining/paraformer/finetune.sh b/examples/industrial_data_pretraining/paraformer/finetune.sh
index 9467a0b..8d4491a 100644
--- a/examples/industrial_data_pretraining/paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune.sh
@@ -1,6 +1,7 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+workspace=`pwd`
# which gpu to train or finetune
export CUDA_VISIBLE_DEVICES="0,1"
@@ -40,27 +41,42 @@
output_dir="./outputs"
log_file="${output_dir}/log.txt"
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---node_rank 0 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
++model="${model_name_or_model_dir}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=20000 \
+++dataset="AudioDataset" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
++train_conf.max_epoch=50 \
++train_conf.log_interval=1 \
-++train_conf.resume=false \
+++train_conf.resume=true \
++train_conf.validate_interval=2000 \
++train_conf.save_checkpoint_interval=2000 \
++train_conf.keep_nbest_models=20 \
++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/paraformer/train_from_local.sh b/examples/industrial_data_pretraining/paraformer/train_from_local.sh
deleted file mode 100644
index b883908..0000000
--- a/examples/industrial_data_pretraining/paraformer/train_from_local.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
-# MIT License (https://opensource.org/licenses/MIT)
-
-# method2, finetune from local model
-
-workspace=`pwd`
-
-echo "current path: ${workspace}" # /xxxx/funasr/examples/industrial_data_pretraining/paraformer
-
-# download model
-local_path_root=${workspace}/modelscope_models
-mkdir -p ${local_path_root}
-local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
-
-
-# which gpu to train or finetune
-export CUDA_VISIBLE_DEVICES="0,1"
-gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-
-# data dir, which contains: train.json, val.json
-data_dir="../../../data/list"
-
-train_data="${data_dir}/train.jsonl"
-val_data="${data_dir}/val.jsonl"
-
-
-# generate train.jsonl and val.jsonl from wav.scp and text.txt
-python -m funasr.datasets.audio_datasets.scp2jsonl \
-++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
-++data_type_list='["source", "target"]' \
-++jsonl_file_out="${train_data}"
-
-python -m funasr.datasets.audio_datasets.scp2jsonl \
-++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
-++data_type_list='["source", "target"]' \
-++jsonl_file_out="${val_data}"
-
-
-tokens="${local_path}/tokens.json"
-cmvn_file="${local_path}/am.mvn"
-
-# output dir
-output_dir="./outputs"
-log_file="${output_dir}/log.txt"
-
-config_name="config.yaml"
-
-init_param="${local_path}/model.pt"
-
-mkdir -p ${output_dir}
-echo "log_file: ${log_file}"
-
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
---config-path "${local_path}" \
---config-name "${config_name}" \
-++train_data_set_list="${train_data}" \
-++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=20000 \
-++dataset_conf.batch_type="token" \
-++dataset_conf.num_workers=4 \
-++train_conf.max_epoch=50 \
-++train_conf.log_interval=10 \
-++train_conf.resume=false \
-++train_conf.validate_interval=15 \
-++train_conf.save_checkpoint_interval=15 \
-++train_conf.keep_nbest_models=50 \
-++optim_conf.lr=0.0002 \
-++init_param="${init_param}" \
-++tokenizer_conf.token_list="${tokens}" \
-++frontend_conf.cmvn_file="${cmvn_file}" \
-++output_dir="${output_dir}" &> ${log_file}
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
index 54bc2d1..96ac79f 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
@@ -1,6 +1,7 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+workspace=`pwd`
# which gpu to train or finetune
export CUDA_VISIBLE_DEVICES="0,1"
@@ -41,25 +42,42 @@
output_dir="./outputs"
log_file="${output_dir}/log.txt"
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
++model="${model_name_or_model_dir}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=20000 \
+++dataset="AudioDataset" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
++train_conf.max_epoch=50 \
++train_conf.log_interval=1 \
-++train_conf.resume=false \
+++train_conf.resume=true \
++train_conf.validate_interval=2000 \
++train_conf.save_checkpoint_interval=2000 \
++train_conf.keep_nbest_models=20 \
+++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh b/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh
index 0221bb2..32fe4ca 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh
@@ -1,6 +1,7 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+workspace=`pwd`
# which gpu to train or finetune
export CUDA_VISIBLE_DEVICES="0,1"
@@ -44,22 +45,40 @@
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
+
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
++model="${model_name_or_model_dir}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=20000 \
+++dataset="AudioDatasetHotword" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
++train_conf.max_epoch=50 \
++train_conf.log_interval=1 \
-++train_conf.resume=false \
+++train_conf.resume=true \
++train_conf.validate_interval=2000 \
++train_conf.save_checkpoint_interval=2000 \
++train_conf.avg_keep_nbest_models_type='loss' \
++train_conf.keep_nbest_models=20 \
+++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/sense_voice/finetune.sh b/examples/industrial_data_pretraining/sense_voice/finetune.sh
index cb07901..1191657 100644
--- a/examples/industrial_data_pretraining/sense_voice/finetune.sh
+++ b/examples/industrial_data_pretraining/sense_voice/finetune.sh
@@ -1,6 +1,7 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+workspace=`pwd`
# which gpu to train or finetune
export CUDA_VISIBLE_DEVICES="0"
@@ -44,26 +45,39 @@
mkdir -p ${output_dir}
echo "log_file: ${log_file}"
-#torchrun \
-#--nnodes 1 \
-#--node_rank 0 \
-#--nproc_per_node ${gpu_num} \
-python \
-../../../funasr/bin/train.py \
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
+
+DISTRIBUTED_ARGS="
+ --nnodes ${WORLD_SIZE:-1} \
+ --nproc_per_node $gpu_num \
+ --node_rank ${RANK:-0} \
+ --master_addr ${MASTER_ADDR:-127.0.0.1} \
+ --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
++model="${model_name_or_model_dir}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=500 \
+++dataset="SenseVoiceDataset" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size=6000 \
+++dataset_conf.sort_size=1024 \
++dataset_conf.batch_type="token" \
-++dataset_conf.num_workers=0 \
+++dataset_conf.num_workers=4 \
++train_conf.max_epoch=50 \
++train_conf.log_interval=1 \
-++train_conf.resume=false \
+++train_conf.resume=true \
++train_conf.validate_interval=2000 \
++train_conf.save_checkpoint_interval=2000 \
++train_conf.keep_nbest_models=20 \
++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
++optim_conf.lr=0.0002 \
-++debug=true \
-++device="cpu" \
++output_dir="${output_dir}" #&> ${log_file}
\ No newline at end of file
--
Gitblit v1.9.1