From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] Commit

---
 examples/industrial_data_pretraining/paraformer/finetune.sh |  104 +++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 95 insertions(+), 9 deletions(-)

diff --git a/examples/industrial_data_pretraining/paraformer/finetune.sh b/examples/industrial_data_pretraining/paraformer/finetune.sh
old mode 100644
new mode 100755
index ce1953c..a1f041f
--- a/examples/industrial_data_pretraining/paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune.sh
@@ -1,12 +1,98 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
 
-cmd="funasr/bin/train.py"
+workspace=`pwd`
+export MODELSCOPE_CACHE="/home/boying/IdeaProjects/FunASRxl-0313/models/"
+# which GPU(s) to use for training or finetuning
+export CUDA_VISIBLE_DEVICES="0"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
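+# e.g. CUDA_VISIBLE_DEVICES="0,1" gives gpu_num=2 (torchrun below starts one worker per GPU)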
 
-python $cmd \
-+model="/Users/zhifu/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
-+token_list="/Users/zhifu/.cache/modelscope/hub/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/tokens.txt" \
-+train_data_set_list="/Users/zhifu/funasr_github/test_local/aishell2_dev_ios/asr_task_debug_len.jsonl" \
-+output_dir="/Users/zhifu/Downloads/ckpt/funasr2/exp2" \
-+device="cpu"
+# model name from the model hub, or a local model directory
 
-#--config-path "/Users/zhifu/funasr_github/examples/industrial_data_pretraining/paraformer-large/conf" \
-#--config-name "finetune.yaml" \
\ No newline at end of file
+## option 1, download model automatically
+model_name_or_model_dir="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+
+## option 2, download model by git
+#local_path_root=${workspace}/modelscope_models
+#mkdir -p ${local_path_root}/${model_name_or_model_dir}
+#git clone https://www.modelscope.cn/${model_name_or_model_dir}.git ${local_path_root}/${model_name_or_model_dir}
+#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}
+
+
+# data dir, which contains: train/train.jsonl, val/val.jsonl
+data_dir="../../../data"
+
+train_data="${data_dir}/train/train.jsonl"
+val_data="${data_dir}/val/val.jsonl"
+
+# generate train.jsonl and val.jsonl from wav.scp and text.txt
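+# expected inputs are Kaldi-style files, one utterance per line, with matching IDs, e.g.:
+#   train_wav.scp:   utt_001 /data/audio/utt_001.wav
+#   train_text.txt:  utt_001 some transcript text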
+scp2jsonl \
+++scp_file_list='["../../../data/train/train_wav.scp", "../../../data/train/train_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${train_data}"
+
+scp2jsonl \
+++scp_file_list='["../../../data/val/val_wav.scp", "../../../data/val/val_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${val_data}"
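+# each output jsonl line pairs one utterance with its transcript, roughly:
+#   {"key": "utt_001", "source": "/data/audio/utt_001.wav", "source_len": ..., "target": "some transcript text", "target_len": ...}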
+
+
+# exp output dir
+output_dir="/home/boying/IdeaProjects/FunASRxl-0313/exp/paraformer_train"
+log_file="${output_dir}/log.txt"
+
+BATCH_SIZE=16
+LR=0.0005
+MAX_EPOCH=50
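+# note: with dataset_conf.batch_type="token" below, BATCH_SIZE is a per-batch token budget,
+# not a number of utterances; switch to batch_type="example" for per-utterance batching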
+
+deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
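+# only read when train_conf.use_deepspeed=true is passed to train_ds.py below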
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
+
+DISTRIBUTED_ARGS="
+    --nnodes ${WORLD_SIZE:-1} \
+    --nproc_per_node $gpu_num \
+    --node_rank ${RANK:-0} \
+    --master_addr ${MASTER_ADDR:-127.0.0.1} \
+    --master_port ${MASTER_PORT:-26669}
+"
+
+echo $DISTRIBUTED_ARGS
+
+echo "=========================================="
+echo " 寮�濮� FunASR 璁粌..."
+echo "馃搧 鏁版嵁鐩綍: $train_data"
+echo "馃捑 杈撳嚭鐩綍: $output_dir"
+echo " 棰勮缁冩ā鍨�: $model_name_or_model_dir"
+echo "馃幆 Batch Size: $BATCH_SIZE, LR: $LR, Epochs: $MAX_EPOCH"
+echo "=========================================="
+
+torchrun $DISTRIBUTED_ARGS \
+../../../funasr/bin/train_ds.py \
+++model="${model_name_or_model_dir}" \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++dataset="AudioDataset" \
+++dataset_conf.index_ds="IndexDSJsonl" \
+++dataset_conf.data_split_num=1 \
+++dataset_conf.batch_sampler="BatchSampler" \
+++dataset_conf.batch_size="${BATCH_SIZE}"  \
+++dataset_conf.sort_size=1024 \
+++dataset_conf.batch_type="token" \
+++dataset_conf.num_workers=4 \
+++train_conf.max_epoch="${MAX_EPOCH}" \
+++train_conf.log_interval=1 \
+++train_conf.resume=true \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
+++train_conf.avg_nbest_model=10 \
+++train_conf.use_deepspeed=false \
+++train_conf.deepspeed_config=${deepspeed_config} \
+++optim_conf.lr="${LR}" \
+++output_dir="${output_dir}" &> ${log_file}
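+
+# optional guard (a sketch): fail fast if torchrun exited non-zero, so the success
+# banner below is only printed for a successful run; all output went to ${log_file}
+if [ $? -ne 0 ]; then
+    echo "Training failed, see log: ${log_file}"
+    exit 1
+fi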
+
+
+echo "=========================================="
+echo "鉁� 璁粌瀹屾垚锛佹ā鍨嬩繚瀛樺湪: $output_dir"
+echo "=========================================="

--
Gitblit v1.9.1