From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] Commit
---
examples/industrial_data_pretraining/paraformer/finetune.sh | 40 ++++++++++++++++++++++++++++------------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/examples/industrial_data_pretraining/paraformer/finetune.sh b/examples/industrial_data_pretraining/paraformer/finetune.sh
old mode 100644
new mode 100755
index 5bf71f0..a1f041f
--- a/examples/industrial_data_pretraining/paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune.sh
@@ -2,15 +2,15 @@
# MIT License (https://opensource.org/licenses/MIT)
workspace=`pwd`
-
+export MODELSCOPE_CACHE="/home/boying/IdeaProjects/FunASRxl-0313/models/"
# which gpu to train or finetune
-export CUDA_VISIBLE_DEVICES="0,1"
+export CUDA_VISIBLE_DEVICES="0"
gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
# model_name from model_hub, or model_dir in local path
## option 1, download model automatically
-model_name_or_model_dir="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+model_name_or_model_dir="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
## option 2, download model by git
#local_path_root=${workspace}/modelscope_models
@@ -20,26 +20,29 @@
# data dir, which contains: train.json, val.json
-data_dir="../../../data/list"
+data_dir="../../../data"
-train_data="${data_dir}/train.jsonl"
-val_data="${data_dir}/val.jsonl"
+train_data="${data_dir}/train/train.jsonl"
+val_data="${data_dir}/val/val.jsonl"
# generate train.jsonl and val.jsonl from wav.scp and text.txt
scp2jsonl \
-++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
+++scp_file_list='["../../../data/train/train_wav.scp", "../../../data/train/train_text.txt"]' \
++data_type_list='["source", "target"]' \
++jsonl_file_out="${train_data}"
scp2jsonl \
-++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
+++scp_file_list='["../../../data/val/val_wav.scp", "../../../data/val/val_text.txt"]' \
++data_type_list='["source", "target"]' \
++jsonl_file_out="${val_data}"
# exp output dir
-output_dir="./outputs"
+output_dir="/home/boying/IdeaProjects/FunASRxl-0313/exp/paraformer_train"
log_file="${output_dir}/log.txt"
+
+BATCH_SIZE=16
+LR=0.0005
deepspeed_config=${workspace}/../../deepspeed_conf/ds_stage1.json
@@ -56,6 +59,14 @@
echo $DISTRIBUTED_ARGS
+echo "=========================================="
+echo "Starting FunASR training..."
+echo "📁 Training data: $train_data"
+echo "💾 Output dir: $output_dir"
+echo "Pretrained model: $model_name_or_model_dir"
+echo "🎯 Batch size: $BATCH_SIZE, LR: $LR, Epochs: $MAX_EPOCH"
+echo "=========================================="
+
torchrun $DISTRIBUTED_ARGS \
../../../funasr/bin/train_ds.py \
++model="${model_name_or_model_dir}" \
@@ -65,7 +76,7 @@
++dataset_conf.index_ds="IndexDSJsonl" \
++dataset_conf.data_split_num=1 \
++dataset_conf.batch_sampler="BatchSampler" \
-++dataset_conf.batch_size=6000 \
+++dataset_conf.batch_size="${BATCH_SIZE}" \
++dataset_conf.sort_size=1024 \
++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
@@ -78,5 +89,10 @@
++train_conf.avg_nbest_model=10 \
++train_conf.use_deepspeed=false \
++train_conf.deepspeed_config=${deepspeed_config} \
-++optim_conf.lr=0.0002 \
-++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
+++optim_conf.lr="${LR}" \
+++output_dir="${output_dir}" &> ${log_file}
+
+
+echo "=========================================="
+echo "✅ Training complete! Model saved at: $output_dir"
+echo "=========================================="
--
Gitblit v1.9.1
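
For reference, a minimal sketch (not part of the patch) of the wav.scp / text.txt
manifests that the patched scp2jsonl calls expect under the new data/train layout;
the utterance IDs, wav paths, and transcripts below are placeholders:

# hypothetical two-utterance manifests; scp2jsonl pairs the files line by line by utterance ID
mkdir -p ../../../data/train
cat > ../../../data/train/train_wav.scp <<'EOF'
ID0001W0001 /path/to/audio/ID0001W0001.wav
ID0001W0002 /path/to/audio/ID0001W0002.wav
EOF
cat > ../../../data/train/train_text.txt <<'EOF'
ID0001W0001 今天天气怎么样
ID0001W0002 欢迎使用语音识别
EOF
# same ++ override syntax as used in finetune.sh
scp2jsonl \
++scp_file_list='["../../../data/train/train_wav.scp", "../../../data/train/train_text.txt"]' \
++data_type_list='["source", "target"]' \
++jsonl_file_out="../../../data/train/train.jsonl"

The val manifests under data/val follow the same two-column format; finetune.sh
regenerates both jsonl files before launching torchrun.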