From 6aba0e3b5689b5b24ba9c56d9947ce1a859b2ad9 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期四, 22 二月 2024 12:06:40 +0800
Subject: [PATCH] train finetune demo

---
 examples/industrial_data_pretraining/paraformer/infer.sh               |   21 ++++++++++
 /dev/null                                                              |   29 --------------
 .gitignore                                                             |    1 
 examples/aishell/paraformer/demo_train_or_finetune.sh                  |    4 +-
 examples/industrial_data_pretraining/paraformer/infer_from_local.sh    |   39 +++++++++++++++++++
 examples/industrial_data_pretraining/paraformer/finetune_from_local.sh |   13 ++++--
 examples/industrial_data_pretraining/paraformer/finetune.sh            |    6 ++-
 7 files changed, 75 insertions(+), 38 deletions(-)

diff --git a/.gitignore b/.gitignore
index 6bdfd5d..adf2937 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@
 outputs*
 emotion2vec*
 GPT-SoVITS*
+modelscope_models
diff --git a/examples/aishell/paraformer/demo_train_or_finetune.sh b/examples/aishell/paraformer/demo_train_or_finetune.sh
index 4c95dc7..06607c7 100644
--- a/examples/aishell/paraformer/demo_train_or_finetune.sh
+++ b/examples/aishell/paraformer/demo_train_or_finetune.sh
@@ -17,7 +17,7 @@
 
 train_data="${data_dir}/train.jsonl"
 val_data="${data_dir}/val.jsonl"
-tokens="${data_dir}/tokens.jsonl"
+tokens="${data_dir}/tokens.json"
 cmvn_file="${data_dir}/am.mvn"
 
 # exp output dir
@@ -45,7 +45,7 @@
 ++dataset_conf.batch_size=32 \
 ++dataset_conf.batch_type="example" \
 ++dataset_conf.num_workers=4 \
-++train_conf.max_epoch=20 \
+++train_conf.max_epoch=150 \
 ++optim_conf.lr=0.0002 \
 ++init_param="${init_param}" \
 ++output_dir="${output_dir}" &> ${log_file}
diff --git a/examples/industrial_data_pretraining/paraformer/demo.sh b/examples/industrial_data_pretraining/paraformer/demo.sh
deleted file mode 100644
index 477aee6..0000000
--- a/examples/industrial_data_pretraining/paraformer/demo.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# method1, inference from model hub
-
-model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_revision="v2.0.4"
-
-python -m funasr.bin.inference \
-+model=${model} \
-+model_revision=${model_revision} \
-+input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav" \
-+output_dir="./outputs/debug" \
-+device="cpu" \
-
-
-# method2, inference from local model
-
-#python -m funasr.bin.inference \
-#--config-path="/Users/zhifu/funasr_github/test_local/funasr_cli_egs" \
-#--config-name="config.yaml" \
-#++init_param="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/model.pt" \
-#++tokenizer_conf.token_list="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/tokens.txt" \
-#++frontend_conf.cmvn_file="/Users/zhifu/funasr_github/test_local/funasr_cli_egs/am.mvn" \
-#++input="data/wav.scp" \
-#++output_dir="./outputs/debug" \
-#++device="cuda" \
-
-
-
-
diff --git a/examples/industrial_data_pretraining/paraformer/finetune_from_model_hub.sh b/examples/industrial_data_pretraining/paraformer/finetune.sh
similarity index 89%
rename from examples/industrial_data_pretraining/paraformer/finetune_from_model_hub.sh
rename to examples/industrial_data_pretraining/paraformer/finetune.sh
index 0ccb83a..21b29b6 100644
--- a/examples/industrial_data_pretraining/paraformer/finetune_from_model_hub.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune.sh
@@ -1,6 +1,8 @@
 # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
 #  MIT License  (https://opensource.org/licenses/MIT)
 
+# method1, finetune from model hub
+
 # which gpu to train or finetune
 export CUDA_VISIBLE_DEVICES="0,1"
 gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
@@ -30,8 +32,8 @@
 --nnodes 1 \
 --nproc_per_node ${gpu_num} \
 funasr/bin/train.py \
-+model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
-+model_revision="v2.0.4" \
+++model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+++model_revision="v2.0.4" \
 ++train_data_set_list="${train_data}" \
 ++valid_data_set_list="${val_data}" \
 ++dataset_conf.batch_size=32 \
diff --git a/examples/industrial_data_pretraining/paraformer/finetune_from_local_model.sh b/examples/industrial_data_pretraining/paraformer/finetune_from_local.sh
similarity index 90%
rename from examples/industrial_data_pretraining/paraformer/finetune_from_local_model.sh
rename to examples/industrial_data_pretraining/paraformer/finetune_from_local.sh
index 2e0b8ee..5b4d991 100644
--- a/examples/industrial_data_pretraining/paraformer/finetune_from_local_model.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune_from_local.sh
@@ -1,8 +1,12 @@
 # Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
 #  MIT License  (https://opensource.org/licenses/MIT)
 
+# method2, finetune from local model
+
+workspace=$(pwd)
+
 # download model
-local_path_root=../modelscope_models
+local_path_root=${workspace}/modelscope_models
 mkdir -p ${local_path_root}
 local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
 git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
@@ -24,15 +28,14 @@
 train_data="${data_dir}/train.jsonl"
 val_data="${data_dir}/val.jsonl"
 
-tokens="${local_path}/tokens.jsonl"
+tokens="${local_path}/tokens.json"
 cmvn_file="${local_path}/am.mvn"
 
 # exp output dir
 output_dir="/Users/zhifu/exp"
 log_file="${output_dir}/log.txt"
 
-workspace=`pwd`
-config="${local_path}/config.yaml"
+config="config.yaml"
 
 init_param="${local_path}/model.pt"
 
@@ -43,7 +46,7 @@
 --nnodes 1 \
 --nproc_per_node ${gpu_num} \
 ../../../funasr/bin/train.py \
---config-path "${workspace}/conf" \
+--config-path "${local_path}" \
 --config-name "${config}" \
 ++train_data_set_list="${train_data}" \
 ++valid_data_set_list="${val_data}" \
diff --git a/examples/industrial_data_pretraining/paraformer/infer.sh b/examples/industrial_data_pretraining/paraformer/infer.sh
new file mode 100644
index 0000000..a1975ea
--- /dev/null
+++ b/examples/industrial_data_pretraining/paraformer/infer.sh
@@ -0,0 +1,21 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+# method1, inference from model hub
+
+# for more input type, please ref to readme.md
+input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
+
+output_dir="./outputs/debug"
+
+model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+model_revision="v2.0.4"
+
+device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
+
+python -m funasr.bin.inference \
+++model=${model} \
+++model_revision=${model_revision} \
+++input="${input}" \
+++output_dir="${output_dir}" \
+++device="${device}"
diff --git a/examples/industrial_data_pretraining/paraformer/infer_from_local.sh b/examples/industrial_data_pretraining/paraformer/infer_from_local.sh
new file mode 100644
index 0000000..06c9ca6
--- /dev/null
+++ b/examples/industrial_data_pretraining/paraformer/infer_from_local.sh
@@ -0,0 +1,39 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
+
+# method2, inference from local model
+
+# for more input type, please ref to readme.md
+input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
+
+output_dir="./outputs/debug"
+
+workspace=$(pwd)
+
+# download model
+local_path_root=${workspace}/modelscope_models
+mkdir -p ${local_path_root}
+local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
+git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
+
+device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
+
+tokens="${local_path}/tokens.json"
+cmvn_file="${local_path}/am.mvn"
+
+config="config.yaml"
+init_param="${local_path}/model.pt"
+
+python -m funasr.bin.inference \
+--config-path "${local_path}" \
+--config-name "${config}" \
+++init_param="${init_param}" \
+++tokenizer_conf.token_list="${tokens}" \
+++frontend_conf.cmvn_file="${cmvn_file}" \
+++input="${input}" \
+++output_dir="${output_dir}" \
+++device="${device}"
+
+
+
+

--
Gitblit v1.9.1