From 24aea85b5bc3f354d683201fa9e37968f3f1638f Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 21 Mar 2024 14:01:45 +0800
Subject: [PATCH] trainer: refresh finetune scripts, add scp2jsonl/jsonl2scp entry points, fix demo and data paths
---
examples/industrial_data_pretraining/paraformer_streaming/finetune.sh | 66 ++++++++
data/list/val_wav.scp | 4
examples/industrial_data_pretraining/contextual_paraformer/finetune.sh | 51 ++++-
data/list/train_text.txt | 6
examples/industrial_data_pretraining/bicif_paraformer/finetune.sh | 67 ++++++++
examples/industrial_data_pretraining/paraformer/finetune.sh | 49 ++++--
setup.py | 3
data/list/train_wav.scp | 6
data/list/train.jsonl | 4
README_zh.md | 2
funasr/datasets/audio_datasets/scp2jsonl.py | 19 +
README.md | 2
/dev/null | 71 --------
data/list/val.jsonl | 2
data/list/val_text.txt | 4
examples/industrial_data_pretraining/paraformer/train_from_local.sh | 0
examples/industrial_data_pretraining/seaco_paraformer/finetune.sh | 52 ++++-
examples/industrial_data_pretraining/paraformer_streaming/demo.py | 16 +
18 files changed, 284 insertions(+), 140 deletions(-)
diff --git a/README.md b/README.md
index a4511b9..e7ff1b1 100644
--- a/README.md
+++ b/README.md
@@ -97,7 +97,7 @@
<a name="quick-start"></a>
## Quick Start
-Below is a quick start tutorial. Test audio files ([Mandarin](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav), [English]()).
+Below is a quick start tutorial. Test audio files ([Mandarin](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav), [English](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav)).
### Command-line usage
diff --git a/README_zh.md b/README_zh.md
index 3e58aa7..46af926 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -91,7 +91,7 @@
<a name="快速开始"></a>
## 快速开始
-下面为快速上手教程，测试音频（[中文](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav)，[英文]()）
+下面为快速上手教程，测试音频（[中文](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav)，[英文](https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav)）
### 可执行命令行
diff --git a/data/list/train.jsonl b/data/list/train.jsonl
new file mode 100644
index 0000000..618af38
--- /dev/null
+++ b/data/list/train.jsonl
@@ -0,0 +1,4 @@
+{"key": "BAC009S0764W0121", "source": "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/BAC009S0764W0121.wav", "source_len": 90, "target": "鐢氳嚦鍑虹幇浜ゆ槗鍑犱箮鍋滄粸鐨勬儏鍐�", "target_len": 13}
+{"key": "BAC009S0916W0489", "source": "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/BAC009S0916W0489.wav", "source_len": 90, "target": "婀栧寳涓�鍏徃浠ュ憳宸ュ悕涔夎捶娆炬暟鍗佸憳宸ヨ礋鍊哄崈涓�", "target_len": 20}
+{"key": "asr_example_cn_en", "source": "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_cn_en.wav", "source_len": 91, "target": "鎵�鏈夊彧瑕佸鐞� data 涓嶇浣犳槸鍋� machine learning 鍋� deep learning 鍋� data analytics 鍋� data science 涔熷ソ scientist 涔熷ソ閫氶�氶兘瑕侀兘鍋氱殑鍩烘湰鍔熷晩閭� again 鍏堝厛瀵规湁涓�浜涗篃璁稿", "target_len": 19}
+{"key": "ID0012W0014", "source": "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav", "source_len": 88, "target": "he tried to think how it could be", "target_len": 8}
diff --git a/data/list/train_text.txt b/data/list/train_text.txt
index f4d4fe4..2fc2aa5 100644
--- a/data/list/train_text.txt
+++ b/data/list/train_text.txt
@@ -1,2 +1,4 @@
-ID0012W0013 当客户风险承受能力评估依据发生变化时
-ID0012W0014 杨涛不得不将工厂关掉
\ No newline at end of file
+BAC009S0764W0121 甚至出现交易几乎停滞的情况
+BAC009S0916W0489 湖北一公司以员工名义贷款数十员工负债千万
+asr_example_cn_en 所有只要处理 data 不管你是做 machine learning 做 deep learning 做 data analytics 做 data science 也好 scientist 也好通通都要都做的基本功啊那 again 先先对有一些也许
+ID0012W0014 he tried to think how it could be
\ No newline at end of file
diff --git a/data/list/train_wav.scp b/data/list/train_wav.scp
index 325a340..210f3cb 100644
--- a/data/list/train_wav.scp
+++ b/data/list/train_wav.scp
@@ -1,2 +1,4 @@
-ID0012W0013 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0013.wav
-ID0012W0014 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0014.wav
\ No newline at end of file
+BAC009S0764W0121 https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/BAC009S0764W0121.wav
+BAC009S0916W0489 https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/BAC009S0916W0489.wav
+asr_example_cn_en https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_cn_en.wav
+ID0012W0014 https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav
\ No newline at end of file
diff --git a/data/list/val.jsonl b/data/list/val.jsonl
new file mode 100644
index 0000000..8cd75ad
--- /dev/null
+++ b/data/list/val.jsonl
@@ -0,0 +1,2 @@
+{"key": "ID0012W0013", "source": "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", "source_len": 88, "target": "娆㈣繋澶у鏉ヤ綋楠岃揪鎽╅櫌鎺ㄥ嚭鐨勮闊宠瘑鍒ā鍨�", "target_len": 19}
+{"key": "ID0012W0014", "source": "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav", "source_len": 88, "target": "he tried to think how it could be", "target_len": 8}
diff --git a/data/list/val_text.txt b/data/list/val_text.txt
index f4d4fe4..c1aa42d 100644
--- a/data/list/val_text.txt
+++ b/data/list/val_text.txt
@@ -1,2 +1,2 @@
-ID0012W0013 当客户风险承受能力评估依据发生变化时
-ID0012W0014 杨涛不得不将工厂关掉
\ No newline at end of file
+ID0012W0013 欢迎大家来体验达摩院推出的语音识别模型
+ID0012W0014 he tried to think how it could be
\ No newline at end of file
diff --git a/data/list/val_wav.scp b/data/list/val_wav.scp
index 325a340..4054ad3 100644
--- a/data/list/val_wav.scp
+++ b/data/list/val_wav.scp
@@ -1,2 +1,2 @@
-ID0012W0013 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0013.wav
-ID0012W0014 /Users/zhifu/funasr_github/test_local/aishell2_dev_ios/wav/D0012/ID0012W0014.wav
\ No newline at end of file
+ID0012W0013 https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav
+ID0012W0014 https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh b/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
new file mode 100644
index 0000000..885e5c0
--- /dev/null
+++ b/examples/industrial_data_pretraining/bicif_paraformer/finetune.sh
@@ -0,0 +1,67 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
+# method1, finetune from model hub
+
+# which gpu to train or finetune
+export CUDA_VISIBLE_DEVICES="0,1"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+
+# model_name from model_hub, or model_dir in local path
+
+## option 1, download model automatically
+model_name_or_model_dir="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+model_revision="v2.0.4"
+
+## option 2, download model by git
+#local_path_root=${workspace}/modelscope_models
+#mkdir -p ${local_path_root}/${model_name_or_model_dir}
+#git clone https://www.modelscope.cn/${model_name_or_model_dir}.git ${local_path_root}/${model_name_or_model_dir}
+#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}
+
+
+# data dir, which contains: train.jsonl, val.jsonl
+data_dir="../../../data/list"
+
+train_data="${data_dir}/train.jsonl"
+val_data="${data_dir}/val.jsonl"
+
+# generate train.jsonl and val.jsonl from wav.scp and text.txt
+scp2jsonl \
+++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${train_data}"
+
+scp2jsonl \
+++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${val_data}"
+
+
+# exp output dir
+output_dir="./outputs"
+log_file="${output_dir}/log.txt"
+
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
+
+torchrun \
+--nnodes 1 \
+--nproc_per_node ${gpu_num} \
+../../../funasr/bin/train.py \
+++model="${model_name_or_model_dir}" \
+++model_revision="${model_revision}" \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++dataset_conf.batch_size=20000 \
+++dataset_conf.batch_type="token" \
+++dataset_conf.num_workers=4 \
+++train_conf.max_epoch=50 \
+++train_conf.log_interval=1 \
+++train_conf.resume=false \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
+++optim_conf.lr=0.0002 \
+++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
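
A note on the batching settings used in this new script (and propagated to the other finetune.sh edits below): dataset_conf.batch_type="token" with batch_size=20000 budgets each batch by total token/frame count rather than by utterance count, which keeps per-batch compute roughly constant across short and long utterances. A minimal sketch of the idea, not FunASR's actual sampler:

# Illustrative sketch of token-type batching: pack samples into batches whose
# total length stays under a token budget. Not FunASR's implementation.
def token_batches(samples, batch_size_tokens=20000):
    batch, budget = [], 0
    # sorting by length keeps padding waste low within a batch
    for s in sorted(samples, key=lambda x: x["source_len"]):
        cost = s["source_len"] + s["target_len"]
        if batch and budget + cost > batch_size_tokens:
            yield batch
            batch, budget = [], 0
        batch.append(s)
        budget += cost
    if batch:
        yield batch
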
diff --git a/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh b/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
index 98cc73c..de88968 100644
--- a/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/contextual_paraformer/finetune.sh
@@ -7,21 +7,39 @@
export CUDA_VISIBLE_DEVICES="0,1"
gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-# data dir, which contains: train.json, val.json
-data_dir="/Users/zhifu/funasr1.0/data/list"
+# model_name from model_hub, or model_dir in local path
-## generate jsonl from wav.scp and text.txt
-#python -m funasr.datasets.audio_datasets.scp2jsonl \
-#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
-#++data_type_list='["source", "target"]' \
-#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+## option 1, download model automatically
+model_name_or_model_dir="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
+model_revision="v2.0.4"
+
+## option 2, download model by git
+#local_path_root=${workspace}/modelscope_models
+#mkdir -p ${local_path_root}/${model_name_or_model_dir}
+#git clone https://www.modelscope.cn/${model_name_or_model_dir}.git ${local_path_root}/${model_name_or_model_dir}
+#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}
+
+
+# data dir, which contains: train.jsonl, val.jsonl
+data_dir="../../../data/list"
train_data="${data_dir}/train.jsonl"
val_data="${data_dir}/val.jsonl"
+# generate train.jsonl and val.jsonl from wav.scp and text.txt
+scp2jsonl \
+++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${train_data}"
+
+scp2jsonl \
+++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${val_data}"
+
# exp output dir
-output_dir="/Users/zhifu/exp"
+output_dir="./outputs"
log_file="${output_dir}/log.txt"
@@ -31,14 +49,19 @@
torchrun \
--nnodes 1 \
--nproc_per_node ${gpu_num} \
-funasr/bin/train.py \
-++model="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404" \
-++model_revision="v2.0.5" \
+../../../funasr/bin/train.py \
+++model="${model_name_or_model_dir}" \
+++model_revision="${model_revision}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=32 \
-++dataset_conf.batch_type="example" \
+++dataset_conf.batch_size=20000 \
+++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
-++train_conf.max_epoch=20 \
+++train_conf.max_epoch=50 \
+++train_conf.log_interval=1 \
+++train_conf.resume=false \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/contextual_paraformer/finetune_from_local.sh b/examples/industrial_data_pretraining/contextual_paraformer/finetune_from_local.sh
deleted file mode 100644
index 4dbe855..0000000
--- a/examples/industrial_data_pretraining/contextual_paraformer/finetune_from_local.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
-# MIT License (https://opensource.org/licenses/MIT)
-
-# method2, finetune from local model
-
-workspace=`pwd`
-
-echo "current path: ${workspace}" # /xxxx/funasr/examples/industrial_data_pretraining/paraformer
-
-# download model
-local_path_root=${workspace}/modelscope_models
-mkdir -p ${local_path_root}
-local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-git clone https://www.modelscope.cn/iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404.git ${local_path}
-
-
-# which gpu to train or finetune
-export CUDA_VISIBLE_DEVICES="0,1"
-gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-
-# data dir, which contains: train.json, val.json
-data_dir="../../../data/list"
-
-train_data="${data_dir}/train.jsonl"
-val_data="${data_dir}/val.jsonl"
-
-
-# generate train.jsonl and val.jsonl from wav.scp and text.txt
-python -m funasr.datasets.audio_datasets.scp2jsonl \
-++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
-++data_type_list='["source", "target"]' \
-++jsonl_file_out="${train_data}"
-
-python -m funasr.datasets.audio_datasets.scp2jsonl \
-++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
-++data_type_list='["source", "target"]' \
-++jsonl_file_out="${val_data}"
-
-
-tokens="${local_path}/tokens.json"
-cmvn_file="${local_path}/am.mvn"
-
-# output dir
-output_dir="./outputs"
-log_file="${output_dir}/log.txt"
-
-config_name="config.yaml"
-
-init_param="${local_path}/model.pt"
-
-mkdir -p ${output_dir}
-echo "log_file: ${log_file}"
-
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
---config-path "${local_path}" \
---config-name "${config_name}" \
-++train_data_set_list="${train_data}" \
-++valid_data_set_list="${val_data}" \
-++tokenizer_conf.token_list="${tokens}" \
-++frontend_conf.cmvn_file="${cmvn_file}" \
-++dataset_conf.batch_size=32 \
-++dataset_conf.batch_type="example" \
-++dataset_conf.num_workers=4 \
-++train_conf.max_epoch=20 \
-++optim_conf.lr=0.0002 \
-++train_conf.log_interval=1 \
-++init_param="${init_param}" \
-++output_dir="${output_dir}" &> ${log_file}
diff --git a/examples/industrial_data_pretraining/paraformer/finetune.sh b/examples/industrial_data_pretraining/paraformer/finetune.sh
index 9fc8bf0..7209252 100644
--- a/examples/industrial_data_pretraining/paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/paraformer/finetune.sh
@@ -1,27 +1,44 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
-# method1, finetune from model hub
# which gpu to train or finetune
export CUDA_VISIBLE_DEVICES="0,1"
gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-# data dir, which contains: train.json, val.json
-data_dir="/Users/zhifu/funasr1.0/data/list"
+# model_name from model_hub, or model_dir in local path
-## generate jsonl from wav.scp and text.txt
-#python -m funasr.datasets.audio_datasets.scp2jsonl \
-#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
-#++data_type_list='["source", "target"]' \
-#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+## option 1, download model automatically
+model_name_or_model_dir="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+model_revision="v2.0.4"
+
+## option 2, download model by git
+#local_path_root=${workspace}/modelscope_models
+#mkdir -p ${local_path_root}/${model_name_or_model_dir}
+#git clone https://www.modelscope.cn/${model_name_or_model_dir}.git ${local_path_root}/${model_name_or_model_dir}
+#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}
+
+
+# data dir, which contains: train.jsonl, val.jsonl
+data_dir="../../../data/list"
train_data="${data_dir}/train.jsonl"
val_data="${data_dir}/val.jsonl"
+# generate train.jsonl and val.jsonl from wav.scp and text.txt
+scp2jsonl \
+++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${train_data}"
+
+scp2jsonl \
+++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${val_data}"
+
# exp output dir
-output_dir="/Users/zhifu/exp"
+output_dir="./outputs"
log_file="${output_dir}/log.txt"
@@ -31,19 +48,19 @@
torchrun \
--nnodes 1 \
--nproc_per_node ${gpu_num} \
-funasr/bin/train.py \
-++model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
-++model_revision="v2.0.4" \
+../../../funasr/bin/train.py \
+++model="${model_name_or_model_dir}" \
+++model_revision="${model_revision}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
++dataset_conf.batch_size=20000 \
++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
++train_conf.max_epoch=50 \
-++train_conf.log_interval=10 \
+++train_conf.log_interval=1 \
++train_conf.resume=false \
-++train_conf.validate_interval=15 \
-++train_conf.save_checkpoint_interval=15 \
-++train_conf.keep_nbest_models=50 \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/paraformer/finetune_from_local.sh b/examples/industrial_data_pretraining/paraformer/train_from_local.sh
similarity index 100%
rename from examples/industrial_data_pretraining/paraformer/finetune_from_local.sh
rename to examples/industrial_data_pretraining/paraformer/train_from_local.sh
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/demo.py b/examples/industrial_data_pretraining/paraformer_streaming/demo.py
index 601a531..9fcee3a 100644
--- a/examples/industrial_data_pretraining/paraformer_streaming/demo.py
+++ b/examples/industrial_data_pretraining/paraformer_streaming/demo.py
@@ -3,13 +3,16 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
+import os
+
from funasr import AutoModel
-chunk_size = [5, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
-encoder_chunk_look_back = 0 #number of chunks to lookback for encoder self-attention
-decoder_chunk_look_back = 0 #number of encoder chunks to lookback for decoder cross-attention
-wav_file="/Users/zhifu/Downloads/NCYzUhAtZNI_0015.wav"
+chunk_size = [0, 10, 5]  # [0, 10, 5] = 600ms, [0, 8, 4] = 480ms
+encoder_chunk_look_back = 4  # number of chunks to look back for encoder self-attention
+decoder_chunk_look_back = 1  # number of encoder chunks to look back for decoder cross-attention
model = AutoModel(model="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online", model_revision="v2.0.4")
+
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
res = model.generate(input=wav_file,
chunk_size=chunk_size,
encoder_chunk_look_back=encoder_chunk_look_back,
@@ -17,12 +20,11 @@
)
print(res)
-# exit()
import soundfile
-import os
-# wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = chunk_size[1] * 960  # 600ms、480ms
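
For orientation: at a 16 kHz sample rate one encoder frame step is 60 ms (960 samples), so chunk_size[1] = 10 gives a 600 ms stride. The hunk ends at chunk_stride, so the loop that consumes it is not shown; below is a sketch of what typically follows, based on FunASR's public streaming example (the cache dict and is_final flag are the streaming interface of AutoModel.generate):

# Sketch of the streaming loop (assumed from FunASR's public streaming
# example; this hunk does not show the file's actual tail).
import math

cache = {}
total_chunk_num = math.ceil(len(speech) / chunk_stride)
for i in range(total_chunk_num):
    speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model.generate(input=speech_chunk, cache=cache, is_final=is_final,
                         chunk_size=chunk_size,
                         encoder_chunk_look_back=encoder_chunk_look_back,
                         decoder_chunk_look_back=decoder_chunk_look_back)
    print(res)
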
diff --git a/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
new file mode 100644
index 0000000..2bfb2bf
--- /dev/null
+++ b/examples/industrial_data_pretraining/paraformer_streaming/finetune.sh
@@ -0,0 +1,66 @@
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
+
+# which gpu to train or finetune
+export CUDA_VISIBLE_DEVICES="0,1"
+gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+
+# model_name from model_hub, or model_dir in local path
+
+## option 1, download model automatically
+model_name_or_model_dir="iic/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online"
+model_revision="v2.0.4"
+
+## option 2, download model by git
+#local_path_root=${workspace}/modelscope_models
+#mkdir -p ${local_path_root}/${model_name_or_model_dir}
+#git clone https://www.modelscope.cn/${model_name_or_model_dir}.git ${local_path_root}/${model_name_or_model_dir}
+#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}
+
+
+# data dir, which contains: train.jsonl, val.jsonl
+data_dir="../../../data/list"
+
+train_data="${data_dir}/train.jsonl"
+val_data="${data_dir}/val.jsonl"
+
+# generate train.jsonl and val.jsonl from wav.scp and text.txt
+scp2jsonl \
+++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${train_data}"
+
+scp2jsonl \
+++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${val_data}"
+
+
+# exp output dir
+output_dir="./outputs"
+log_file="${output_dir}/log.txt"
+
+
+mkdir -p ${output_dir}
+echo "log_file: ${log_file}"
+
+torchrun \
+--nnodes 1 \
+--nproc_per_node ${gpu_num} \
+../../../funasr/bin/train.py \
+++model="${model_name_or_model_dir}" \
+++model_revision="${model_revision}" \
+++train_data_set_list="${train_data}" \
+++valid_data_set_list="${val_data}" \
+++dataset_conf.batch_size=20000 \
+++dataset_conf.batch_type="token" \
+++dataset_conf.num_workers=4 \
+++train_conf.max_epoch=50 \
+++train_conf.log_interval=1 \
+++train_conf.resume=false \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
+++optim_conf.lr=0.0002 \
+++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh b/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh
index 88f0e23..cfdec77 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh
+++ b/examples/industrial_data_pretraining/seaco_paraformer/finetune.sh
@@ -1,27 +1,44 @@
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
-# method1, finetune from model hub
# which gpu to train or finetune
export CUDA_VISIBLE_DEVICES="0,1"
gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-# data dir, which contains: train.json, val.json
-data_dir="/Users/zhifu/funasr1.0/data/list"
+# model_name from model_hub, or model_dir in local path
-## generate jsonl from wav.scp and text.txt
-#python -m funasr.datasets.audio_datasets.scp2jsonl \
-#++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
-#++data_type_list='["source", "target"]' \
-#++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+## option 1, download model automatically
+model_name_or_model_dir="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+model_revision="v2.0.4"
+
+## option 2, download model by git
+#local_path_root=${workspace}/modelscope_models
+#mkdir -p ${local_path_root}/${model_name_or_model_dir}
+#git clone https://www.modelscope.cn/${model_name_or_model_dir}.git ${local_path_root}/${model_name_or_model_dir}
+#model_name_or_model_dir=${local_path_root}/${model_name_or_model_dir}
+
+
+# data dir, which contains: train.jsonl, val.jsonl
+data_dir="../../../data/list"
train_data="${data_dir}/train.jsonl"
val_data="${data_dir}/val.jsonl"
+# generate train.jsonl and val.jsonl from wav.scp and text.txt
+scp2jsonl \
+++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${train_data}"
+
+scp2jsonl \
+++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out="${val_data}"
+
# exp output dir
-output_dir="/Users/zhifu/exp"
+output_dir="./outputs"
log_file="${output_dir}/log.txt"
@@ -31,14 +48,19 @@
torchrun \
--nnodes 1 \
--nproc_per_node ${gpu_num} \
-funasr/bin/train.py \
-++model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
-++model_revision="v2.0.6" \
+../../../funasr/bin/train.py \
+++model="${model_name_or_model_dir}" \
+++model_revision="${model_revision}" \
++train_data_set_list="${train_data}" \
++valid_data_set_list="${val_data}" \
-++dataset_conf.batch_size=32 \
-++dataset_conf.batch_type="example" \
+++dataset_conf.batch_size=20000 \
+++dataset_conf.batch_type="token" \
++dataset_conf.num_workers=4 \
-++train_conf.max_epoch=20 \
+++train_conf.max_epoch=50 \
+++train_conf.log_interval=1 \
+++train_conf.resume=false \
+++train_conf.validate_interval=2000 \
+++train_conf.save_checkpoint_interval=2000 \
+++train_conf.keep_nbest_models=20 \
++optim_conf.lr=0.0002 \
++output_dir="${output_dir}" &> ${log_file}
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/seaco_paraformer/finetune_from_local.sh b/examples/industrial_data_pretraining/seaco_paraformer/finetune_from_local.sh
deleted file mode 100644
index 9593671..0000000
--- a/examples/industrial_data_pretraining/seaco_paraformer/finetune_from_local.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
-# MIT License (https://opensource.org/licenses/MIT)
-
-# method2, finetune from local model
-
-workspace=`pwd`
-
-echo "current path: ${workspace}" # /xxxx/funasr/examples/industrial_data_pretraining/paraformer
-
-# download model
-local_path_root=${workspace}/modelscope_models
-mkdir -p ${local_path_root}
-local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-git clone https://www.modelscope.cn/iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
-
-
-# which gpu to train or finetune
-export CUDA_VISIBLE_DEVICES="0,1"
-gpu_num=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
-
-# data dir, which contains: train.json, val.json
-data_dir="../../../data/list"
-
-train_data="${data_dir}/train.jsonl"
-val_data="${data_dir}/val.jsonl"
-
-
-# generate train.jsonl and val.jsonl from wav.scp and text.txt
-python -m funasr.datasets.audio_datasets.scp2jsonl \
-++scp_file_list='["../../../data/list/train_wav.scp", "../../../data/list/train_text.txt"]' \
-++data_type_list='["source", "target"]' \
-++jsonl_file_out="${train_data}"
-
-python -m funasr.datasets.audio_datasets.scp2jsonl \
-++scp_file_list='["../../../data/list/val_wav.scp", "../../../data/list/val_text.txt"]' \
-++data_type_list='["source", "target"]' \
-++jsonl_file_out="${val_data}"
-
-
-tokens="${local_path}/tokens.json"
-cmvn_file="${local_path}/am.mvn"
-
-# output dir
-output_dir="./outputs"
-log_file="${output_dir}/log.txt"
-
-config_name="config.yaml"
-
-init_param="${local_path}/model.pt"
-
-mkdir -p ${output_dir}
-echo "log_file: ${log_file}"
-
-torchrun \
---nnodes 1 \
---nproc_per_node ${gpu_num} \
-../../../funasr/bin/train.py \
---config-path "${local_path}" \
---config-name "${config_name}" \
-++train_data_set_list="${train_data}" \
-++valid_data_set_list="${val_data}" \
-++tokenizer_conf.token_list="${tokens}" \
-++frontend_conf.cmvn_file="${cmvn_file}" \
-++dataset_conf.batch_size=32 \
-++dataset_conf.batch_type="example" \
-++dataset_conf.num_workers=4 \
-++train_conf.max_epoch=20 \
-++optim_conf.lr=0.0002 \
-++train_conf.log_interval=1 \
-++init_param="${init_param}" \
-++output_dir="${output_dir}" &> ${log_file}
diff --git a/funasr/datasets/audio_datasets/scp2jsonl.py b/funasr/datasets/audio_datasets/scp2jsonl.py
index e09a84a..00754dd 100644
--- a/funasr/datasets/audio_datasets/scp2jsonl.py
+++ b/funasr/datasets/audio_datasets/scp2jsonl.py
@@ -29,15 +29,19 @@
                data_file_lists = f.readlines()
                lines_for_each_th = (len(data_file_lists)-1)//cpu_cores + 1
                task_num = cpu_cores if len(data_file_lists) > cpu_cores else 1
-                with concurrent.futures.ThreadPoolExecutor(max_workers=cpu_cores) as executor:
-
-                    futures = [executor.submit(parse_context_length, data_file_lists[i*lines_for_each_th:(i+1)*lines_for_each_th], data_type) for i in range(task_num)]
+                if task_num > 1:
+                    with concurrent.futures.ThreadPoolExecutor(max_workers=cpu_cores) as executor:
-                    for future in concurrent.futures.as_completed(futures):
-
-                        json_dict[data_type].update(future.result())
-                        # print(json_dict)
+                        futures = [executor.submit(parse_context_length, data_file_lists[i*lines_for_each_th:(i+1)*lines_for_each_th], data_type) for i in range(task_num)]
+                        for future in concurrent.futures.as_completed(futures):
+
+                            json_dict[data_type].update(future.result())
+                else:
+                    res = parse_context_length(data_file_lists, data_type)
+                    json_dict[data_type].update(res)
+
        with open(jsonl_file_out, "w") as f:
            for key in json_dict[data_type_list[0]].keys():
                jsonl_line = {"key": key}
@@ -46,6 +50,7 @@
                jsonl_line = json.dumps(jsonl_line, ensure_ascii=False)
                f.write(jsonl_line+"\n")
                f.flush()
+        print(f"processed {len(json_dict[data_type_list[0]])} samples")
    else:
        pass
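
The restructuring above exists because the old code always went through ThreadPoolExecutor, even when task_num was 1 and there was nothing to parallelize. With the new serial branch, small scp files are parsed in a single call. The sharding arithmetic with small illustrative numbers:

# Illustrative values for the sharding math used above.
lines = ["key1 a.wav", "key2 b.wav"]                   # a 2-line scp file
cpu_cores = 8
lines_per_shard = (len(lines) - 1) // cpu_cores + 1    # ceil(2/8) = 1
task_num = cpu_cores if len(lines) > cpu_cores else 1  # -> 1
# task_num == 1 now takes the serial branch: one parse_context_length call
# on the whole list, with no thread-pool overhead.
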
diff --git a/setup.py b/setup.py
index 5cb5665..9a69fcd 100644
--- a/setup.py
+++ b/setup.py
@@ -140,6 +140,9 @@
],
entry_points={"console_scripts": [
"funasr = funasr.bin.inference:main_hydra",
+ "funasr-train = funasr.bin.train:main_hydra",
"funasr-export = funasr.bin.export:main_hydra",
+ "scp2jsonl = funasr.datasets.audio_datasets.scp2jsonl:main_hydra",
+ "jsonl2scp = funasr.datasets.audio_datasets.jsonl2scp:main_hydra",
]},
)
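
With these entry points, scp2jsonl can be invoked directly from the shell, as the finetune.sh scripts above now do, instead of via python -m funasr.datasets.audio_datasets.scp2jsonl. A minimal sketch of driving it from Python, assuming funasr is pip-installed so the script is on PATH (jsonl2scp presumably performs the inverse conversion, but its flags are not shown in this patch):

# Minimal sketch: call the new scp2jsonl console script from Python.
# Arguments mirror the finetune.sh usage above.
import subprocess

subprocess.run([
    "scp2jsonl",
    '++scp_file_list=["data/list/train_wav.scp", "data/list/train_text.txt"]',
    '++data_type_list=["source", "target"]',
    '++jsonl_file_out=data/list/train.jsonl',
], check=True)
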
--
Gitblit v1.9.1