From 073a040bf0333652cc053e5335fcdd5c3719a4c2 Mon Sep 17 00:00:00 2001
From: Chong Zhang <iriszhangchong@gmail.com>
Date: Tue, 23 May 2023 16:51:56 +0800
Subject: [PATCH] Merge pull request #541 from alibaba-damo-academy/dev_zc
---
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py
index 850cedc..0393212 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py
@@ -1,4 +1,27 @@
import os
+<<<<<<< HEAD
+
+from modelscope.metainfo import Trainers
+from modelscope.trainers import build_trainer
+
+from funasr.datasets.ms_dataset import MsDataset
+from funasr.utils.modelscope_param import modelscope_args
+
+
+def modelscope_finetune(params):
+ if not os.path.exists(params.output_dir):
+ os.makedirs(params.output_dir, exist_ok=True)
+ # dataset split ["train", "validation"]
+ ds_dict = MsDataset.load(params.data_path)
+ kwargs = dict(
+ model=params.model,
+ data_dir=ds_dict,
+ dataset_type=params.dataset_type,
+ work_dir=params.output_dir,
+ batch_bins=params.batch_bins,
+ max_epoch=params.max_epoch,
+ lr=params.lr)
+=======
from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
from funasr.datasets.ms_dataset import MsDataset
@@ -18,11 +41,22 @@
batch_bins=params["batch_bins"],
max_epoch=params["max_epoch"],
lr=params["lr"])
+>>>>>>> main
trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
trainer.train()
if __name__ == '__main__':
+<<<<<<< HEAD
+ params = modelscope_args(model="damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch", data_path="./data")
+ params.output_dir = "./checkpoint"  # model save path
+ params.data_path = "./example_data/"  # data path
+ params.dataset_type = "small"  # use "small" for small datasets; if the data exceeds 1000 hours, use "large"
+ params.batch_bins = 2000  # batch size: with dataset_type="small", batch_bins is in fbank feature frames; with dataset_type="large", in milliseconds
+ params.max_epoch = 50  # maximum number of training epochs
+ params.lr = 0.00005  # learning rate
+
+=======
params = {}
params["output_dir"] = "./checkpoint"
params["data_dir"] = "./data"
@@ -32,4 +66,5 @@
params["lr"] = 0.00005
params["model"] = "damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch"
params["model_revision"] = None
+>>>>>>> main
modelscope_finetune(params)
--
Gitblit v1.9.1