From a4beddbe757ff34d90591c79d9ed8462752ee47c Mon Sep 17 00:00:00 2001
From: wucong.lyb <wucong.lyb@alibaba-inc.com>
Date: Thu, 29 Jun 2023 17:14:17 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR
---
egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py | 38 ++------------------------------------
 1 file changed, 2 insertions(+), 36 deletions(-)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py
index 0393212..79fd34d 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch/finetune.py
@@ -1,5 +1,4 @@
import os
-<<<<<<< HEAD
from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
@@ -21,50 +20,17 @@
batch_bins=params.batch_bins,
max_epoch=params.max_epoch,
lr=params.lr)
-=======
-from modelscope.metainfo import Trainers
-from modelscope.trainers import build_trainer
-from funasr.datasets.ms_dataset import MsDataset
-
-
-def modelscope_finetune(params):
- if not os.path.exists(params["output_dir"]):
- os.makedirs(params["output_dir"], exist_ok=True)
- # dataset split ["train", "validation"]
- ds_dict = MsDataset.load(params["data_dir"])
- kwargs = dict(
- model=params["model"],
- model_revision=params["model_revision"],
- data_dir=ds_dict,
- dataset_type=params["dataset_type"],
- work_dir=params["output_dir"],
- batch_bins=params["batch_bins"],
- max_epoch=params["max_epoch"],
- lr=params["lr"])
->>>>>>> main
trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
trainer.train()
if __name__ == '__main__':
-<<<<<<< HEAD
params = modelscope_args(model="damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch", data_path="./data")
params.output_dir = "./checkpoint"  # model save path
params.data_path = "./example_data/"  # data path
params.dataset_type = "small"  # use "small" for small datasets; if the data exceeds 1000 hours, use "large"
params.batch_bins = 2000  # batch size; if dataset_type="small", batch_bins is the number of fbank feature frames; if dataset_type="large", it is in milliseconds
- params.max_epoch = 50  # maximum number of training epochs
+ params.max_epoch = 20  # maximum number of training epochs
params.lr = 0.00005  # learning rate
-=======
- params = {}
- params["output_dir"] = "./checkpoint"
- params["data_dir"] = "./data"
- params["batch_bins"] = 2000
- params["dataset_type"] = "small"
- params["max_epoch"] = 50
- params["lr"] = 0.00005
- params["model"] = "damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch"
- params["model_revision"] = None
->>>>>>> main
- modelscope_finetune(params)
+ modelscope_finetune(params)
\ No newline at end of file
--
Gitblit v1.9.1