From 43c30967b01e55d9b6091d88102dfd4bdff0773d Mon Sep 17 00:00:00 2001
From: speech_asr <wangjiaming.wjm@alibaba-inc.com>
Date: Thu, 20 Apr 2023 11:48:19 +0800
Subject: [PATCH] update
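
Reorder startup in funasr/bin/train.py: set the random seed and cuDNN flags
before distributed init, build the model, optimizer, and scheduler, log the
PyTorch/cuDNN versions, model summary, optimizer, and scheduler, dump the
parsed arguments to config.yaml under output_dir (or to the OSS bucket as
config.dict when use_pai is set), and build the dataloaders after the model.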
---
funasr/bin/train.py | 58 ++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 34 insertions(+), 24 deletions(-)
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index 2a5dc98..cbbf1fa 100644
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -1,15 +1,21 @@
import logging
import os
import sys
+from io import BytesIO
import torch
+from funasr.torch_utils.model_summary import model_summary
+from funasr.torch_utils.pytorch_version import pytorch_cudnn_version
from funasr.torch_utils.set_all_random_seed import set_all_random_seed
from funasr.utils import config_argparse
from funasr.utils.build_dataloader import build_dataloader
from funasr.utils.build_distributed import build_distributed
+from funasr.utils.build_optimizer import build_optimizer
+from funasr.utils.build_scheduler import build_scheduler
from funasr.utils.prepare_data import prepare_data
from funasr.utils.types import str2bool
+from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
def get_parser():
@@ -324,9 +330,17 @@
parser = get_parser()
args = parser.parse_args()
+ # set random seed
+ set_all_random_seed(args.seed)
+ torch.backends.cudnn.enabled = args.cudnn_enabled
+ torch.backends.cudnn.benchmark = args.cudnn_benchmark
+ torch.backends.cudnn.deterministic = args.cudnn_deterministic
+
# ddp init
args.distributed = args.dist_world_size > 1
distributed_option = build_distributed(args)
+
+ # for logging
if not distributed_option.distributed or distributed_option.dist_rank == 0:
logging.basicConfig(
level="INFO",
@@ -343,32 +357,28 @@
# prepare files for dataloader
prepare_data(args, distributed_option)
- # set random seed
- set_all_random_seed(args.seed)
- torch.backends.cudnn.enabled = args.cudnn_enabled
- torch.backends.cudnn.benchmark = args.cudnn_benchmark
- torch.backends.cudnn.deterministic = args.cudnn_deterministic
-
- train_dataloader, valid_dataloader = build_dataloader(args)
+ model = build_model(args)
+ optimizer = build_optimizer(args, model=model)
+ scheduler = build_scheduler(args, optimizer)
logging.info("world size: {}, rank: {}, local_rank: {}".format(distributed_option.dist_world_size,
distributed_option.dist_rank,
distributed_option.local_rank))
+ logging.info(pytorch_cudnn_version())
+ logging.info(model_summary(model))
+ logging.info("Optimizer: {}".format(optimizer))
+ logging.info("Scheduler: {}".format(scheduler))
- # optimizers = cls.build_optimizers(args, model=model)
- # schedulers = []
- # for i, optim in enumerate(optimizers, 1):
- # suf = "" if i == 1 else str(i)
- # name = getattr(args, f"scheduler{suf}")
- # conf = getattr(args, f"scheduler{suf}_conf")
- # if name is not None:
- # cls_ = scheduler_classes.get(name)
- # if cls_ is None:
- # raise ValueError(
- # f"must be one of {list(scheduler_classes)}: {name}"
- # )
- # scheduler = cls_(optim, **conf)
- # else:
- # scheduler = None
- #
- # schedulers.append(scheduler)
+ # dump args to config.yaml
+ if not distributed_option.distributed or distributed_option.dist_rank == 0:
+ os.makedirs(args.output_dir, exist_ok=True)
+ with open(os.path.join(args.output_dir, "config.yaml"), "w") as f:
+ logging.info("Saving the configuration in {}/{}".format(args.output_dir, "config.yaml"))
+ if args.use_pai:
+ buffer = BytesIO()
+ torch.save({"config": vars(args)}, buffer)
+ args.oss_bucket.put_object(os.path.join(args.output_dir, "config.dict"), buffer.getvalue())
+ else:
+ yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
+
+ train_dataloader, valid_dataloader = build_dataloader(args)
--
Gitblit v1.9.1