From 6427c834dfd97b1f05c6659cdc7ccf010bf82fe1 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Mon, 24 Apr 2023 19:50:07 +0800
Subject: [PATCH] train.py: add optimizer/scheduler/trainer options and per-process GPU selection

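Switch train.py from config_argparse to plain argparse and use
parse_known_args() so that task-specific options can be forwarded to
build_args(). Add --gpu_id (exported to CUDA_VISIBLE_DEVICES before DDP
init), --train_shape_file/--valid_shape_file, and the optimizer and
scheduler options (--optim, --optim_conf, --scheduler, --scheduler_conf).
Build the (possibly multiple) optimizers and schedulers, hand everything
to build_trainer(), and start training via trainer.run().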
---
 funasr/bin/train.py |   83 +++++++++++++++++++++++++++++++++++------
 1 file changed, 71 insertions(+), 12 deletions(-)
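
Note on the parse_known_args() switch: unlike parse_args(), it does not
error out on unrecognized options; it returns them as a leftover list,
which is what lets this common parser forward task-specific options to
build_args(). A minimal standard-library sketch (the --ctc_weight flag
below is just a hypothetical stand-in for a task-specific option; the
real forwarding is whatever build_args() in funasr.build_utils does):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--optim", default="adam")

    # parse_known_args() splits argv into (known namespace, leftovers).
    args, extra = parser.parse_known_args(
        ["--optim", "adamw", "--ctc_weight", "0.3"])
    print(args.optim)  # adamw
    print(extra)       # ['--ctc_weight', '0.3'] -> handed to build_args()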
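
--optim_conf and --scheduler_conf collect keyword arguments through
NestedDictAction. As a rough illustration of the idea only (FunASR's
actual NestedDictAction also handles nested keys and YAML-style values),
a toy argparse action that folds KEY=VALUE options into a dict:

    import argparse
    import ast

    class ToyDictAction(argparse.Action):
        """Fold repeated KEY=VALUE options into one dict (illustrative)."""
        def __call__(self, parser, namespace, values, option_string=None):
            conf = dict(getattr(namespace, self.dest, None) or {})
            key, _, raw = values.partition("=")
            try:
                conf[key] = ast.literal_eval(raw)  # "0.001" -> 0.001
            except (ValueError, SyntaxError):
                conf[key] = raw                    # keep as a plain string
            setattr(namespace, self.dest, conf)

    parser = argparse.ArgumentParser()
    parser.add_argument("--optim_conf", action=ToyDictAction, default={})
    print(parser.parse_args(["--optim_conf", "lr=0.001"]).optim_conf)
    # -> {'lr': 0.001}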
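
The --gpu_id export relies on CUDA reading CUDA_VISIBLE_DEVICES when a
process first initializes a CUDA context, which is why the patch sets it
before build_distributed(args): each worker then sees only its assigned
device, and that device shows up as cuda:0 inside the process. Sketch,
assuming one GPU per worker:

    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # e.g. the value of --gpu_id

    import torch  # imported after the export, before any CUDA call

    if torch.cuda.is_available():
        print(torch.cuda.device_count())      # 1: only physical GPU 1 is visible
        print(torch.cuda.get_device_name(0))  # physical GPU 1, mapped to cuda:0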
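
build_optimizer() and build_scheduler() apparently now return one or more
optimizers/schedulers (hence the plural names), and build_trainer() bundles
model, optimizers, schedulers, and dataloaders behind a single run() method.
A minimal sketch of that shape, assuming lists of optimizers and per-step
schedulers (the real FunASR trainer also handles epochs, checkpointing, and
distributed training):

    class ToyTrainer:
        def __init__(self, model, optimizers, schedulers, train_dataloader):
            self.model = model
            self.optimizers = optimizers      # one or more optimizers
            self.schedulers = schedulers      # matching lr schedulers
            self.train_dataloader = train_dataloader

        def run(self):
            self.model.train()
            for batch in self.train_dataloader:
                loss = self.model(**batch)    # assumes dict batches, scalar loss
                loss.backward()
                for opt in self.optimizers:   # step every optimizer, then
                    opt.step()                # clear its gradients
                    opt.zero_grad()
                for sch in self.schedulers:
                    sch.step()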

diff --git a/funasr/bin/train.py b/funasr/bin/train.py
old mode 100644
new mode 100755
index e861199..9c8f672
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import argparse
 import logging
 import os
@@ -12,11 +14,12 @@
 from funasr.build_utils.build_model import build_model
 from funasr.build_utils.build_optimizer import build_optimizer
 from funasr.build_utils.build_scheduler import build_scheduler
+from funasr.build_utils.build_trainer import build_trainer
 from funasr.text.phoneme_tokenizer import g2p_choices
 from funasr.torch_utils.model_summary import model_summary
 from funasr.torch_utils.pytorch_version import pytorch_cudnn_version
 from funasr.torch_utils.set_all_random_seed import set_all_random_seed
-from funasr.utils import config_argparse
+from funasr.utils.nested_dict_action import NestedDictAction
 from funasr.utils.prepare_data import prepare_data
 from funasr.utils.types import str2bool
 from funasr.utils.types import str_or_none
@@ -24,7 +27,7 @@
 
 
 def get_parser():
-    parser = config_argparse.ArgumentParser(
+    parser = argparse.ArgumentParser(
         description="FunASR Common Training Parser",
     )
 
@@ -74,6 +77,12 @@
         default=False,
         help="Whether to use the find_unused_parameters in "
              "torch.nn.parallel.DistributedDataParallel ",
+    )
+    parser.add_argument(
+        "--gpu_id",
+        type=int,
+        default=0,
+        help="local gpu id.",
     )
 
     # cudnn related
@@ -277,10 +286,48 @@
         default=[],
     )
     parser.add_argument(
+        "--train_shape_file",
+        type=str,
+        action="append",
+        default=[],
+    )
+    parser.add_argument(
+        "--valid_shape_file",
+        type=str,
+        action="append",
+        default=[],
+    )
+    parser.add_argument(
         "--use_preprocessor",
         type=str2bool,
         default=True,
         help="Apply preprocessing to data or not",
+    )
+
+    # optimization related
+    parser.add_argument(
+        "--optim",
+        type=lambda x: x.lower(),
+        default="adam",
+        help="The optimizer type",
+    )
+    parser.add_argument(
+        "--optim_conf",
+        action=NestedDictAction,
+        default=dict(),
+        help="The keyword arguments for optimizer",
+    )
+    parser.add_argument(
+        "--scheduler",
+        type=lambda x: str_or_none(x.lower()),
+        default=None,
+        help="The lr scheduler type",
+    )
+    parser.add_argument(
+        "--scheduler_conf",
+        action=NestedDictAction,
+        default=dict(),
+        help="The keyword arguments for lr scheduler",
     )
 
     # most task related
@@ -380,17 +427,14 @@
         help="oss bucket.",
     )
 
-    # task related
-    parser.add_argument("--task_name", help="for different task")
-
     return parser
 
 
 if __name__ == '__main__':
     parser = get_parser()
-    args = parser.parse_args()
-    task_args = build_args(args)
-    args = argparse.Namespace(**vars(args), **vars(task_args))
+    args, extra_task_params = parser.parse_known_args()
+    if extra_task_params:
+        args = build_args(args, parser, extra_task_params)
 
     # set random seed
     set_all_random_seed(args.seed)
@@ -399,6 +443,7 @@
     torch.backends.cudnn.deterministic = args.cudnn_deterministic
 
     # ddp init
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
     args.distributed = args.dist_world_size > 1
     distributed_option = build_distributed(args)
 
@@ -420,16 +465,16 @@
     prepare_data(args, distributed_option)
 
     model = build_model(args)
-    optimizer = build_optimizer(args, model=model)
-    scheduler = build_scheduler(args, optimizer)
+    optimizers = build_optimizer(args, model=model)
+    schedulers = build_scheduler(args, optimizers)
 
     logging.info("world size: {}, rank: {}, local_rank: {}".format(distributed_option.dist_world_size,
                                                                    distributed_option.dist_rank,
                                                                    distributed_option.local_rank))
     logging.info(pytorch_cudnn_version())
     logging.info(model_summary(model))
-    logging.info("Optimizer: {}".format(optimizer))
-    logging.info("Scheduler: {}".format(scheduler))
+    logging.info("Optimizer: {}".format(optimizers))
+    logging.info("Scheduler: {}".format(schedulers))
 
     # dump args to config.yaml
     if not distributed_option.distributed or distributed_option.dist_rank == 0:
@@ -443,4 +488,18 @@
             else:
                 yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False)
 
+    # dataloaders for training and validation
     train_dataloader, valid_dataloader = build_dataloader(args)
+
+    # Trainer, including model, optimizers, etc.
+    trainer = build_trainer(
+        args=args,
+        model=model,
+        optimizers=optimizers,
+        schedulers=schedulers,
+        train_dataloader=train_dataloader,
+        valid_dataloader=valid_dataloader,
+        distributed_option=distributed_option
+    )
+
+    trainer.run()

--
Gitblit v1.9.1