From ec5e15d47c9f5392700a0bce59c697a4960b8741 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Tue, 25 Apr 2023 11:48:12 +0800
Subject: [PATCH] train.py: add argument types and new training options

Add explicit types to the distributed rank/world-size arguments,
introduce --dist_launcher, --multiprocessing_distributed, --train_dtype,
--use_tensorboard and --dataset_conf, parse the data path/name/type
triples with str2triple_str, and log the parsed args at startup.
---
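Notes (not part of the commit message):

The data-path arguments are now parsed with str2triple_str. Below is a
minimal sketch of the expected behaviour, assuming funasr's helper
follows the ESPnet convention of splitting the value on commas into a
(path, name, type) triple; the real helper may differ in validation
details:

    # Hypothetical stand-in for funasr.utils.types.str2triple_str.
    def str2triple_str(value):
        # Expect exactly three comma-separated fields: path, name, type.
        path, name, dtype = value.split(",")
        return path.strip(), name.strip(), dtype.strip()

    # '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'
    assert str2triple_str("some/path/a.scp,foo,sound") == (
        "some/path/a.scp", "foo", "sound",
    )
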
 funasr/bin/train.py |   45 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 43 insertions(+), 2 deletions(-)
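
The new --dataset_conf flag uses NestedDictAction; assuming it mirrors
the ESPnet action of the same name, repeated 'key=value' occurrences are
merged into one dict (values YAML-parsed) and a whole dict can be passed
at once. A simplified, runnable stand-in (nested 'a.b=c' keys omitted):

    import argparse

    import yaml  # PyYAML

    class NestedDictAction(argparse.Action):
        # Hypothetical simplification of funasr's NestedDictAction.
        def __call__(self, parser, namespace, values, option_string=None):
            d = dict(getattr(namespace, self.dest, None) or {})
            if "=" in values:
                key, value = values.split("=", maxsplit=1)
                d[key] = yaml.safe_load(value)  # '32' -> 32, 'true' -> True
            else:
                d = yaml.safe_load(values)  # whole dict given at once
            setattr(namespace, self.dest, d)

    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_conf", action=NestedDictAction, default=dict())
    args = parser.parse_args(
        ["--dataset_conf", "batch_size=32", "--dataset_conf", "shuffle=true"]
    )
    print(args.dataset_conf)  # {'batch_size': 32, 'shuffle': True}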

diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index f684f3b..4641370 100755
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -23,6 +23,7 @@
 from funasr.utils.prepare_data import prepare_data
 from funasr.utils.types import int_or_none
 from funasr.utils.types import str2bool
+from funasr.utils.types import str2triple_str
 from funasr.utils.types import str_or_none
 from funasr.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump
 
@@ -59,16 +60,19 @@
     )
     parser.add_argument(
         "--dist_world_size",
+        type=int,
         default=1,
         help="number of nodes for distributed training",
     )
     parser.add_argument(
         "--dist_rank",
+        type=int,
         default=None,
         help="node rank for distributed training",
     )
     parser.add_argument(
         "--local_rank",
+        type=int,
         default=None,
         help="local rank for distributed training",
     )
@@ -85,6 +89,22 @@
         type=int_or_none,
         help="The master port for distributed training"
              "This value is used when dist_init_method == 'env://'",
+    )
+    parser.add_argument(
+        "--dist_launcher",
+        default=None,
+        type=str_or_none,
+        choices=["slurm", "mpi", None],
+        help="The launcher type for distributed training",
+    )
+    parser.add_argument(
+        "--multiprocessing_distributed",
+        default=True,
+        type=str2bool,
+        help="Use multi-processing distributed training to launch "
+             "N processes per node, which has N GPUs. This is the "
+             "fastest way to use PyTorch for either single node or "
+             "multi node data parallel training",
     )
     parser.add_argument(
         "--unused_parameters",
@@ -222,6 +242,12 @@
         help="Enable resuming if checkpoint is existing",
     )
     parser.add_argument(
+        "--train_dtype",
+        default="float32",
+        choices=["float16", "float32", "float64"],
+        help="Data type for training.",
+    )
+    parser.add_argument(
         "--use_amp",
         type=str2bool,
         default=False,
@@ -233,6 +259,12 @@
         help="Show the logs every the number iterations in each epochs at the "
              "training phase. If None is given, it is decided according the number "
              "of training samples automatically .",
+    )
+    parser.add_argument(
+        "--use_tensorboard",
+        type=str2bool,
+        default=True,
+        help="Enable tensorboard logging",
     )
 
     # pretrained model related
@@ -278,6 +310,12 @@
         help="whether to use dataloader for large dataset",
     )
     parser.add_argument(
+        "--dataset_conf",
+        action=NestedDictAction,
+        default=dict(),
+        help=f"The keyword arguments for dataset",
+    )
+    parser.add_argument(
         "--train_data_file",
         type=str,
         default=None,
@@ -291,18 +329,21 @@
     )
     parser.add_argument(
         "--train_data_path_and_name_and_type",
+        type=str2triple_str,
         action="append",
         default=[],
         help="e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. ",
     )
     parser.add_argument(
         "--valid_data_path_and_name_and_type",
+        type=str2triple_str,
         action="append",
         default=[],
     )
     parser.add_argument(
         "--train_shape_file",
-        type=str, action="append",
+        type=str,
+        action="append",
         default=[],
     )
     parser.add_argument(
@@ -449,7 +490,6 @@
     args, extra_task_params = parser.parse_known_args()
     if extra_task_params:
         args = build_args(args, parser, extra_task_params)
-        # args = argparse.Namespace(**vars(args), **vars(task_args))
 
     # set random seed
     set_all_random_seed(args.seed)
@@ -487,6 +527,7 @@
                                                                    distributed_option.dist_rank,
                                                                    distributed_option.local_rank))
     logging.info(pytorch_cudnn_version())
+    logging.info("Args: {}".format(args))
     logging.info(model_summary(model))
     logging.info("Optimizer: {}".format(optimizers))
     logging.info("Scheduler: {}".format(schedulers))

--
Gitblit v1.9.1