From e422c6197b5bcada0429986500d8d5ca4ffcb3e4 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Wed, 10 May 2023 19:23:37 +0800
Subject: [PATCH] update repo

---
 funasr/bin/train.py |   57 +++++++++++++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 24 deletions(-)

diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index 64391d9..22387b9 100755
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -65,6 +65,7 @@
     )
     parser.add_argument(
         "--dist_rank",
+        type=int,
         default=None,
         help="node rank for distributed training",
     )
@@ -240,6 +241,12 @@
         help="Enable resuming if checkpoint is existing",
     )
     parser.add_argument(
+        "--train_dtype",
+        default="float32",
+        choices=["float16", "float32", "float64"],
+        help="Data type for training.",
+    )
+    parser.add_argument(
         "--use_amp",
         type=str2bool,
         default=False,
@@ -251,6 +258,12 @@
         help="Show the logs every the number iterations in each epochs at the "
              "training phase. If None is given, it is decided according the number "
              "of training samples automatically .",
+    )
+    parser.add_argument(
+        "--use_tensorboard",
+        type=str2bool,
+        default=True,
+        help="Enable tensorboard logging",
     )
 
     # pretrained model related
@@ -296,39 +309,30 @@
         help="whether to use dataloader for large dataset",
     )
     parser.add_argument(
-        "--train_data_file",
+        "--dataset_conf",
+        action=NestedDictAction,
+        default=dict(),
+        help=f"The keyword arguments for dataset",
+    )
+    parser.add_argument(
+        "--data_dir",
         type=str,
         default=None,
-        help="train_list for large dataset",
+        help="root path of data",
     )
     parser.add_argument(
-        "--valid_data_file",
+        "--train_set",
         type=str,
-        default=None,
-        help="valid_list for large dataset",
+        default="train",
+        help="train dataset",
     )
     parser.add_argument(
-        "--train_data_path_and_name_and_type",
-        action="append",
-        default=[],
-        help="e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. ",
-    )
-    parser.add_argument(
-        "--valid_data_path_and_name_and_type",
-        action="append",
-        default=[],
-    )
-    parser.add_argument(
-        "--train_shape_file",
-        type=str, action="append",
-        default=[],
-    )
-    parser.add_argument(
-        "--valid_shape_file",
+        "--valid_set",
         type=str,
-        action="append",
-        default=[],
+        default="validation",
+        help="dev dataset",
     )
+
     parser.add_argument(
         "--use_preprocessor",
         type=str2bool,
@@ -497,6 +501,10 @@
     prepare_data(args, distributed_option)
 
     model = build_model(args)
+    model = model.to(
+        dtype=getattr(torch, args.train_dtype),
+        device="cuda" if args.ngpu > 0 else "cpu",
+    )
     optimizers = build_optimizer(args, model=model)
     schedulers = build_scheduler(args, optimizers)
 
@@ -504,6 +512,7 @@
                                                                    distributed_option.dist_rank,
                                                                    distributed_option.local_rank))
     logging.info(pytorch_cudnn_version())
+    logging.info("Args: {}".format(args))
     logging.info(model_summary(model))
     logging.info("Optimizer: {}".format(optimizers))
     logging.info("Scheduler: {}".format(schedulers))

--
Gitblit v1.9.1