From 9fcb3cc06b4e324f0913d2f61b89becc2baeef1b Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Mon, 11 Sep 2023 17:40:03 +0800
Subject: [PATCH] Merge pull request #932 from alibaba-damo-academy/dev_lhn

---
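Notes (placed between the --- and the diffstat, so `git am` ignores them):
this change makes the build_args call unconditional, tracks the distributed
flag in a local variable so the PAI/OSS/batch defaults can be filled in
before it is copied back onto args, syncs batch_bins into dataset_conf for
the small dataset type as well as the large one, and drops the late
CUDA_VISIBLE_DEVICES override and re-derivation of args.distributed ahead
of build_distributed. A minimal sketch of the resulting control flow,
assuming get_parser, build_args, and the optional batch_bins argument from
the surrounding funasr/bin/build_trainer.py:

    parser = get_parser()
    args, extra_task_params = parser.parse_known_args()
    args = build_args(args, parser, extra_task_params)  # now unconditional

    # Local flag, set before the defaults below and copied onto args after.
    distributed = args.local_rank is not None
    args.local_rank = args.local_rank if args.local_rank is not None else 0

    args.use_pai = False        # defaults introduced by this patch
    args.batch_type = "length"
    args.oss_bucket = None
    args.input_size = None
    if distributed:
        args.distributed = True
        args.simple_ddp = True
    else:
        args.distributed = False
        args.ngpu = 1           # single-GPU fallback

    if batch_bins is not None:
        if args.dataset_type == "small":
            args.batch_bins = batch_bins
            # newly kept in sync with the large-dataset path
            args.dataset_conf["batch_conf"]["batch_size"] = batch_bins
        elif args.dataset_type == "large":
            args.dataset_conf["batch_conf"]["batch_size"] = batch_bins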
 funasr/bin/build_trainer.py |   20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/funasr/bin/build_trainer.py b/funasr/bin/build_trainer.py
index 24811c9..52aa509 100644
--- a/funasr/bin/build_trainer.py
+++ b/funasr/bin/build_trainer.py
@@ -529,13 +529,12 @@
                   **kwargs):
     parser = get_parser()
     args, extra_task_params = parser.parse_known_args()
-    if extra_task_params:
-        args = build_args(args, parser, extra_task_params)
+    args = build_args(args, parser, extra_task_params)
 
     if args.local_rank is not None:
-        args.distributed = True
+        distributed = True
     else:
-        args.distributed = False
+        distributed = False
     args.local_rank = args.local_rank if args.local_rank is not None else 0
     local_rank = args.local_rank
     if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
@@ -588,6 +587,16 @@
     args.output_dir = output_dir
     args.gpu_id = args.local_rank
     args.config = finetune_config
+    args.use_pai = False
+    args.batch_type = "length"
+    args.oss_bucket = None
+    args.input_size = None
+    if distributed:
+        args.distributed = True
+        args.simple_ddp = True
+    else:
+        args.distributed = False
+        args.ngpu = 1
     if optim is not None:
         args.optim = optim
     if lr is not None:
@@ -605,6 +614,7 @@
     if batch_bins is not None:
         if args.dataset_type == "small":
             args.batch_bins = batch_bins
+            args.dataset_conf["batch_conf"]["batch_size"] = batch_bins
         elif args.dataset_type == "large":
             args.dataset_conf["batch_conf"]["batch_size"] = batch_bins
         else:
@@ -622,8 +632,6 @@
     torch.backends.cudnn.deterministic = args.cudnn_deterministic
 
     # ddp init
-    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
-    args.distributed = args.ngpu > 1 or args.dist_world_size > 1
     distributed_option = build_distributed(args)
 
     # for logging

--
Gitblit v1.9.1