From 2d22eaba7c07133d8d77c739b024f1cdc620d100 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Thu, 27 Jul 2023 14:46:40 +0800
Subject: [PATCH] update

---
 funasr/bin/build_trainer.py |   11 +++++++----
 1 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/funasr/bin/build_trainer.py b/funasr/bin/build_trainer.py
index 24811c9..be33b3c 100644
--- a/funasr/bin/build_trainer.py
+++ b/funasr/bin/build_trainer.py
@@ -529,13 +529,14 @@
                   **kwargs):
     parser = get_parser()
     args, extra_task_params = parser.parse_known_args()
-    if extra_task_params:
-        args = build_args(args, parser, extra_task_params)
+    args = build_args(args, parser, extra_task_params)
 
     if args.local_rank is not None:
         args.distributed = True
+        args.simple_ddp = True
     else:
         args.distributed = False
+        args.ngpu = 1
     args.local_rank = args.local_rank if args.local_rank is not None else 0
     local_rank = args.local_rank
     if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
@@ -588,6 +589,10 @@
     args.output_dir = output_dir
     args.gpu_id = args.local_rank
     args.config = finetune_config
+    args.use_pai = False
+    args.batch_type = "length"
+    args.oss_bucket = None
+    args.input_size = None
     if optim is not None:
         args.optim = optim
     if lr is not None:
@@ -622,8 +627,6 @@
     torch.backends.cudnn.deterministic = args.cudnn_deterministic
 
     # ddp init
-    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
-    args.distributed = args.ngpu > 1 or args.dist_world_size > 1
     distributed_option = build_distributed(args)
 
     # for logging

--
Gitblit v1.9.1