funasr/bin/train.py
@@ -58,7 +58,7 @@
 )
 parser.add_argument(
     "--dist_world_size",
-    default=None,
+    default=1,
     help="number of nodes for distributed training",
 )
 parser.add_argument(
@@ -444,7 +444,7 @@
     # ddp init
     os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
-    args.distributed = args.dist_world_size > 1
+    args.distributed = args.ngpu > 1 or args.dist_world_size > 1
     distributed_option = build_distributed(args)
     # for logging