| | |
| | | --input_size $feats_dim \ |
| | | --ngpu $gpu_num \ |
| | | --num_worker_count $count \ |
| | | --multiprocessing_distributed true \ |
| | | --dist_init_method $init_method \ |
| | | --dist_world_size $world_size \ |
| | | --dist_rank $rank \ |
| | |
| | | help="Whether to use the find_unused_parameters in " |
| | | "torch.nn.parallel.DistributedDataParallel ", |
| | | ) |
| | | parser.add_argument( |
| | | "--gpu_id", |
| | | type=int, |
| | | default=0, |
| | | help="local gpu id.", |
| | | ) |
| | | |
| | | # cudnn related |
| | | parser.add_argument( |
| | |
| | | torch.backends.cudnn.deterministic = args.cudnn_deterministic |
| | | |
| | | # ddp init |
| | | os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id) |
| | | args.distributed = args.dist_world_size > 1 |
| | | distributed_option = build_distributed(args) |
| | | |
| | |
| | | # sampler |
| | | dataset_conf = args.dataset_conf |
| | | batch_sampler = LengthBatchSampler( |
| | | batch_bins=dataset_conf["batch_size"] * args.ngpu, |
| | | shape_files=shape_files, |
| | | sort_in_batch=dataset_conf["sort_in_batch"] if "sort_in_batch" in dataset_conf else "descending",
| | | sort_batch=dataset_conf["sort_batch"] if "sort_batch" in dataset_conf else "ascending",