# NOTE(review): extraction artifact — every line below is wrapped in
# markdown-table pipes ("| | | ... |") and the original Python indentation
# is lost, so the nesting of these statements cannot be recovered from
# this chunk. Comments here describe only what the visible tokens show.
| | |
# Total number of optimizer updates: one update per batch-size-list entry
# per epoch. Assumes bs_list holds the per-epoch batch schedule — TODO confirm.
| | | args.max_update = len(bs_list) * args.max_epoch |
| | | logging.info("Max update: {}".format(args.max_update)) |
| | | |
# NOTE(review): the next two conditions overlap — `args.distributed and
# mode=="train"` already implies `args.distributed`, so one of the two
# checks is redundant (possibly leftovers from different revisions).
# The true nesting is unknowable here; verify against the real file.
| | | if args.distributed: |
| | | if args.distributed and mode=="train": |
# Query the process-group geometry so batches can be sharded per rank.
| | | world_size = torch.distributed.get_world_size() |
| | | rank = torch.distributed.get_rank() |
# NOTE(review): loop body is not visible in this chunk — the fragment is
# incomplete from here on; presumably each rank keeps batch[rank::world_size]
# or similar, but that cannot be confirmed from this view.
| | | for batch in batches: |
| | |
# NOTE(review): this span is the interior of a dataloader-style __init__
# (the enclosing `def` and class header are outside this chunk); `self`,
# `args`, `mode`, and `collate_fn` are bound elsewhere. Pipe wrappers are
# extraction artifacts, not program text.
| | | self.num_iters_per_epoch = None |
# Shuffle only while training; eval/test keep deterministic order.
| | | self.shuffle = mode == "train" |
| | | self.seed = args.seed |
# NOTE(review): dead assignment — the next line immediately overwrites
# self.num_workers, so `args.num_workers` is never used and the
# dataset_conf value (default 8) always wins. One of the two lines is
# probably a leftover and should be removed; confirm intended precedence.
| | | self.num_workers = args.num_workers |
| | | self.num_workers = args.dataset_conf.get("num_workers", 8) |
| | | self.collate_fn = collate_fn |
# Pin host memory only when at least one GPU is configured (speeds up
# host-to-device transfer; pointless on CPU-only runs).
| | | self.pin_memory = args.ngpu > 0 |
| | | |