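# Run everything on whichever device the model's parameters already live on.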
| | | kwargs["device"] = next(model.parameters()).device |
| | | trainer.device = kwargs["device"] |
| | | |
# optim
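# `optim_classes` is presumably a name -> class registry (e.g. "adam" -> torch.optim.Adam);
# `optim_conf` supplies the constructor kwargs such as the learning rate.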
| | | logging.info("Build optim") |
| | | optim = kwargs.get("optim", "adam") |
| | | assert optim in optim_classes |
| | | optim_class = optim_classes.get(optim) |
| | | optim = optim_class(model.parameters(), **kwargs.get("optim_conf")) |
| | | |
# scheduler
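# Same registry pattern for the LR scheduler; "warmuplr" presumably selects a
# warmup learning-rate schedule built around the optimizer created above.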
| | | logging.info("Build scheduler") |
| | | scheduler = kwargs.get("scheduler", "warmuplr") |
| | | assert scheduler in scheduler_classes |
| | | scheduler_class = scheduler_classes.get(scheduler) |
| | | scheduler = scheduler_class(optim, **kwargs.get("scheduler_conf")) |
| | | |
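# Under DeepSpeed, model, optimizer, and scheduler are wrapped into a single engine;
# the JSON file named by "deepspeed_config" controls ZeRO stage, precision, etc.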
if use_deepspeed:
    args = OmegaConf.create({"deepspeed_config": kwargs.get("deepspeed_config", "")})
    # deepspeed.initialize returns (engine, optimizer, dataloader, lr_scheduler);
    # bind the engine and optimizer back to the names used below
    model, optim, _, scheduler = deepspeed.initialize(
        args=args,
        model=model,
        optimizer=optim,
        lr_scheduler=scheduler,
        model_parameters=model.parameters(),
    )
else:
    # non-DeepSpeed path: let the trainer wrap the three objects (DDP/FSDP, etc.);
    # the (model, optim, scheduler, **kwargs) signature is assumed here
    model, optim, scheduler = trainer.warp_optim_scheduler(model, optim, scheduler, **kwargs)

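# Dataloader construction follows the same name -> class registry pattern.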
# dataset
logging.info("Build dataloader")
dataloader_class = dataloader_classes.get(kwargs["dataset_conf"].get("dataloader", "DataloaderMapStyle"))
dataloader = dataloader_class(**kwargs)

# resume model/optim/scheduler/scaler state from the latest checkpoint, if any (call name assumed)
trainer.resume_checkpoint(
    model=model,
    optim=optim,
    scheduler=scheduler,
    scaler=scaler,
)

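# TensorBoard logging is optional: without tensorboardX the run proceeds
# with writer=None and metric logging is simply skipped.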
tensorboard_dir = os.path.join(kwargs.get("output_dir"), "tensorboard")
os.makedirs(tensorboard_dir, exist_ok=True)
try:
    from tensorboardX import SummaryWriter

    writer = SummaryWriter(tensorboard_dir)  # if trainer.rank == 0 else None
except ImportError:
    writer = None

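# Outer loop over epochs (resumable from trainer.start_epoch); the inner loop
# walks the data splits a large dataset is sharded into, rebuilding the
# train/val iterators per split (build_iter is assumed to return that pair).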
dataloader_tr, dataloader_val = None, None
for epoch in range(trainer.start_epoch, trainer.max_epoch):
    time1 = time.perf_counter()

    for data_split_i in range(trainer.start_data_split_i, dataloader.data_split_num):
        dataloader_tr, dataloader_val = dataloader.build_iter(
            epoch, data_split_i=data_split_i, start_step=trainer.start_step
        )
        trainer.train_epoch(
            model=model,
            optim=optim,
            scheduler=scheduler,
            scaler=scaler,
            dataloader_train=dataloader_tr,
            dataloader_val=dataloader_val,
            epoch=epoch,
            writer=writer,
            data_split_i=data_split_i,
            data_split_num=dataloader.data_split_num,
            start_step=trainer.start_step,
        )
        trainer.start_step = 0

    torch.cuda.empty_cache()

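    # End of epoch: reset split bookkeeping, validate, advance the LR schedule,
    # and checkpoint so training can resume from epoch + 1.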
    trainer.start_data_split_i = 0
    trainer.validate_epoch(
        model=model, dataloader_val=dataloader_val, epoch=epoch + 1, writer=writer
    )
    scheduler.step()
    trainer.step_in_epoch = 0
    trainer.save_checkpoint(