):
torch.cuda.empty_cache()

time3 = time.perf_counter()
speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
loss, stats, weight = retval
stats = {k: v for k, v in stats.items() if v is not None}
if self.use_ddp or self.use_fsdp:
    # DDP/FSDP average gradients across workers, so scale the loss back up
    loss *= self.world_size
# Scale the loss since we're not updating for every mini-batch
loss = loss / accum_grad
if self.use_fp16:
    scaler.scale(loss).backward()
else:
    loss.backward()
time4 = time.perf_counter()
speed_stats["backward_and_AllReduce_time"] = f"{time4 - time3:0.3f}"

# Running average of the training loss over the steps seen so far this epoch
self.train_loss_avg = (
    self.train_loss_avg * (self.step_in_epoch - 1) + loss.detach().cpu().item()
) / self.step_in_epoch

# Apply the accumulated gradients, then advance the LR schedule
if self.use_fp16:
    scaler.step(optim)
    scaler.update()
else:
    optim.step()
scheduler.step()
# Clear gradients for the next accumulation stage
optim.zero_grad(set_to_none=True)
# Average wall-clock time per mini-batch over the accumulation window
total_time = f"{(time.perf_counter() - time5)/accum_grad:0.3f}"
time5 = time.perf_counter()
speed_stats["optim_time"] = f"{time5 - time4:0.3f}"
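For reference, here is a minimal, self-contained sketch of the gradient-accumulation plus AMP pattern the loop above implements. The tiny linear model, the synthetic batches, and `accum_grad = 4` are illustrative choices, not part of the trainer.

```python
import torch

accum_grad = 4
use_fp16 = torch.cuda.is_available()
device = "cuda" if use_fp16 else "cpu"

model = torch.nn.Linear(16, 1).to(device)
optim = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=10)
scaler = torch.cuda.amp.GradScaler(enabled=use_fp16)

for step in range(1, 33):
    # Synthetic mini-batch standing in for the real dataloader
    x = torch.randn(8, 16, device=device)
    y = torch.randn(8, 1, device=device)
    with torch.cuda.amp.autocast(enabled=use_fp16):
        loss = torch.nn.functional.mse_loss(model(x), y)
    # Scale the loss so the accumulated gradients average over the window
    loss = loss / accum_grad
    scaler.scale(loss).backward()
    if step % accum_grad == 0:
        scaler.step(optim)   # unscales gradients, then calls optim.step()
        scaler.update()
        scheduler.step()
        optim.zero_grad(set_to_none=True)
```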