游雁
2024-05-09 664fd7abc82def968310b9f202a901ac675b901d
total_time/accum_grad
1 file changed, 9 lines changed
funasr/train_utils/trainer.py  9 ●●●●●
funasr/train_utils/trainer.py
@@ -382,8 +382,6 @@
                     ):
                         torch.cuda.empty_cache()
-                time3 = time.perf_counter()
-                speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
                 loss, stats, weight = retval
                 stats = {k: v for k, v in stats.items() if v is not None}
                 if self.use_ddp or self.use_fsdp:
@@ -400,12 +398,15 @@
                     loss *= self.world_size
                 # Scale the loss since we're not updating for every mini-batch
                 loss = loss / accum_grad
+                time3 = time.perf_counter()
+                speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
                 if self.use_fp16:
                     scaler.scale(loss).backward()
                 else:
                     loss.backward()
                 time4 = time.perf_counter()
-                speed_stats["backward_time"] = f"{time4 - time3:0.3f}"
+                speed_stats["backward_and_AllReaduce_time"] = f"{time4 - time3:0.3f}"
                 self.train_loss_avg = (
                     self.train_loss_avg * (self.step_in_epoch - 1) + loss.detach().cpu().item()
@@ -454,7 +455,7 @@
                 scheduler.step()
                 # Clear gradients for the next accumulation stage
                 optim.zero_grad(set_to_none=True)
-                total_time = f"{time.perf_counter() - time5:0.3f}"
+                total_time = f"{(time.perf_counter() - time5)/accum_grad:0.3f}"
                 time5 = time.perf_counter()
                 speed_stats["optim_time"] = f"{time5 - time4:0.3f}"