From 664fd7abc82def968310b9f202a901ac675b901d Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 09 May 2024 17:54:31 +0800
Subject: [PATCH] total_time/accum_grad
---
funasr/train_utils/trainer.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
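Note on the change: the forward_time stamp moves below the loss scaling so it
also covers the DDP/FSDP loss post-processing; the stat formerly named
backward_time becomes backward_and_AllReduce_time, since under DDP the gradient
all-reduce overlaps with backward(); and total_time is divided by accum_grad,
so it now reports per-mini-batch time instead of per-optimizer-step time.
Below is a minimal sketch of the resulting timing layout, assuming a generic
PyTorch loop; the model, data, and optimizer are illustrative stand-ins, not
funasr code.

    import time
    import torch

    # Illustrative stand-ins: only accum_grad and the time2..time5 layout
    # mirror the patch; the rest is assumed for the sake of a runnable sketch.
    model = torch.nn.Linear(10, 1)
    optim = torch.optim.SGD(model.parameters(), lr=0.01)
    accum_grad = 4  # mini-batches accumulated per optimizer step

    time5 = time.perf_counter()
    for batch_idx in range(8):
        x, y = torch.randn(32, 10), torch.randn(32, 1)

        time2 = time.perf_counter()
        loss = torch.nn.functional.mse_loss(model(x), y)
        # Scale the loss since we're not updating for every mini-batch.
        loss = loss / accum_grad
        time3 = time.perf_counter()
        forward_time = time3 - time2  # now includes the loss scaling above

        loss.backward()  # under DDP this also overlaps the gradient all-reduce
        time4 = time.perf_counter()
        backward_and_allreduce_time = time4 - time3

        if (batch_idx + 1) % accum_grad == 0:
            optim.step()
            optim.zero_grad(set_to_none=True)
            # The span since the last reset covers accum_grad mini-batches,
            # so divide to report per-mini-batch time, as the patch does.
            total_time = (time.perf_counter() - time5) / accum_grad
            time5 = time.perf_counter()
            optim_time = time5 - time4
            print(f"total_time per mini-batch: {total_time:0.3f}s")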
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index 01e2924..d46a21c 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -382,8 +382,6 @@
):
torch.cuda.empty_cache()
- time3 = time.perf_counter()
- speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
loss, stats, weight = retval
stats = {k: v for k, v in stats.items() if v is not None}
if self.use_ddp or self.use_fsdp:
@@ -400,12 +398,15 @@
loss *= self.world_size
# Scale the loss since we're not updating for every mini-batch
loss = loss / accum_grad
+
+ time3 = time.perf_counter()
+ speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
if self.use_fp16:
scaler.scale(loss).backward()
else:
loss.backward()
time4 = time.perf_counter()
- speed_stats["backward_time"] = f"{time4 - time3:0.3f}"
+ speed_stats["backward_and_AllReaduce_time"] = f"{time4 - time3:0.3f}"
self.train_loss_avg = (
self.train_loss_avg * (self.step_in_epoch - 1) + loss.detach().cpu().item()
@@ -454,7 +455,7 @@
scheduler.step()
# Clear gradients for the next accumulation stage
optim.zero_grad(set_to_none=True)
- total_time = f"{time.perf_counter() - time5:0.3f}"
+ total_time = f"{(time.perf_counter() - time5)/accum_grad:0.3f}"
time5 = time.perf_counter()
speed_stats["optim_time"] = f"{time5 - time4:0.3f}"
--
Gitblit v1.9.1