From e299cfecaf979833d9c4d7c70e44cb92ea066afe Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 09 May 2024 20:02:37 +0800
Subject: [PATCH] total_time/accum_grad: sync metric averages once per optimizer step
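
Under DDP/FSDP, drop the per-batch weighted loss averaging (left commented
out for reference) in favor of multiplying the loss by world_size, and move
the all_reduce that averages train_loss_avg/train_acc_avg across workers out
of the per-mini-batch path into the optimizer-step branch, so the sync runs
once every accum_grad mini-batches instead of on every forward pass.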
---
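Reviewer note, not part of the commit: the sketch below restates the pattern
this patch moves to. It is a minimal, self-contained approximation, not the
actual FunASR Trainer; `model`, `optim`, `batches`, `accum_grad`,
`world_size`, `device`, and `use_ddp` are stand-ins for the trainer's
attributes, and the model is assumed to return a scalar loss.

import torch
import torch.distributed as dist

def train_steps(model, optim, batches, accum_grad, world_size, device, use_ddp):
    train_loss_avg, step_in_epoch = 0.0, 0
    for batch_idx, batch in enumerate(batches):
        step_in_epoch += 1
        loss = model(**batch)  # assumed to return a scalar loss
        if use_ddp:
            # DDP normalizes gradients by world_size; multiply back so each
            # worker contributes its unnormalized local loss.
            loss = loss * world_size
        # Scale down so accum_grad backward passes sum to one full gradient.
        (loss / accum_grad).backward()
        # Local running average, as the trainer keeps per worker.
        train_loss_avg += (loss.item() - train_loss_avg) / step_in_epoch
        if (batch_idx + 1) % accum_grad == 0:
            optim.step()
            optim.zero_grad(set_to_none=True)
            if use_ddp:
                # The point of this patch: sync the running average once per
                # optimizer step rather than after every mini-batch.
                t = torch.tensor(train_loss_avg, dtype=torch.float32, device=device)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                train_loss_avg = t.item() / world_size
    return train_loss_avg

Moving the sync reduces cross-worker all_reduce traffic from every
mini-batch to once per optimizer step; between steps the logged averages are
purely local to each worker.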
funasr/train_utils/trainer.py | 49 ++++++++++++++++++++++++++-----------------------
1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index d46a21c..28fbb29 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -384,18 +384,19 @@
loss, stats, weight = retval
stats = {k: v for k, v in stats.items() if v is not None}
- if self.use_ddp or self.use_fsdp:
- # Apply weighted averaging for loss and stats
- loss = (loss * weight.type(loss.dtype)).sum()
- # if distributed, this method can also apply all_reduce()
- # stats, weight = recursive_average(stats, weight, distributed=True)
- if self.use_ddp or self.use_fsdp:
- dist.all_reduce(weight, op=dist.ReduceOp.SUM)
- # Now weight is summation over all workers
- loss /= weight.sum() # shape:[1] -> shape:[]
- # Multiply world_size because DistributedDataParallel
- # automatically normalizes the gradient by world_size.
- loss *= self.world_size
+ # if self.use_ddp or self.use_fsdp:
+ # # Apply weighted averaging for loss and stats
+ # loss = (loss * weight.type(loss.dtype)).sum()
+ # # if distributed, this method can also apply all_reduce()
+ # # stats, weight = recursive_average(stats, weight, distributed=True)
+ # if self.use_ddp or self.use_fsdp:
+ # dist.all_reduce(weight, op=dist.ReduceOp.SUM)
+ # # Now weight is summation over all workers
+ # loss /= weight.sum() # shape:[1] -> shape:[]
+ # # Multiply world_size because DistributedDataParallel
+ # # automatically normalizes the gradient by world_size.
+ # loss *= self.world_size
+ loss *= self.world_size
# Scale the loss since we're not updating for every mini-batch
loss = loss / accum_grad
@@ -416,17 +417,6 @@
self.train_acc_avg * (self.step_in_epoch - 1)
+ stats["acc"].detach().cpu().item()
) / self.step_in_epoch
- if self.use_ddp or self.use_fsdp:
- train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(
- self.device
- )
- train_acc_avg = torch.tensor(self.train_acc_avg, dtype=torch.float32).to(
- self.device
- )
- dist.all_reduce(train_loss_avg, op=dist.ReduceOp.SUM)
- dist.all_reduce(train_acc_avg, op=dist.ReduceOp.SUM)
- self.train_loss_avg = train_loss_avg.detach().cpu().item() / self.world_size
- self.train_acc_avg = train_acc_avg.detach().cpu().item() / self.world_size
# Perform an optimizer step only after accumulating enough gradients
if (batch_idx + 1) % accum_grad == 0:
@@ -457,6 +447,19 @@
optim.zero_grad(set_to_none=True)
total_time = f"{(time.perf_counter() - time5)/accum_grad:0.3f}"
time5 = time.perf_counter()
+
+ if self.use_ddp or self.use_fsdp:
+ train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(
+ self.device
+ )
+ train_acc_avg = torch.tensor(self.train_acc_avg, dtype=torch.float32).to(
+ self.device
+ )
+ dist.all_reduce(train_loss_avg, op=dist.ReduceOp.SUM)
+ dist.all_reduce(train_acc_avg, op=dist.ReduceOp.SUM)
+ self.train_loss_avg = train_loss_avg.detach().cpu().item() / self.world_size
+ self.train_acc_avg = train_acc_avg.detach().cpu().item() / self.world_size
+
speed_stats["optim_time"] = f"{time5 - time4:0.3f}"
speed_stats["total_time"] = total_time
--
Gitblit v1.9.1