From 26d642bfdf59a50365a9c8158acb223cae1004dc Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 23 Apr 2024 20:13:44 +0800
Subject: [PATCH] Dev gzf exp (#1651)
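
This patch makes three changes to funasr/train_utils/trainer.py:

- Guard the gradient-accumulation context: model.no_sync exists only on
  the DDP/FSDP wrappers, so non-distributed runs now fall back to
  nullcontext instead of touching no_sync on a bare nn.Module, which
  would raise AttributeError.
- Time data loading from a dedicated time_beg timestamp that is reset at
  the end of each loop iteration, so data_load measures only the wait
  for the next batch rather than the step's compute time.
- Forward data_split_i / data_split_num through **kwargs into the step
  log so multi-split training reports which data slice is running.

A minimal sketch of the no_sync guard (illustrative names, not FunASR
code; it uses the common "sync on the optimizer-step boundary" form):

    import torch
    from contextlib import nullcontext

    def accumulate(model, batches, optim, accum_grad=4, use_ddp=False):
        optim.zero_grad()
        for batch_idx, (x, y) in enumerate(batches):
            # Skip the gradient all-reduce except on the micro-batch
            # whose backward() precedes optimizer.step(); only DDP/FSDP
            # wrappers expose no_sync().
            ctx = nullcontext
            if use_ddp and (batch_idx + 1) % accum_grad != 0:
                ctx = model.no_sync
            with ctx():
                loss = torch.nn.functional.mse_loss(model(x), y)
                (loss / accum_grad).backward()
            if (batch_idx + 1) % accum_grad == 0:
                optim.step()
                optim.zero_grad()

And a sketch of the data_load timer (stand-in loop body and data):

    import time

    batches = [1, 2, 3]  # stand-in for a real DataLoader
    time_beg = time.perf_counter()
    for batch in batches:
        data_load = time.perf_counter() - time_beg  # wait time only
        time.sleep(0.01)  # stand-in for forward/backward/step
        time_beg = time.perf_counter()  # restart before the next fetch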
---
funasr/train_utils/trainer.py | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index 35a266f..3ee6885 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -252,6 +252,7 @@
dataloader_val=None,
epoch=None,
writer=None,
+ **kwargs,
):
"""
Defines the training process for a single epoch with gradient accumulation.
@@ -268,10 +269,12 @@
# Initialize the gradient accumulation
optim.zero_grad()
speed_stats = {}
- time5 = time.perf_counter()
+
iterator_stop = torch.tensor(0).to(self.device)
dataloader_train.batch_sampler.set_epoch(epoch)
+ time_beg = time.perf_counter()
+ time5 = time_beg

for batch_idx, batch in enumerate(dataloader_train):
if self.use_ddp or self.use_fsdp:
dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
@@ -279,11 +282,13 @@
break
self.batch_total += 1
time1 = time.perf_counter()
- speed_stats["data_load"] = f"{time1-time5:0.3f}"
+ speed_stats["data_load"] = f"{time1-time_beg:0.3f}"
batch = to_device(batch, self.device)

-
- my_context = model.no_sync if batch_idx % accum_grad != 0 else nullcontext
+
+ my_context = nullcontext
+ if self.use_ddp or self.use_fsdp:
+ my_context = model.no_sync if batch_idx % accum_grad != 0 else my_context
with my_context():
time2 = time.perf_counter()
with maybe_autocast(self.use_fp16):
@@ -370,6 +375,8 @@
stats=stats,
writer=writer,
tag="train",
+ data_split_i=kwargs.get("data_split_i", 0),
+ data_split_num=kwargs.get("data_split_num", 1),
)

if (batch_idx + 1) % self.validate_interval == 0:
@@ -384,6 +391,7 @@

if (batch_idx+1) % self.save_checkpoint_interval == 0:
self.save_checkpoint(epoch, model=model, optim=optim, scheduler=scheduler, scaler=scaler, step=batch_idx+1)
+ time_beg = time.perf_counter()
else:
if self.use_ddp or self.use_fsdp:
iterator_stop.fill_(1)
@@ -502,6 +510,9 @@
stats=None,
writer=None,
tag="train",
+ data_split_i=0,
+ data_split_num=1,
+ **kwargs,
):

if (batch_idx + 1) % self.log_interval == 0:
@@ -521,6 +532,7 @@
f"{tag}, "
f"rank: {self.local_rank}, "
f"epoch: {epoch}/{self.max_epoch}, "
+ f"data_slice: {data_split_i}/{data_split_num}, "
f"step: {batch_idx + 1}/{batch_num_epoch}, total step: {self.batch_total}, "
f"(loss_avg_rank: {loss:.3f}), "
f"(loss_avg_epoch: {loss_avg_epoch:.3f}), "
--
Gitblit v1.9.1