From 16a976a01d110d3969759be7720cae2b6b0664f7 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Sun, 24 Mar 2024 01:27:08 +0800
Subject: [PATCH] finetune
---
funasr/train_utils/trainer.py | 35 +++++++++++++++++++++++++++++++----
 1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index 49b9fbc..cf23483 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -239,6 +239,8 @@
Args:
epoch (int): The current epoch number.
"""
+ if self.use_ddp or self.use_fsdp:
+ dist.barrier()
logging.info(f"Train epoch: {epoch}, rank: {self.local_rank}\n")
model.train()
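
Note on the added barrier: under DDP or FSDP it pins every rank to the same
epoch boundary, so a fast rank cannot start the epoch (or its logging) while a
slower rank is still finishing the previous one. A minimal standalone sketch of
the same guard, assuming torch.distributed has already been initialized:

    import torch.distributed as dist

    def epoch_sync(use_ddp: bool, use_fsdp: bool) -> None:
        # Block until every rank reaches this point; no-op outside DDP/FSDP.
        if (use_ddp or use_fsdp) and dist.is_initialized():
            dist.barrier()
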
@@ -248,8 +250,14 @@
optim.zero_grad()
speed_stats = {}
time5 = time.perf_counter()
-
+ # iterator_stop = torch.tensor(0).to(self.device)
+
+ dataloader_train.batch_sampler.set_epoch(epoch)
for batch_idx, batch in enumerate(dataloader_train):
+ # if self.use_ddp or self.use_fsdp:
+ # dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+ # if iterator_stop > 0:
+ # break
self.batch_total += 1
time1 = time.perf_counter()
speed_stats["data_load"] = f"{time1-time5:0.3f}"
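
The new set_epoch() call is what makes shuffling differ across epochs under
DDP: a distributed sampler derives its shuffle permutation from the epoch
number, so without it every epoch replays the same batch order on every rank.
A minimal sketch with PyTorch's stock DistributedSampler (FunASR's batch
sampler exposes the same set_epoch hook; num_replicas and rank are given
explicitly here only so the snippet runs without a process group):

    import torch
    from torch.utils.data import DataLoader, DistributedSampler, TensorDataset

    dataset = TensorDataset(torch.arange(100).float())
    # In a real DDP run, omit num_replicas/rank; they come from the process group.
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True)
    loader = DataLoader(dataset, batch_size=8, sampler=sampler)

    for epoch in range(3):
        sampler.set_epoch(epoch)  # re-seed the shuffle for this epoch
        for batch in loader:
            pass  # training step goes here
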
@@ -356,7 +364,11 @@
if (batch_idx+1) % self.save_checkpoint_interval == 0:
self.save_checkpoint(epoch, model=model, optim=optim, scheduler=scheduler, scaler=scaler, step=batch_idx+1)
-
+ # else:
+ # if self.use_ddp or self.use_fsdp:
+ # iterator_stop.fill_(1)
+ # dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+
if self.use_ddp or self.use_fsdp:
dist.barrier()
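
The commented-out iterator_stop lines sketch a guard for uneven shards: if
ranks receive different numbers of batches, the rank that exhausts its
iterator first would leave the others blocked in the next collective call.
The idea is to all_reduce a stop flag on every step, so a finished rank
contributes 1 and all ranks break together. A self-contained sketch of that
pattern, restructured with an explicit iterator for clarity (train_step is a
placeholder, not a FunASR API):

    import torch
    import torch.distributed as dist

    def run_epoch_uneven(loader, device, train_step):
        # Shared stop flag: stays 0 while every rank still has batches.
        iterator_stop = torch.zeros(1, dtype=torch.int64, device=device)
        it = iter(loader)
        while True:
            try:
                batch = next(it)
            except StopIteration:
                # This rank ran out of data; signal the others.
                iterator_stop.fill_(1)
            dist.all_reduce(iterator_stop, op=dist.ReduceOp.SUM)
            if iterator_stop.item() > 0:
                break
            train_step(batch)
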
@@ -376,6 +388,8 @@
Args:
epoch (int): The current epoch number.
"""
+ if self.use_ddp or self.use_fsdp:
+ dist.barrier()
logging.info(f"Validate epoch: {epoch}, rank: {self.local_rank}\n")
model.eval()
@@ -383,7 +397,15 @@
speed_stats = {}
time5 = time.perf_counter()
+ # iterator_stop = torch.tensor(0).to(self.device)
+
for batch_idx, batch in enumerate(dataloader_val):
+ # if self.use_ddp or self.use_fsdp:
+ # dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+ # if epoch >= 1:
+ # print(f"iterator_stop: {iterator_stop}\n")
+ # if iterator_stop > 0:
+ # break
time1 = time.perf_counter()
speed_stats["data_load"] = f"{time1 - time5:0.3f}"
batch = to_device(batch, self.device)
@@ -397,7 +419,7 @@
# Apply weighted averaging for loss and stats
loss = (loss * weight.type(loss.dtype)).sum()
# if distributed, this method can also apply all_reduce()
- stats, weight = recursive_average(stats, weight, distributed=True)
+ # stats, weight = recursive_average(stats, weight, distributed=True)
if self.use_ddp or self.use_fsdp:
dist.all_reduce(weight, op=dist.ReduceOp.SUM)
# Now weight is summation over all workers
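
This hunk drops the cross-worker reduction of stats (recursive_average already
performs an all_reduce internally) and keeps only the reduction of the sample
weight, so logged stats stay rank-local while the loss normalizer is still
global. A minimal sketch of the weighted-average arithmetic the surrounding
code relies on (treating loss and weight as per-sample tensors; an
illustration of the pattern, not the trainer's exact code):

    import torch
    import torch.distributed as dist

    def reduce_weighted_loss(loss, weight, distributed):
        # Per-sample losses weighted by per-sample counts (e.g. frame counts).
        total = (loss * weight.type(loss.dtype)).sum()
        if distributed:
            # Sum weights over all workers so the normalizer is global;
            # per-rank results then sum to the global weighted mean.
            dist.all_reduce(weight, op=dist.ReduceOp.SUM)
        return total / weight.sum()
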
@@ -433,9 +455,14 @@
tag="val",
)
+ # else:
+ # if self.use_ddp or self.use_fsdp:
+ # iterator_stop.fill_(1)
+ # dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+
self.val_acc_list.append(self.val_acc_avg)
model.train()
-
+
if self.use_ddp or self.use_fsdp:
dist.barrier()
--
Gitblit v1.9.1