From 77cfefcba0c3d30e0db29e3b63179e385a66e6b7 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 25 Mar 2024 14:13:17 +0800
Subject: [PATCH] trainer: synchronize DDP/FSDP ranks and handle uneven dataloader lengths
---
funasr/train_utils/trainer.py | 41 +++++++++++++++++++++++++++++++++++------
1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index c443c6f..2d47fc1 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -198,6 +198,8 @@
for k in dst_state.keys():
if not k.startswith("module.") and "module."+k in src_state.keys():
k_ddp = "module."+k
+ elif k.startswith("module.") and k not in src_state.keys():
+ k_ddp = k.replace("module.", "", 1)
else:
k_ddp = k
if k_ddp in src_state.keys():
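The hunk above makes checkpoint loading tolerant of a "module." prefix mismatch in either direction: a checkpoint saved from a DDP-wrapped model loaded into a plain one, or the reverse. A minimal standalone sketch of the same remapping, assuming plain dicts of tensors (the helper name match_ddp_prefix is illustrative, not funasr API):

    def match_ddp_prefix(src_state: dict, dst_state: dict) -> dict:
        """Remap src_state keys onto dst_state keys, adding or stripping the
        'module.' prefix that DistributedDataParallel puts on parameter names."""
        remapped = {}
        for k in dst_state:
            if not k.startswith("module.") and "module." + k in src_state:
                remapped[k] = src_state["module." + k]        # checkpoint came from a DDP-wrapped model
            elif (k.startswith("module.") and k not in src_state
                  and k.replace("module.", "", 1) in src_state):
                remapped[k] = src_state[k.replace("module.", "", 1)]  # checkpoint came from an unwrapped model
            elif k in src_state:
                remapped[k] = src_state[k]                    # prefixes already agree
        return remapped

    # illustrative usage: model.load_state_dict(match_ddp_prefix(ckpt, model.state_dict()), strict=False)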
@@ -237,6 +239,8 @@
Args:
epoch (int): The current epoch number.
"""
+ if self.use_ddp or self.use_fsdp:
+ dist.barrier()
logging.info(f"Train epoch: {epoch}, rank: {self.local_rank}\n")
model.train()
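The barrier added above makes every rank enter the training epoch together, so the collectives issued later in the loop (the all_reduce on iterator_stop) stay aligned across ranks. Stripped of the trainer context, the pattern is just the following minimal sketch, assuming dist.init_process_group has already been called:

    import torch.distributed as dist

    if use_ddp or use_fsdp:          # same flags as in the trainer
        dist.barrier()               # block until every rank reaches this point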
@@ -246,8 +250,14 @@
optim.zero_grad()
speed_stats = {}
time5 = time.perf_counter()
-
+ iterator_stop = torch.tensor(0).to(self.device)
+
+ dataloader_train.batch_sampler.set_epoch(epoch)
for batch_idx, batch in enumerate(dataloader_train):
+ if self.use_ddp or self.use_fsdp:
+ dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+ if iterator_stop > 0:
+ break
self.batch_total += 1
time1 = time.perf_counter()
speed_stats["data_load"] = f"{time1-time5:0.3f}"
@@ -330,7 +340,7 @@
speed_stats["total_time"] = total_time
lr = scheduler.get_last_lr()[0]
- batch_num_epoch = -1
+ batch_num_epoch = 1
if hasattr(dataloader_train, "__len__"):
batch_num_epoch = len(dataloader_train)
self.log(epoch, batch_idx,
@@ -354,9 +364,14 @@
if (batch_idx+1) % self.save_checkpoint_interval == 0:
self.save_checkpoint(epoch, model=model, optim=optim, scheduler=scheduler, scaler=scaler, step=batch_idx+1)
-
+ else:
+ if self.use_ddp or self.use_fsdp:
+ iterator_stop.fill_(1)
+ dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+
if self.use_ddp or self.use_fsdp:
dist.barrier()
+ iterator_stop = torch.tensor(0).to(self.device)
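Under DDP the ranks can hold different numbers of batches, so a rank that finishes early would leave the others blocked in their next collective. The iterator_stop flag avoids that: every rank all_reduces the flag once per step and breaks as soon as any rank has raised it, while the for/else raises it on the rank that drains its iterator normally; the extra all_reduce in the else keeps the collective counts matched before the barrier. Both halves together, as a self-contained sketch (train_step is a placeholder, not a funasr function):

    import torch
    import torch.distributed as dist

    def run_epoch(dataloader, device, distributed, train_step):
        iterator_stop = torch.tensor(0, device=device)
        for batch in dataloader:
            if distributed:
                dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
                if iterator_stop > 0:          # another rank has already run out of data
                    break
            train_step(batch)
        else:
            # reached only if the loop was not broken: this rank drained its data,
            # so raise the flag for the other ranks' next all_reduce
            if distributed:
                iterator_stop.fill_(1)
                dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
        if distributed:
            dist.barrier()                     # re-align all ranks before the next phase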
@@ -374,6 +389,8 @@
Args:
epoch (int): The current epoch number.
"""
+ if self.use_ddp or self.use_fsdp:
+ dist.barrier()
logging.info(f"Validate epoch: {epoch}, rank: {self.local_rank}\n")
model.eval()
@@ -381,7 +398,13 @@
speed_stats = {}
time5 = time.perf_counter()
+ iterator_stop = torch.tensor(0).to(self.device)
+ dataloader_val.batch_sampler.set_epoch(epoch)
for batch_idx, batch in enumerate(dataloader_val):
+ if self.use_ddp or self.use_fsdp:
+ dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+ if iterator_stop > 0:
+ break
time1 = time.perf_counter()
speed_stats["data_load"] = f"{time1 - time5:0.3f}"
batch = to_device(batch, self.device)
@@ -395,7 +418,7 @@
# Apply weighted averaging for loss and stats
loss = (loss * weight.type(loss.dtype)).sum()
# if distributed, this method can also apply all_reduce()
- stats, weight = recursive_average(stats, weight, distributed=True)
+ # stats, weight = recursive_average(stats, weight, distributed=True)
if self.use_ddp or self.use_fsdp:
dist.all_reduce(weight, op=dist.ReduceOp.SUM)
# Now weight is summation over all workers
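With recursive_average commented out, the reduction is done by hand: weight is all_reduced with SUM so it becomes the summation over all workers, against which the weighted loss can then be normalised. A standalone sketch of that cross-rank weighted average (the helper name is illustrative, and the final normalisation is an assumption about surrounding code not shown in this hunk):

    import torch
    import torch.distributed as dist

    def weighted_mean_across_ranks(value: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """sum(value * weight) / sum(weight), with both sums taken over every rank."""
        numer = (value * weight.type(value.dtype)).sum()
        denom = weight.sum().type(value.dtype)
        if dist.is_available() and dist.is_initialized():
            dist.all_reduce(numer, op=dist.ReduceOp.SUM)
            dist.all_reduce(denom, op=dist.ReduceOp.SUM)
        return numer / denom.clamp(min=1e-8)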
@@ -418,7 +441,7 @@
self.val_loss_avg = val_loss_avg.detach().cpu().item() / self.world_size
self.val_acc_avg = val_acc_avg.detach().cpu().item() / self.world_size
- batch_num_epoch = -1
+ batch_num_epoch = 1
if hasattr(dataloader_val, "__len__"):
batch_num_epoch = len(dataloader_val)
self.log(epoch, batch_idx,
@@ -431,11 +454,17 @@
tag="val",
)
+ else:
+ if self.use_ddp or self.use_fsdp:
+ iterator_stop.fill_(1)
+ dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+
self.val_acc_list.append(self.val_acc_avg)
model.train()
-
+
if self.use_ddp or self.use_fsdp:
dist.barrier()
+ iterator_stop = torch.tensor(0).to(self.device)
def log(self,
--
Gitblit v1.9.1