From 73f3f2f91b8549371d8a62ca41355a301d6fcc50 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 20 May 2024 15:32:40 +0800
Subject: [PATCH] Merge branch 'dev_gzf_deepspeed' of github.com:alibaba-damo-academy/FunASR into dev_gzf_deepspeed merge
---
funasr/train_utils/trainer_ds.py | 36 +++++++++++++++++-------------------
 1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/funasr/train_utils/trainer_ds.py b/funasr/train_utils/trainer_ds.py
index 78cfceb..bb9fca6 100644
--- a/funasr/train_utils/trainer_ds.py
+++ b/funasr/train_utils/trainer_ds.py
@@ -168,8 +168,7 @@
"""
step_in_epoch = None if step is None else step_in_epoch
if self.use_deepspeed:
- with torch.no_grad():
- model.save_checkpoint(save_dir=model_dir, tag=tag, client_state=info_dict)
+
logging.info(f"Save checkpoint: {epoch}, rank: {self.local_rank}\n")
# self.step_or_epoch += 1
state = {
@@ -273,8 +272,7 @@
elif self.use_fsdp:
pass
- step_in_epoch = None if step is None else step_in_epoch
- if self.rank == 0:
+ elif self.rank == 0:
logging.info(f"Save checkpoint: {epoch}, rank: {self.local_rank}\n")
# self.step_or_epoch += 1
state = {
@@ -385,8 +383,8 @@
if self.use_deepspeed:
ckpt = os.path.join(self.output_dir, "model.pt")
- if os.path.isfile(ckpt):
- _, checkpoint = model_engine.load_checkpoint(self.output_dir, "model.pt")
+ if os.path.exists(ckpt):
+ _, checkpoint = model.load_checkpoint(self.output_dir, "model.pt")
self.saved_ckpts = checkpoint["saved_ckpts"]
self.val_acc_step_or_eoch = (
@@ -574,12 +572,12 @@
loss_dict["lr"] = scheduler.get_last_lr()[0]
loss_dict["batch_num_epoch"] = len(dataloader_train)
- self.val_loss_avg = (
- self.val_loss_avg * batch_idx + loss_dict["loss"].detach().cpu().item()
+ self.train_loss_avg = (
+ self.train_loss_avg * batch_idx + loss_dict["loss"].detach().cpu().item()
) / (batch_idx + 1)
- if "acc" in stats:
- self.val_acc_avg = (
- self.val_acc_avg * batch_idx + loss_dict["stats"]["acc"].detach().cpu().item()
+ if "acc" in loss_dict["stats"]:
+ self.train_acc_avg = (
+ self.train_acc_avg * batch_idx + loss_dict["stats"]["acc"].detach().cpu().item()
) / (batch_idx + 1)
self.log(loss_dict, tag="train")
@@ -612,12 +610,12 @@
time_beg = time.perf_counter()
if self.use_ddp or self.use_fsdp or self.use_deepspeed:
- val_loss_avg = torch.tensor(self.val_loss_avg, dtype=torch.float32).to(self.device)
- val_acc_avg = torch.tensor(self.val_acc_avg, dtype=torch.float32).to(self.device)
- dist.all_reduce(val_loss_avg, op=dist.ReduceOp.SUM)
- dist.all_reduce(val_acc_avg, op=dist.ReduceOp.SUM)
- self.val_loss_avg = val_loss_avg.detach().cpu().item() / self.world_size
- self.val_acc_avg = val_acc_avg.detach().cpu().item() / self.world_size
+ train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(self.device)
+ train_acc_avg = torch.tensor(self.train_acc_avg, dtype=torch.float32).to(self.device)
+ dist.all_reduce(train_loss_avg, op=dist.ReduceOp.SUM)
+ dist.all_reduce(train_acc_avg, op=dist.ReduceOp.SUM)
+ self.train_loss_avg = train_loss_avg.detach().cpu().item() / self.world_size
+ self.train_acc_avg = train_acc_avg.detach().cpu().item() / self.world_size
def forward_step(self, model, batch, loss_dict={}):
dtype = torch.bfloat16
@@ -712,7 +710,7 @@
"data_split_num": kwargs.get("data_split_num", 1),
"log_step": batch_idx + kwargs.get("start_step", 0),
"batch_total": batch_idx,
- "step_in_epoch": step_in_epoch,
+ "step_in_epoch": batch_idx,
"lr": 0.0,
}
@@ -740,7 +738,7 @@
self.val_loss_avg = (
self.val_loss_avg * batch_idx + loss_dict["loss"].detach().cpu().item()
) / (batch_idx + 1)
- if "acc" in stats:
+ if "acc" in loss_dict["stats"]:
self.val_acc_avg = (
self.val_acc_avg * batch_idx
+ loss_dict["stats"]["acc"].detach().cpu().item()
--
Gitblit v1.9.1