From f57b68121a526baea43b2e93f4540d8a2995f633 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 29 Apr 2024 15:15:24 +0800
Subject: [PATCH] batch
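
Track training progress at step granularity so that interrupted runs can
resume mid-epoch: checkpoints now record step, step_in_epoch, data_split_i,
data_split_num and batch_total, and these fields are restored when the
checkpoint is loaded, starting again from the saved epoch instead of
epoch + 1. Validation and checkpoint saving are triggered by the new
step_in_epoch counter, which keeps counting across data splits.

Also add optional Weights & Biases logging (use_wandb, wandb_token,
wandb_project, wandb_team, wandb_exp_name), an optional reset_gpu_cache
flag that empties the CUDA cache once reserved GPU memory exceeds 70 GB,
and log messages that report self.rank and the path of the current best
checkpoint.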

---
 funasr/train_utils/trainer.py |   98 +++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 82 insertions(+), 16 deletions(-)
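
Note (not part of the commit; git am ignores text in this area): the new
options are read via kwargs.get(...) inside trainer.py, so they are expected
to arrive through the trainer's keyword arguments. Below is a minimal sketch
of such a kwargs fragment, assuming the trainer is constructed from a config
dict; only the option names come from this patch, all values are placeholders:

    # hypothetical kwargs forwarded to the trainer in trainer.py;
    # the names match the kwargs.get(...) calls added by this patch
    trainer_conf = dict(
        use_wandb=True,                 # enable optional Weights & Biases logging
        wandb_token="<wandb-api-key>",  # forwarded to wandb.login(key=...)
        wandb_project="my_project",     # wandb.init(project=...)
        wandb_team="my_team",           # wandb.init(entity=...)
        wandb_exp_name="my_exp",        # wandb.init(name=...)
        reset_gpu_cache=True,           # empty the CUDA cache when reserved memory exceeds 70 GiB
    )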

diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index 713e171..e86420c 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -15,6 +15,11 @@
 from funasr.train_utils.average_nbest_models import average_checkpoints
 from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
 
+try:
+    import wandb
+except ImportError:
+    wandb = None
+
 
 @contextmanager
 def maybe_autocast(enabled):
@@ -108,6 +113,23 @@
         self.val_acc_step_or_eoch = {}
         self.val_loss_step_or_eoch = {}
 
+        self.reset_gpu_cache = kwargs.get("reset_gpu_cache", False)
+        self.start_data_split_i = 0
+        self.start_step = 0
+        self.step_in_epoch = 0
+        self.use_wandb = kwargs.get("use_wandb", False)
+        if self.use_wandb and wandb is not None:
+            wandb.login(key=kwargs.get("wandb_token"))
+            wandb.init(
+                config=kwargs,
+                project=kwargs.get("wandb_project", "my_project"),
+                entity=kwargs.get("wandb_team", "my_team"),
+                name=kwargs.get("wandb_exp_name", "my_exp"),
+                dir=output_dir,
+                job_type="training",
+                reinit=True,
+            )
+
     def save_checkpoint(
         self,
         epoch,
@@ -116,6 +138,8 @@
         optim=None,
         scheduler=None,
         scaler=None,
+        step_in_epoch=None,
+        **kwargs,
     ):
         """
         Saves a checkpoint containing the model's state, the optimizer's state,
@@ -126,6 +150,7 @@
             epoch (int): The epoch number at which the checkpoint is being saved.
         """
 
+        step_in_epoch = None if step is None else step_in_epoch
         if self.rank == 0:
             logging.info(f"Save checkpoint: {epoch}, rank: {self.local_rank}\n")
             # self.step_or_epoch += 1
@@ -139,7 +164,13 @@
                 "val_loss_step_or_eoch": self.val_loss_step_or_eoch,
                 "best_step_or_epoch": self.best_step_or_epoch,
                 "avg_keep_nbest_models_type": self.avg_keep_nbest_models_type,
+                "step": step,
+                "step_in_epoch": step_in_epoch,
+                "data_split_i": kwargs.get("data_split_i", 0),
+                "data_split_num": kwargs.get("data_split_num", 1),
+                "batch_total": self.batch_total,
             }
+            step = step_in_epoch
             if hasattr(model, "module"):
                 state["state_dict"] = model.module.state_dict()
 
@@ -173,7 +204,7 @@
                     )
                 else:
                     logging.info(
-                        f"No improvement in acc: {self.val_acc_step_or_eoch[ckpt_name]:.4f} < {self.val_acc_step_or_eoch[self.best_step_or_epoch]:.4f}"
+                        f"No improvement in acc: {self.val_acc_step_or_eoch[ckpt_name]:.4f} < {self.val_acc_step_or_eoch[self.best_step_or_epoch]:.4f}, {os.path.join(self.output_dir, self.best_step_or_epoch)}"
                     )
             elif self.avg_keep_nbest_models_type == "loss":
                 if (
@@ -188,7 +219,7 @@
                     )
                 else:
                     logging.info(
-                        f"No improvement in loss: {self.val_loss_step_or_eoch[ckpt_name]:.4f} > {self.val_loss_step_or_eoch[self.best_step_or_epoch]:.4f}"
+                        f"No improvement in loss: {self.val_loss_step_or_eoch[ckpt_name]:.4f} > {self.val_loss_step_or_eoch[self.best_step_or_epoch]:.4f}, {os.path.join(self.output_dir, self.best_step_or_epoch)}"
                     )
             else:
                 print("Undo")
@@ -229,7 +260,7 @@
             ckpt = os.path.join(self.output_dir, "model.pt")
             if os.path.isfile(ckpt):
                 checkpoint = torch.load(ckpt, map_location="cpu")
-                self.start_epoch = checkpoint["epoch"] + 1
+                self.start_epoch = checkpoint["epoch"]
                 # self.model.load_state_dict(checkpoint['state_dict'])
                 src_state = checkpoint["state_dict"]
                 dst_state = model.state_dict()
@@ -265,6 +296,17 @@
                 self.best_step_or_epoch = (
                     checkpoint["best_step_or_epoch"] if "best_step_or_epoch" in checkpoint else ""
                 )
+                self.start_data_split_i = (
+                    checkpoint["data_split_i"] if "data_split_i" in checkpoint else 0
+                )
+                self.batch_total = checkpoint["batch_total"] if "batch_total" in checkpoint else 0
+                self.start_step = checkpoint["step"] if "step" in checkpoint else 0
+                self.start_step = 0 if self.start_step is None else self.start_step
+                self.step_in_epoch = (
+                    checkpoint["step_in_epoch"] if "step_in_epoch" in checkpoint else 0
+                )
+                self.step_in_epoch = 0 if self.step_in_epoch is None else self.step_in_epoch
+
                 model.to(self.device)
                 print(f"Checkpoint loaded successfully from '{ckpt}'")
             else:
@@ -292,7 +334,7 @@
         """
         if self.use_ddp or self.use_fsdp:
             dist.barrier()
-        logging.info(f"Train epoch: {epoch}, rank: {self.local_rank}\n")
+        logging.info(f"Train epoch: {epoch}, rank: {self.rank}\n")
         model.train()
 
         # Set the number of steps for gradient accumulation
@@ -312,6 +354,7 @@
                 if iterator_stop > 0:
                     break
             self.batch_total += 1
+            self.step_in_epoch += 1
             time1 = time.perf_counter()
             speed_stats["data_load"] = f"{time1-time_beg:0.3f}"
 
@@ -324,6 +367,12 @@
                 time2 = time.perf_counter()
                 with maybe_autocast(self.use_fp16):
                     retval = model(**batch)
+
+                    if (
+                        self.reset_gpu_cache
+                        and (torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024) > 70  # reserved memory in GiB; threshold hard-coded at 70
+                    ):
+                        torch.cuda.empty_cache()
 
                 time3 = time.perf_counter()
                 speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
@@ -408,6 +457,7 @@
                 self.log(
                     epoch,
                     batch_idx,
+                    step_in_epoch=self.step_in_epoch,
                     batch_num_epoch=batch_num_epoch,
                     lr=lr,
                     loss=loss.detach().cpu().item(),
@@ -419,16 +469,17 @@
                     data_split_num=kwargs.get("data_split_num", 1),
                 )
 
-            if (batch_idx + 1) % self.validate_interval == 0:
+            if self.step_in_epoch % self.validate_interval == 0:
                 self.validate_epoch(
                     model=model,
                     dataloader_val=dataloader_val,
                     epoch=epoch,
                     writer=writer,
                     step=batch_idx + 1,
+                    step_in_epoch=self.step_in_epoch,
                 )
 
-            if (batch_idx + 1) % self.save_checkpoint_interval == 0:
+            if self.step_in_epoch % self.save_checkpoint_interval == 0:
                 self.save_checkpoint(
                     epoch,
                     model=model,
@@ -436,6 +487,9 @@
                     scheduler=scheduler,
                     scaler=scaler,
                     step=batch_idx + 1,
+                    step_in_epoch=self.step_in_epoch,
+                    data_split_i=kwargs.get("data_split_i", 0),
+                    data_split_num=kwargs.get("data_split_num", 1),
                 )
 
             time_beg = time.perf_counter()
@@ -465,7 +519,7 @@
         """
         if self.use_ddp or self.use_fsdp:
             dist.barrier()
-        logging.info(f"Validate epoch: {epoch}, rank: {self.local_rank}\n")
+        logging.info(f"Validate epoch: {epoch}, rank: {self.rank}\n")
         model.eval()
 
         with torch.no_grad():
@@ -543,10 +597,10 @@
                     iterator_stop.fill_(1)
                     dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
 
-        if kwargs.get("step", None) is None:
+        if kwargs.get("step_in_epoch", None) is None:
             ckpt_name = f"model.pt.ep{epoch}"
         else:
-            ckpt_name = f'model.pt.ep{epoch}.{kwargs.get("step")}'
+            ckpt_name = f'model.pt.ep{epoch}.{kwargs.get("step_in_epoch")}'
         self.val_acc_step_or_eoch[ckpt_name] = self.val_acc_avg
         self.val_loss_step_or_eoch[ckpt_name] = self.val_loss_avg
         model.train()
@@ -559,6 +613,7 @@
         self,
         epoch=0,
         batch_idx=0,
+        step_in_epoch=0,
         batch_num_epoch=-1,
         lr=0.0,
         loss=0.0,
@@ -589,10 +644,10 @@
             acc_avg_epoch = getattr(self, f"{tag}_acc_avg")
             description = (
                 f"{tag}, "
-                f"rank: {self.local_rank}, "
+                f"rank: {self.rank}, "
                 f"epoch: {epoch}/{self.max_epoch}, "
                 f"data_slice: {data_split_i}/{data_split_num}, "
-                f"step: {batch_idx + 1}/{batch_num_epoch}, total step: {self.batch_total}, "
+                f"step_in_slice: {batch_idx + 1}/{batch_num_epoch}, step_in_epoch: {step_in_epoch}, total step: {self.batch_total}, "
                 f"(loss_avg_rank: {loss:.3f}), "
                 f"(loss_avg_epoch: {loss_avg_epoch:.3f}), "
                 f"(ppl_avg_epoch: {math.exp(loss_avg_epoch):.3e}), "
@@ -604,18 +659,29 @@
             )
             logging.info(description)
 
+            description_dict = {
+                f"rank{self.rank}_loss/{tag}": loss,
+                f"rank{self.rank}_lr/{tag}": lr,
+            }
+
             if writer is not None:
-                writer.add_scalar(f"rank{self.local_rank}_loss/{tag}", loss, self.batch_total)
-                writer.add_scalar(f"rank{self.local_rank}_lr/{tag}", lr, self.batch_total)
-                writer.add_scalar(f"rank{self.local_rank}_lr/{tag}", lr, self.batch_total)
+                writer.add_scalar(f"rank{self.rank}_loss/{tag}", loss, self.batch_total)
+                writer.add_scalar(f"rank{self.rank}_lr/{tag}", lr, self.batch_total)
                 for key, var in stats.items():
                     writer.add_scalar(
-                        f"stats_rank{self.local_rank}_{key}/{tag}", var.item(), self.batch_total
+                        f"stats_rank{self.rank}_{key}/{tag}", var.item(), self.batch_total
                     )
+                    description_dict[f"stats_rank{self.rank}_{key}/{tag}"] = var.item()
                 for key, var in speed_stats.items():
                     writer.add_scalar(
-                        f"stats_rank{self.local_rank}_{key}/{tag}", eval(var), self.batch_total
+                        f"stats_rank{self.rank}_{key}/{tag}", eval(var), self.batch_total
                     )
+                    description_dict[f"stats_rank{self.rank}_{key}/{tag}"] = eval(var)
+            if self.use_wandb and wandb is not None:
+                wandb.log(
+                    description_dict,
+                    step=self.batch_total,
+                )
 
     def close(self, writer=None):
 

--
Gitblit v1.9.1