From 00d0df3a1018c63ec8c5d13e611f53c564c0a7e2 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 06 May 2024 22:17:25 +0800
Subject: [PATCH] Dev gzf decoding (#1695)

---
 funasr/train_utils/trainer.py |   25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index e86420c..01e2924 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -169,6 +169,8 @@
                 "data_split_i": kwargs.get("data_split_i", 0),
                 "data_split_num": kwargs.get("data_split_num", 1),
                 "batch_total": self.batch_total,
+                "train_loss_avg": kwargs.get("train_loss_avg", 0),
+                "train_acc_avg": kwargs.get("train_acc_avg", 0),
             }
             step = step_in_epoch
             if hasattr(model, "module"):
@@ -306,7 +308,13 @@
                     checkpoint["step_in_epoch"] if "step_in_epoch" in checkpoint else 0
                 )
                 self.step_in_epoch = 0 if self.step_in_epoch is None else self.step_in_epoch
-
+                print(checkpoint.get("train_acc_avg", 0))
+                self.train_acc_avg = (
+                    checkpoint["train_acc_avg"] if "train_acc_avg" in checkpoint else 0
+                )
+                self.train_loss_avg = (
+                    checkpoint["train_loss_avg"] if "train_loss_avg" in checkpoint else 0
+                )
                 model.to(self.device)
                 print(f"Checkpoint loaded successfully from '{ckpt}'")
             else:
@@ -400,12 +408,13 @@
                 speed_stats["backward_time"] = f"{time4 - time3:0.3f}"
 
                 self.train_loss_avg = (
-                    self.train_loss_avg * batch_idx + loss.detach().cpu().item()
-                ) / (batch_idx + 1)
+                    self.train_loss_avg * (self.step_in_epoch - 1) + loss.detach().cpu().item()
+                ) / self.step_in_epoch
                 if "acc" in stats:
                     self.train_acc_avg = (
-                        self.train_acc_avg * batch_idx + stats["acc"].detach().cpu().item()
-                    ) / (batch_idx + 1)
+                        self.train_acc_avg * (self.step_in_epoch - 1)
+                        + stats["acc"].detach().cpu().item()
+                    ) / self.step_in_epoch
                 if self.use_ddp or self.use_fsdp:
                     train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(
                         self.device
@@ -457,6 +466,7 @@
                 self.log(
                     epoch,
                     batch_idx,
+                    log_step=batch_idx + kwargs.get("start_step", 0),
                     step_in_epoch=self.step_in_epoch,
                     batch_num_epoch=batch_num_epoch,
                     lr=lr,
@@ -490,6 +500,8 @@
                     step_in_epoch=self.step_in_epoch,
                     data_split_i=kwargs.get("data_split_i", 0),
                     data_split_num=kwargs.get("data_split_num", 1),
+                    train_loss_avg=self.train_loss_avg,
+                    train_acc_avg=self.train_acc_avg,
                 )
 
             time_beg = time.perf_counter()
@@ -623,11 +635,12 @@
         tag="train",
         data_split_i=0,
         data_split_num=1,
+        log_step=None,
         **kwargs,
     ):
 
         if (batch_idx + 1) % self.log_interval == 0:
-
+            batch_idx = log_step if log_step is not None else batch_idx
             gpu_info = (
                 "GPU, memory: usage: {:.3f} GB, "
                 "peak: {:.3f} GB, "

--
Gitblit v1.9.1