From 2ac38adbe5f4e1374a079e032ed4b504351a207c Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 23 Apr 2024 18:08:57 +0800
Subject: [PATCH] Dev gzf exp (#1647)
---
funasr/train_utils/trainer.py | 7 +++++++
 1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index caaef38..3ee6885 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -252,6 +252,7 @@
dataloader_val=None,
epoch=None,
writer=None,
+ **kwargs,
):
"""
Defines the training process for a single epoch with gradient accumulation.
@@ -374,6 +375,8 @@
stats=stats,
writer=writer,
tag="train",
+ data_split_i=kwargs.get("data_split_i", 0),
+ data_split_num=kwargs.get("data_split_num", 1),
)
if (batch_idx + 1) % self.validate_interval == 0:
@@ -507,6 +510,9 @@
stats=None,
writer=None,
tag="train",
+ data_split_i=0,
+ data_split_num=1,
+ **kwargs,
):
if (batch_idx + 1) % self.log_interval == 0:
@@ -526,6 +532,7 @@
f"{tag}, "
f"rank: {self.local_rank}, "
f"epoch: {epoch}/{self.max_epoch}, "
+ f"data_slice: {data_split_i}/{data_split_num}, "
f"step: {batch_idx + 1}/{batch_num_epoch}, total step: {self.batch_total}, "
f"(loss_avg_rank: {loss:.3f}), "
f"(loss_avg_epoch: {loss_avg_epoch:.3f}), "
--
Gitblit v1.9.1