From 7357bb6c45eb3f0b4d2be5a4b025385ff9eec9e5 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 21 Feb 2024 14:34:36 +0800
Subject: [PATCH] update train recipe
---
funasr/train_utils/trainer.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
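Note: the hunks below time each training epoch with time.perf_counter() and prepend a
wall-clock timestamp to the tqdm progress description; they rely on the `time` module
already being imported at the top of trainer.py. A minimal standalone sketch of the same
epoch-timing/ETA logic (the `run_epoch` callback is a hypothetical placeholder, not part
of funasr):

    import time
    from datetime import datetime

    def train(max_epoch, run_epoch):
        for epoch in range(1, max_epoch + 1):
            t0 = time.perf_counter()
            run_epoch(epoch)                      # one full pass over the training data
            hours = (time.perf_counter() - t0) / 3600.0
            eta = (max_epoch - epoch) * hours     # naive ETA: remaining epochs * last epoch cost
            stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            print(f"{stamp}, epoch {epoch}/{max_epoch}: "
                  f"{hours:.3f} h elapsed, ~{eta:.3f} h remaining")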
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index 61b9004..d24023d 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -3,6 +3,7 @@
import torch
import logging
from tqdm import tqdm
+from datetime import datetime
import torch.distributed as dist
from contextlib import nullcontext
# from torch.utils.tensorboard import SummaryWriter
@@ -156,7 +157,7 @@
self._resume_checkpoint(self.output_dir)
for epoch in range(self.start_epoch, self.max_epoch + 1):
-
+ time1 = time.perf_counter()
self._train_epoch(epoch)
@@ -178,6 +179,9 @@
self.scheduler.step()
+ time2 = time.perf_counter()
+ time_elapsed = (time2 - time1)/3600.0
+ print(f"time_elapsed_epoch: {time_elapsed:.3f} hours, estimated time to finish: {(self.max_epoch-epoch)*time_elapsed:.3f} hours")
if self.rank == 0:
average_checkpoints(self.output_dir, self.avg_nbest_model)
@@ -283,7 +287,10 @@
torch.cuda.max_memory_reserved()/1024/1024/1024,
)
lr = self.scheduler.get_last_lr()[0]
+ time_now = datetime.now()
+ time_now = time_now.strftime("%Y-%m-%d %H:%M:%S")
description = (
+ f"{time_now}, "
f"rank: {self.local_rank}, "
f"epoch: {epoch}/{self.max_epoch}, "
f"step: {batch_idx+1}/{len(self.dataloader_train)}, total: {self.batch_total}, "
--
Gitblit v1.9.1