From 2e8dc0933f31bf449ecc11ac1b4dc1833fdaad42 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 20 Feb 2024 18:01:15 +0800
Subject: [PATCH] trainer: interval-based per-rank logging for train/finetune
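
Improve train/finetune logging in funasr/train_utils/trainer.py:

* read log_interval from kwargs (default 50) and refresh the progress bar
  and TensorBoard scalars only every log_interval steps
* track batch_total, a running batch counter across epochs, and show it in
  the progress-bar description
* prefix progress bars and TensorBoard tags with the local rank so output
  from different distributed workers can be told apart
* replace the per-step GPU memory prints (values divided by 1024^3 but
  labeled MB) with a GB summary inside the progress-bar description
* fix the validation progress bar title and index validation scalars by
  len(dataloader_val) instead of len(dataloader_train)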
---
funasr/train_utils/trainer.py | 78 ++++++++++++++++++++-------------------
 1 file changed, 40 insertions(+), 38 deletions(-)
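
Note: a minimal sketch of how the new option is expected to be supplied,
assuming the training entry point forwards extra keyword arguments to
Trainer as kwargs; everything below except log_interval is an illustrative
placeholder, not the actual FunASR call site:

    # hypothetical call site; only log_interval is introduced by this patch
    trainer = Trainer(
        model=model,
        optim=optim,
        dataloader_train=dataloader_train,
        dataloader_val=dataloader_val,
        log_interval=100,  # overrides the default of 50 from kwargs.get
    )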
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index d144019..cc7b215 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -69,6 +69,8 @@
self.device = next(model.parameters()).device
self.avg_nbest_model = kwargs.get("avg_nbest_model", 5)
self.kwargs = kwargs
+ self.log_interval = kwargs.get("log_interval", 50)
+ self.batch_total = 0
try:
@@ -186,7 +188,7 @@
epoch (int): The current epoch number.
"""
self.model.train()
- pbar = tqdm(colour="blue", desc=f"Training Epoch: {epoch + 1}", total=len(self.dataloader_train),
+ pbar = tqdm(colour="blue", desc=f"rank: {self.local_rank}, Training Epoch: {epoch + 1}", total=len(self.dataloader_train),
dynamic_ncols=True)
# Set the number of steps for gradient accumulation
@@ -195,7 +197,9 @@
self.optim.zero_grad()
speed_stats = {}
time5 = time.perf_counter()
+
for batch_idx, batch in enumerate(self.dataloader_train):
+ self.batch_total += 1
time1 = time.perf_counter()
speed_stats["data_load"] = f"{time1-time5:0.3f}"
@@ -204,25 +208,10 @@
my_context = self.model.no_sync if batch_idx % accum_grad != 0 else nullcontext
with my_context():
time2 = time.perf_counter()
- print("before, GPU, memory: {:.1} MB, "
- "{:.1} MB, "
- "{:.1} MB, "
- "{:.1} MB".format(torch.cuda.memory_allocated()/1024/1024/1024,
- torch.cuda.max_memory_allocated()/1024/1024/1024,
- torch.cuda.memory_reserved()/1024/1024/1024,
- torch.cuda.max_memory_reserved()/1024/1024/1024,
- ))
retval = self.model(**batch)
torch.cuda.empty_cache()
- print("after, GPU, memory: {:.1} MB, "
- "{:.1} MB, "
- "{:.1} MB, "
- "{:.1} MB".format(torch.cuda.memory_allocated()/1024/1024/1024,
- torch.cuda.max_memory_allocated()/1024/1024/1024,
- torch.cuda.memory_reserved()/1024/1024/1024,
- torch.cuda.max_memory_reserved()/1024/1024/1024,
- ))
+
time3 = time.perf_counter()
speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
loss, stats, weight = retval
@@ -273,24 +262,35 @@
speed_stats["total_time"] = total_time
- pbar.update(1)
- if self.local_rank == 0:
+
+ if (batch_idx+1) % self.log_interval == 0 or (batch_idx+1) == len(self.dataloader_train):
+                pbar.update((batch_idx + 1) - pbar.n)  # advance to the current step; the last interval may be partial
+                gpu_info = "GPU memory: allocated {:.3f} GB, " \
+                           "max allocated {:.3f} GB, " \
+                           "reserved {:.3f} GB, " \
+                           "max reserved {:.3f} GB".format(torch.cuda.memory_allocated() / 1024 ** 3,
+                                                           torch.cuda.max_memory_allocated() / 1024 ** 3,
+                                                           torch.cuda.memory_reserved() / 1024 ** 3,
+                                                           torch.cuda.max_memory_reserved() / 1024 ** 3,
+                                                           )
description = (
- f"Train epoch: {epoch}/{self.max_epoch}, "
- f"step {batch_idx}/{len(self.dataloader_train)}, "
- f"{speed_stats}, "
+ f"rank: {self.local_rank}, "
+ f"epoch: {epoch}/{self.max_epoch}, "
+ f"step: {batch_idx}/{len(self.dataloader_train)}, total: {self.batch_total}, "
f"(loss: {loss.detach().cpu().item():.3f}), "
- f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}"
+ f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "
+ f"{speed_stats}, "
+ f"{gpu_info}"
)
pbar.set_description(description)
if self.writer:
- self.writer.add_scalar('Loss/train', loss.item(),
+ self.writer.add_scalar(f'rank{self.local_rank}_Loss/train', loss.item(),
epoch*len(self.dataloader_train) + batch_idx)
for key, var in stats.items():
- self.writer.add_scalar(f'{key}/train', var.item(),
+ self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', var.item(),
epoch * len(self.dataloader_train) + batch_idx)
for key, var in speed_stats.items():
- self.writer.add_scalar(f'{key}/train', eval(var),
+                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', float(var),  # speed_stats holds formatted floats; avoid eval
epoch * len(self.dataloader_train) + batch_idx)
# if batch_idx == 2:
@@ -307,7 +307,7 @@
"""
self.model.eval()
with torch.no_grad():
- pbar = tqdm(colour="red", desc=f"Training Epoch: {epoch + 1}", total=len(self.dataloader_val),
+ pbar = tqdm(colour="red", desc=f"rank: {self.local_rank}, Validation Epoch: {epoch + 1}", total=len(self.dataloader_val),
dynamic_ncols=True)
speed_stats = {}
time5 = time.perf_counter()
@@ -335,22 +335,24 @@
loss = loss
time4 = time.perf_counter()
- pbar.update(1)
- if self.local_rank == 0:
+
+ if (batch_idx+1) % self.log_interval == 0 or (batch_idx+1) == len(self.dataloader_val):
+                    pbar.update((batch_idx + 1) - pbar.n)  # advance to the current step; the last interval may be partial
description = (
+ f"rank: {self.local_rank}, "
f"validation epoch: {epoch}/{self.max_epoch}, "
- f"step {batch_idx}/{len(self.dataloader_train)}, "
- f"{speed_stats}, "
+ f"step: {batch_idx}/{len(self.dataloader_val)}, "
f"(loss: {loss.detach().cpu().item():.3f}), "
- f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}"
+ f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "
+                        f"{speed_stats}"
)
pbar.set_description(description)
if self.writer:
- self.writer.add_scalar('Loss/val', loss.item(),
- epoch*len(self.dataloader_train) + batch_idx)
+ self.writer.add_scalar(f"rank{self.local_rank}_Loss/val", loss.item(),
+ epoch*len(self.dataloader_val) + batch_idx)
for key, var in stats.items():
- self.writer.add_scalar(f'{key}/val', var.item(),
- epoch * len(self.dataloader_train) + batch_idx)
+ self.writer.add_scalar(f'rank{self.local_rank}_{key}/val', var.item(),
+ epoch * len(self.dataloader_val) + batch_idx)
for key, var in speed_stats.items():
- self.writer.add_scalar(f'{key}/val', eval(var),
- epoch * len(self.dataloader_train) + batch_idx)
\ No newline at end of file
+                    self.writer.add_scalar(f'rank{self.local_rank}_{key}/val', float(var),  # speed_stats holds formatted floats; avoid eval
+ epoch * len(self.dataloader_val) + batch_idx)
\ No newline at end of file
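
With this patch, TensorBoard tags carry a per-rank prefix (e.g.
rank0_Loss/train, rank0_Loss/val). A minimal sketch of reading them back
with the standard tensorboard package; the tag comes from the patch, the
log directory is a hypothetical example:

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    ea = EventAccumulator("exp/tensorboard")  # hypothetical event-log directory
    ea.Reload()  # parse event files from disk
    for event in ea.Scalars("rank0_Loss/train"):
        print(event.step, event.value)  # global step and logged training loss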
--
Gitblit v1.9.1