From a750595594321833b48dc19798eed66876a100b4 Mon Sep 17 00:00:00 2001
From: ming030890 <67713085+ming030890@users.noreply.github.com>
Date: Fri, 04 Jul 2025 14:25:54 +0800
Subject: [PATCH] Fix a few issues found during fine-tuning (#2582)
---
 funasr/bin/train_ds.py           | 20 ++++++++++++++++++--
 funasr/train_utils/trainer.py    |  2 +-
 funasr/train_utils/trainer_ds.py | 16 +++++++++-------
3 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/funasr/bin/train_ds.py b/funasr/bin/train_ds.py
index 10a5d08..2241b0c 100644
--- a/funasr/bin/train_ds.py
+++ b/funasr/bin/train_ds.py
@@ -149,7 +149,7 @@
     dataloader = dataloader_class(**kwargs)
     # dataloader_tr, dataloader_val = dataloader_class(**kwargs)
 
-    scaler = GradScaler(enabled=True) if trainer.use_fp16 or trainer.use_bf16 else None
+    scaler = GradScaler(enabled=True) if trainer.use_fp16 else None
     scaler = ShardedGradScaler(enabled=trainer.use_fp16) if trainer.use_fsdp else scaler
 
     trainer.resume_checkpoint(
@@ -158,6 +158,10 @@
         scheduler=scheduler,
         scaler=scaler,
     )
+
+    early_stopping_patience = kwargs.get("train_conf", {}).get("early_stopping_patience", 0)
+    best_val_loss = float("inf")
+    epochs_no_improve = 0
 
     dataloader_tr, dataloader_val = None, None
     for epoch in range(trainer.start_epoch, trainer.max_epoch):
@@ -199,7 +203,19 @@
 
         trainer.start_data_split_i = 0
         trainer.validate_epoch(model=model, dataloader_val=dataloader_val, epoch=epoch + 1)
-        scheduler.step()
+        current_val = trainer.val_loss_avg
+
+        if current_val < best_val_loss:
+            logging.info(f"current_val: {current_val}, best_val_loss: {best_val_loss}")
+            best_val_loss = current_val
+            epochs_no_improve = 0
+        else:
+            epochs_no_improve += 1
+            logging.info(f"No val_loss improvement for {epochs_no_improve}/{early_stopping_patience} epochs")
+            if early_stopping_patience > 0 and epochs_no_improve >= early_stopping_patience:
+                logging.info(f"Early stopping triggered at epoch {epoch+1}")
+                break
+
         trainer.step_in_epoch = 0
         trainer.save_checkpoint(
             epoch + 1, model=model, optim=optim, scheduler=scheduler, scaler=scaler
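
[Note] The hunk above adds patience-based early stopping keyed on
trainer.val_loss_avg (initialized in the trainer_ds.py hunk further down).
A minimal standalone sketch of the same pattern, assuming hypothetical
train_one_epoch/validate callables in place of the trainer:

    import logging

    def fit(start_epoch, max_epoch, train_one_epoch, validate, patience=0):
        # patience == 0 disables early stopping, matching the patch default.
        best_val_loss = float("inf")
        epochs_no_improve = 0
        for epoch in range(start_epoch, max_epoch):
            train_one_epoch(epoch)
            val_loss = validate(epoch)
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                epochs_no_improve = 0
            else:
                epochs_no_improve += 1
                if patience > 0 and epochs_no_improve >= patience:
                    logging.info(f"Early stopping at epoch {epoch + 1}")
                    break
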
diff --git a/funasr/train_utils/trainer.py b/funasr/train_utils/trainer.py
index d0be9c8..3e69985 100644
--- a/funasr/train_utils/trainer.py
+++ b/funasr/train_utils/trainer.py
@@ -715,7 +715,7 @@
         if self.use_wandb and wandb is not None:
             wandb.log(
                 description_dict,
-                setp=self.batch_total,
+                step=self.batch_total,
             )
 
     def close(self, writer=None):
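
[Note] wandb.log() accepts a `step` keyword but no `setp`, so before this
one-character fix any wandb-enabled run raised TypeError at the first log
call. A small usage sketch (offline mode is assumed so it runs without a
W&B account):

    import wandb

    run = wandb.init(project="demo", mode="offline")
    for batch_total in range(1, 4):
        # `step` pins each metric to the trainer's global batch counter.
        wandb.log({"train/loss": 1.0 / batch_total}, step=batch_total)
    run.finish()
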
diff --git a/funasr/train_utils/trainer_ds.py b/funasr/train_utils/trainer_ds.py
index a1430db..ce8809c 100644
--- a/funasr/train_utils/trainer_ds.py
+++ b/funasr/train_utils/trainer_ds.py
@@ -30,9 +30,8 @@
         yield
     else:
         if dtype == torch.float16 or dtype == torch.bfloat16:
-            yield
-            # with autocast(enabled=True, dtype=dtype):
-            #     yield
+            with autocast(enabled=True, dtype=dtype):
+                yield
         else:
             yield
 
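
[Note] This hunk re-enables the mixed-precision forward pass that had been
commented out, so fp16/bf16 training actually runs under autocast again.
A standalone sketch of the conditional-autocast helper, mirroring the
control flow above; the function name is illustrative and CUDA autocast is
assumed:

    from contextlib import contextmanager

    import torch
    from torch.cuda.amp import autocast

    @contextmanager
    def maybe_autocast(dtype=None):
        # Wrap the forward pass in autocast only for half-precision dtypes.
        if dtype in (torch.float16, torch.bfloat16):
            with autocast(enabled=True, dtype=dtype):
                yield
        else:
            # fp32 (or dtype=None): plain pass-through context.
            yield

    # Usage: with maybe_autocast(torch.bfloat16): y = model(x)
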
@@ -684,7 +683,7 @@
             scaled_loss = model.backward(loss)
         else:
             loss = loss / self.accum_grad
-            if self.use_fp16 or self.use_bf16:
+            if scaler:
                 scaler.scale(loss).backward()
             else:
                 loss.backward()
@@ -712,7 +711,7 @@
             # Execute an optimization step (update model parameters)
             if self.use_ddp or self.use_fsdp:
                 dist.barrier()
-            if self.use_fp16 or self.use_bf16:
+            if scaler:
                 scaler.step(optim)
                 scaler.update()
             else:
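
[Note] With the train_ds.py change above, `scaler` is None unless fp16 is
enabled (bf16 shares fp32's exponent range, so it needs no loss scaling).
Gating on `self.use_fp16 or self.use_bf16` would therefore call methods on
None under bf16; gating on the scaler object covers both paths. A sketch
with illustrative model/optim/loss stand-ins:

    import torch
    from torch.cuda.amp import GradScaler

    model = torch.nn.Linear(4, 1)
    optim = torch.optim.SGD(model.parameters(), lr=0.1)
    use_fp16 = torch.cuda.is_available()  # GradScaler is CUDA-only
    scaler = GradScaler(enabled=True) if use_fp16 else None  # None for bf16/fp32

    loss = model(torch.randn(2, 4)).mean()
    if scaler:
        scaler.scale(loss).backward()  # scale up to avoid fp16 underflow
        scaler.step(optim)             # unscales grads, then optim.step()
        scaler.update()
    else:
        loss.backward()                # bf16/fp32: ordinary backward
        optim.step()
    optim.zero_grad()
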
@@ -736,6 +735,9 @@
         Args:
             epoch (int): The current epoch number.
         """
+        self.val_loss_avg = 0.0
+        self.val_acc_avg = 0.0
+
         if self.use_ddp or self.use_fsdp or self.use_deepspeed:
             dist.barrier()
         logging.info(f"Validate epoch: {epoch}, rank: {self.rank}\n")
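
[Note] train_ds.py reads trainer.val_loss_avg immediately after
validate_epoch() returns, so the attribute must exist even when the
validation loop logs no batches. Pre-initializing to 0.0 gives the
early-stopping comparison a defined value. An illustrative reduction:

    class Trainer:
        def validate_epoch(self, batch_losses):
            self.val_loss_avg = 0.0  # safe default, as in the hunk above
            if batch_losses:
                self.val_loss_avg = sum(batch_losses) / len(batch_losses)

    t = Trainer()
    t.validate_epoch([])   # empty validation set: attribute still defined
    print(t.val_loss_avg)  # 0.0
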
@@ -757,7 +759,7 @@
"data_split_i": kwargs.get("data_split_i", 0),
"data_split_num": kwargs.get("data_split_num", 1),
"log_step": batch_idx + kwargs.get("start_step", 0),
- "batch_total": batch_idx + 1,
+ "batch_total": self.batch_total,
"step_in_epoch": batch_idx + 1,
"lr": 0.0,
}
@@ -883,7 +885,7 @@
         if self.use_wandb and wandb is not None:
             wandb.log(
                 description_dict,
-                setp=batch_total,
+                step=batch_total,
             )
 
     def close(self, writer=None):
--
Gitblit v1.9.1