From 28ccfbfc51068a663a80764e14074df5edf2b5ba Mon Sep 17 00:00:00 2001
From: kongdeqiang <kongdeqiang960204@163.com>
Date: Fri, 13 Mar 2026 17:41:41 +0800
Subject: [PATCH] Commit
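
Hoist the tensorboardX SummaryWriter import to module level so the
try/except block only guards creating the writer, and make CUDA cache
clearing device-aware: torch.cuda.empty_cache() now runs only when the
model's parameters live on a CUDA device, and inside that device's
context, instead of being called unconditionally.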
---
funasr/bin/train.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/funasr/bin/train.py b/funasr/bin/train.py
index 2729b80..c56d047 100644
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -20,6 +20,7 @@
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.algorithms.join import Join
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
+from tensorboardX import SummaryWriter
from funasr.train_utils.average_nbest_models import average_checkpoints
from funasr.register import tables
@@ -191,8 +192,6 @@
tensorboard_dir = os.path.join(kwargs.get("output_dir"), "tensorboard")
os.makedirs(tensorboard_dir, exist_ok=True)
try:
- from tensorboardX import SummaryWriter
-
writer = SummaryWriter(tensorboard_dir) # if trainer.rank == 0 else None
except:
writer = None
@@ -222,7 +221,10 @@
)
trainer.start_step = 0
- torch.cuda.empty_cache()
+ device = next(model.parameters()).device
+ if device.type == "cuda":
+ with torch.cuda.device(device):
+ torch.cuda.empty_cache()
time_escaped = (time.perf_counter() - time_slice_i) / 3600.0
logging.info(
--
Gitblit v1.9.1