From 5052ca8bf03c178d9ae73cb68785cd4afb0144d2 Mon Sep 17 00:00:00 2001
From: 辰冢 <49506152+BruceLee569@users.noreply.github.com>
Date: Wed, 12 Feb 2025 16:13:08 +0800
Subject: [PATCH] Hotwords file needs an explicit utf-8 encoding. (#2379)
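
The hunks below touch funasr/bin/train_ds.py only; the encoding change the
subject names is the standard explicit-encoding pattern, since Python's
open() otherwise falls back to the locale encoding (e.g. GBK on a
Chinese-locale Windows) and mangles non-ASCII hotwords. A minimal sketch,
with the hotwords path hypothetical:

    # Illustrative only; "hotwords.txt" is a placeholder path.
    # Without encoding="utf-8", open() uses locale.getpreferredencoding().
    with open("hotwords.txt", "r", encoding="utf-8") as f:
        hotwords = [line.strip() for line in f if line.strip()]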
---
funasr/bin/train_ds.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/funasr/bin/train_ds.py b/funasr/bin/train_ds.py
index da99adc..b28752b 100644
--- a/funasr/bin/train_ds.py
+++ b/funasr/bin/train_ds.py
@@ -27,7 +27,7 @@
from funasr.train_utils.trainer_ds import Trainer
from funasr.schedulers import scheduler_classes
from funasr.train_utils.initialize import initialize
-from funasr.download.download_from_hub import download_model
+from funasr.download.download_model_from_hub import download_model
from funasr.models.lora.utils import mark_only_lora_as_trainable
from funasr.train_utils.set_all_random_seed import set_all_random_seed
from funasr.train_utils.load_pretrained_model import load_pretrained_model
@@ -66,6 +66,7 @@
# open tf32
torch.backends.cuda.matmul.allow_tf32 = kwargs.get("enable_tf32", True)
+ rank = int(os.environ.get("RANK", 0))
local_rank = int(os.environ.get("LOCAL_RANK", 0))
world_size = int(os.environ.get("WORLD_SIZE", 1))
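
This hunk reads the global rank from the environment instead of waiting for
a process group: torchrun and the DeepSpeed launcher export RANK,
LOCAL_RANK and WORLD_SIZE before the script starts, so the value is usable
in every branch below. A standalone sketch of the pattern (the rank-0
logging guard is illustrative, not part of this patch):

    import os
    # Launchers such as torchrun export these; the defaults keep
    # single-process debugging working without a launcher.
    rank = int(os.environ.get("RANK", 0))
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    if rank == 0:
        print(f"training with {world_size} process(es)")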
@@ -80,10 +81,13 @@
deepspeed.init_distributed(dist_backend=kwargs.get("backend", "nccl"))
elif use_ddp or use_fsdp:
logging.info(f"use_ddp: {use_ddp}, use_fsdp: {use_fsdp}")
- dist.init_process_group(backend=kwargs.get("backend", "nccl"), init_method="env://")
+ dist.init_process_group(
+ backend=kwargs.get("backend", "nccl"),
+ init_method="env://",
+ )
torch.cuda.set_device(local_rank)
- rank = dist.get_rank()
+ # rank = dist.get_rank()
logging.info("Build model, frontend, tokenizer")
device = kwargs.get("device", "cuda")
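
Commenting out dist.get_rank() is the point of the change above: that call
is only valid after dist.init_process_group(), while the deepspeed branch
never calls it, so the env-derived rank is the one value available in all
launch modes. An illustrative consistency check, assuming a launcher that
exports RANK:

    import os
    import torch.distributed as dist

    # Before init_process_group(), dist.get_rank() raises a RuntimeError;
    # once the group is up, it agrees with the env-derived rank.
    if dist.is_initialized():
        assert dist.get_rank() == int(os.environ.get("RANK", "0"))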
@@ -130,7 +134,7 @@
**kwargs.get("train_conf"),
)
- model = trainer.warp_model(model)
+ model = trainer.warp_model(model, **kwargs)
kwargs["device"] = int(os.environ.get("LOCAL_RANK", 0))
trainer.device = int(os.environ.get("LOCAL_RANK", 0))
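
Forwarding **kwargs lets warp_model pick its wrapping strategy from the
same config dict the rest of setup uses. A hypothetical sketch of such a
config-driven wrapper (FunASR's actual warp_model body is not shown in
this patch):

    import os
    from torch.nn.parallel import DistributedDataParallel as DDP
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

    def warp_model(model, **kwargs):
        local_rank = int(os.environ.get("LOCAL_RANK", 0))
        if kwargs.get("use_fsdp"):
            return FSDP(model)                          # shard parameters across ranks
        if kwargs.get("use_ddp"):
            return DDP(model, device_ids=[local_rank])  # replicate + all-reduce grads
        return model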
@@ -145,7 +149,7 @@
dataloader = dataloader_class(**kwargs)
# dataloader_tr, dataloader_val = dataloader_class(**kwargs)
- scaler = GradScaler(enabled=trainer.use_fp16) if trainer.use_fp16 else None
+ scaler = GradScaler(enabled=True) if trainer.use_fp16 or trainer.use_bf16 else None
scaler = ShardedGradScaler(enabled=trainer.use_fp16) if trainer.use_fsdp else scaler
trainer.resume_checkpoint(
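
The scaler hunk widens the old fp16-only condition so bf16 runs also build
a GradScaler (the FSDP line then swaps in ShardedGradScaler); bf16 has
fp32-like dynamic range and usually needs no loss scaling, so enabled=True
there mostly keeps a single code path. For reference, the standard AMP
step the scaler drives, with model, optimizer, loss_fn and batch assumed:

    import torch
    scaler = torch.cuda.amp.GradScaler(enabled=True)
    with torch.cuda.amp.autocast(dtype=torch.float16):
        loss = loss_fn(model(batch))      # forward in reduced precision
    scaler.scale(loss).backward()         # scale up to avoid fp16 grad underflow
    scaler.step(optimizer)                # unscales, steps if grads are finite
    scaler.update()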
@@ -180,7 +184,10 @@
)
trainer.start_step = 0
- torch.cuda.empty_cache()
+ # device = next(model.parameters()).device
+ # if device.type == 'cuda':
+    #     with torch.cuda.device(device):
+ # torch.cuda.empty_cache()
time_escaped = (time.perf_counter() - time_slice_i) / 3600.0
logging.info(
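
The final hunk disables the per-slice torch.cuda.empty_cache(); the
commented lines sketch a device-guarded replacement. With the missing
device argument filled in, a working form of that guard would be (model
assumed in scope):

    import torch
    device = next(model.parameters()).device
    if device.type == "cuda":
        # empty_cache() frees cached blocks on the current CUDA device;
        # the context manager pins that to the model's GPU first.
        with torch.cuda.device(device):
            torch.cuda.empty_cache()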
--
Gitblit v1.9.1