zhifu gao
2024-05-20 961ec280afb02f2464ce4f7b2fd7c821dd24044b
Dev gzf deepspeed (#1736)

* resume from step

* batch

* train_loss_avg train_acc_avg

* log step

* wav does not exist

* decoding

* wechat

* decoding key

* dynamic batch

* start_data_split_i=0

* total_time/accum_grad

* update avg slice

* sensevoice sanm

* add

* deepspeed

* update with main (#1731)

* c++ runtime adapt to 1.0 (#1724)

* adapt vad runtime to 1.0

* add json

* change yml name

* add func LoadVocabFromJson

* add token file for InitAsr

* add token path for OfflineStream

* add funcOpenYaml

* add token file for InitPunc

* add token file for stream

* update punc-model

* update funasr-wss-server

* update runtime_sdk_download_tool.py

* update docker list

* Delete docs/images/wechat.png

* Add files via upload

* Emo2Vec: restrict the selectable emotion categories (#1730)

* Restrict the selectable emotion categories

* Use none to disable emotion label output

* Modify the output interface

* Use unuse to disable the token

---------

Co-authored-by: 常材 <gaochangfeng.gcf@alibaba-inc.com>

* bugfix

* v1.0.27

* update docs

* hf hub

* Fix incorrect assignment of 'end' attribute to 'start' in sentences list comprehension (#1680)

---------

Co-authored-by: Yabin Li <wucong.lyb@alibaba-inc.com>
Co-authored-by: gaochangfeng <54253717+gaochangfeng@users.noreply.github.com>
Co-authored-by: 常材 <gaochangfeng.gcf@alibaba-inc.com>
Co-authored-by: nsdou <168500039+nsdou@users.noreply.github.com>

* docs

* deepspeed

* update

* ds

---------

Co-authored-by: Yabin Li <wucong.lyb@alibaba-inc.com>
Co-authored-by: gaochangfeng <54253717+gaochangfeng@users.noreply.github.com>
Co-authored-by: 常材 <gaochangfeng.gcf@alibaba-inc.com>
Co-authored-by: nsdou <168500039+nsdou@users.noreply.github.com>
8 files changed, 813 lines modified
docs/images/wechat.png
funasr/bin/train_ds.py                              46
funasr/datasets/audio_datasets/espnet_samplers.py    4
funasr/models/sanm/attention.py                     16
funasr/models/sense_voice/decoder.py                 4
funasr/models/sense_voice/whisper_lib/model.py       4
funasr/train_utils/average_nbest_models.py          15
funasr/train_utils/trainer_ds.py                    724
docs/images/wechat.png

funasr/bin/train_ds.py
@@ -130,32 +130,10 @@
    model = trainer.warp_model(model)
    kwargs["device"] = next(model.parameters()).device
    trainer.device = kwargs["device"]
    kwargs["device"] = int(os.environ.get("LOCAL_RANK", 0))
    trainer.device = int(os.environ.get("LOCAL_RANK", 0))
    # optim
    logging.info("Build optim")
    optim = kwargs.get("optim", "adam")
    assert optim in optim_classes
    optim_class = optim_classes.get(optim)
    optim = optim_class(model.parameters(), **kwargs.get("optim_conf"))
    # scheduler
    logging.info("Build scheduler")
    scheduler = kwargs.get("scheduler", "warmuplr")
    assert scheduler in scheduler_classes
    scheduler_class = scheduler_classes.get(scheduler)
    scheduler = scheduler_class(optim, **kwargs.get("scheduler_conf"))
    if use_deepspeed:
        args = OmegaConf.create({"deepspeed_config": kwargs.get("deepspeed_config", "")})
        model, optimizer, _, scheduler = deepspeed.initialize(
            args=args,
            model=model,
            optimizer=optim,
            lr_scheduler=scheduler,
            model_parameters=model.parameters(),
        )
    model, optim, scheduler = trainer.warp_optim_scheduler(model, **kwargs)
    # dataset
    logging.info("Build dataloader")
@@ -175,15 +153,6 @@
        scaler=scaler,
    )
    tensorboard_dir = os.path.join(kwargs.get("output_dir"), "tensorboard")
    os.makedirs(tensorboard_dir, exist_ok=True)
    try:
        from tensorboardX import SummaryWriter
        writer = SummaryWriter(tensorboard_dir)  # if trainer.rank == 0 else None
    except:
        writer = None
    dataloader_tr, dataloader_val = None, None
    for epoch in range(trainer.start_epoch, trainer.max_epoch):
        time1 = time.perf_counter()
@@ -201,7 +170,6 @@
                dataloader_train=dataloader_tr,
                dataloader_val=dataloader_val,
                epoch=epoch,
                writer=writer,
                data_split_i=data_split_i,
                data_split_num=dataloader.data_split_num,
                start_step=trainer.start_step,
@@ -211,9 +179,7 @@
            torch.cuda.empty_cache()
        trainer.start_data_split_i = 0
        trainer.validate_epoch(
            model=model, dataloader_val=dataloader_val, epoch=epoch + 1, writer=writer
        )
        trainer.validate_epoch(model=model, dataloader_val=dataloader_val, epoch=epoch + 1)
        scheduler.step()
        trainer.step_in_epoch = 0
        trainer.save_checkpoint(
@@ -232,7 +198,9 @@
        trainer.train_loss_avg = 0.0
    if trainer.rank == 0:
        average_checkpoints(trainer.output_dir, trainer.avg_nbest_model)
        average_checkpoints(
            trainer.output_dir, trainer.avg_nbest_model, use_deepspeed=trainer.use_deepspeed
        )
    trainer.close()
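
Note: the entry point now delegates optimizer/scheduler construction (and, when enabled, deepspeed.initialize) to the trainer, and the TensorBoard writer moves into the Trainer itself. A minimal sketch of the resulting flow, with names abbreviated rather than the exact FunASR code:

import os

def run_training(trainer, model, dataloader, **kwargs):
    model = trainer.warp_model(model)                         # DDP / device placement
    kwargs["device"] = int(os.environ.get("LOCAL_RANK", 0))   # device follows LOCAL_RANK
    trainer.device = kwargs["device"]
    # optim, scheduler (and DeepSpeed engine wrapping) are built inside the trainer now
    model, optim, scheduler = trainer.warp_optim_scheduler(model, **kwargs)
    for epoch in range(trainer.start_epoch, trainer.max_epoch):
        trainer.train_epoch(model=model, optim=optim, scheduler=scheduler,
                            dataloader_train=dataloader, dataloader_val=dataloader,
                            epoch=epoch)
        trainer.validate_epoch(model=model, dataloader_val=dataloader, epoch=epoch + 1)
        scheduler.step()
        trainer.step_in_epoch = 0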
funasr/datasets/audio_datasets/espnet_samplers.py
@@ -72,6 +72,7 @@
        self.min_token_length = kwargs.get("min_token_length", 0)
        self.length_scale_source = kwargs.get("length_scale_source", 1.0)
        self.start_step = start_step
        self.batch_num = 1
        if self.start_step > 0:
            logging.info(f"Warning, start_step > 0, dataloader start from step: {self.start_step}")
        # super().__init__(dataset, num_replicas=num_replicas, rank=rank,
@@ -146,6 +147,7 @@
        start_idx = self.rank * batches_per_rank
        end_idx = start_idx + batches_per_rank
        rank_batches = buffer_batches[start_idx + self.start_step : end_idx]
        self.batch_num = len(rank_batches)
        logging.info(
            f"rank: {self.rank}, dataloader start from step: {self.start_step}, batch_num: {end_idx-start_idx}, batch_num_after_step: {len(rank_batches)}"
        )
@@ -154,7 +156,7 @@
    def __len__(self):
        # Calculate the number of batches per epoch for the current rank
        return 1
        return self.batch_num
    def set_epoch(self, epoch):
        # Set the epoch for shuffling
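
Note: with resume support, the sampler's length has to reflect the batches this rank will actually yield after skipping start_step, which the hunk above stores in batch_num. A minimal sketch of the idea, with illustrative names rather than the full batch sampler:

class ResumableRankSampler:
    def __init__(self, buffer_batches, rank, world_size, start_step=0):
        batches_per_rank = len(buffer_batches) // world_size
        start_idx = rank * batches_per_rank
        end_idx = start_idx + batches_per_rank
        # skip the first `start_step` batches of this rank when resuming mid-epoch
        self.rank_batches = buffer_batches[start_idx + start_step : end_idx]
        self.batch_num = len(self.rank_batches)

    def __iter__(self):
        return iter(self.rank_batches)

    def __len__(self):
        # previously hard-coded to 1; now the true per-rank batch count
        return self.batch_num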
funasr/models/sanm/attention.py
@@ -100,7 +100,9 @@
        n_batch = value.size(0)
        if mask is not None:
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            min_value = -float(
                "inf"
            )  # float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            scores = scores.masked_fill(mask, min_value)
            self.attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
@@ -269,7 +271,9 @@
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            min_value = -float(
                "inf"
            )  # float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            scores = scores.masked_fill(mask, min_value)
            self.attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
@@ -673,7 +677,9 @@
        n_batch = value.size(0)
        if mask is not None:
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            min_value = -float(
                "inf"
            )  # float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            # logging.info(
            #     "scores: {}, mask_size: {}".format(scores.size(), mask.size()))
            scores = scores.masked_fill(mask, min_value)
@@ -858,7 +864,9 @@
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            min_value = float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            min_value = -float(
                "inf"
            )  # float(numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min)
            scores = scores.masked_fill(mask, min_value)
            self.attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0
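
Note: the fill value for masked attention scores changes from the dtype's finite minimum (computed via NumPy, which has no bfloat16 dtype) to -inf, presumably to support bf16/fp16 training under DeepSpeed; the same substitution appears below in sense_voice/decoder.py and whisper_lib/model.py. A minimal sketch of the resulting masking pattern (shapes illustrative):

import torch

def masked_attention_weights(scores: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # mask holds 1 for valid positions and 0 for padding; invert to a boolean pad mask
    pad = mask.unsqueeze(1).eq(0)                     # (batch, 1, *, time2)
    scores = scores.masked_fill(pad, float("-inf"))   # -inf instead of dtype-min
    attn = torch.softmax(scores, dim=-1)
    # zero the padded positions again so fully-masked rows do not propagate NaNs
    return attn.masked_fill(pad, 0.0)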
funasr/models/sense_voice/decoder.py
@@ -146,7 +146,9 @@
                qk = qk + mask[:n_ctx, :n_ctx]
            else:
                mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
                min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
                min_value = -float(
                    "inf"
                )  # min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
                qk = qk.masked_fill(mask, min_value)
        qk = qk.float()
funasr/models/sense_voice/whisper_lib/model.py
@@ -112,7 +112,9 @@
                qk = qk + mask[:n_ctx, :n_ctx]
            else:
                mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
                min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
                min_value = -float(
                    "inf"
                )  # min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
                qk = qk.masked_fill(mask, min_value)
        qk = qk.float()
funasr/train_utils/average_nbest_models.py
@@ -16,7 +16,7 @@
from functools import cmp_to_key
def _get_checkpoint_paths(output_dir: str, last_n: int = 5):
def _get_checkpoint_paths(output_dir: str, last_n: int = 5, use_deepspeed=False, **kwargs):
    """
    Get the paths of the last 'last_n' checkpoints by parsing filenames
    in the output directory.
@@ -29,7 +29,13 @@
        sorted_items = (
            sorted_items[:last_n] if avg_keep_nbest_models_type == "acc" else sorted_items[-last_n:]
        )
        checkpoint_paths = [os.path.join(output_dir, key) for key, value in sorted_items[:last_n]]
        checkpoint_paths = []
        for key, value in sorted_items[:last_n]:
            if not use_deepspeed:
                ckpt = os.path.join(output_dir, key)
            else:
                ckpt = os.path.join(output_dir, key, "mp_rank_00_model_states.pt")
    except:
        print(f"{checkpoint} does not exist, avg the lastet checkpoint.")
        # List all files in the output directory
@@ -49,7 +55,7 @@
    Average the last 'last_n' checkpoints' model state_dicts.
    If a tensor is of type torch.int, perform sum instead of average.
    """
    checkpoint_paths = _get_checkpoint_paths(output_dir, last_n)
    checkpoint_paths = _get_checkpoint_paths(output_dir, last_n, **kwargs)
    print(f"average_checkpoints: {checkpoint_paths}")
    state_dicts = []
@@ -62,7 +68,8 @@
    # Check if we have any state_dicts to average
    if len(state_dicts) < 1:
        raise RuntimeError("No checkpoints found for averaging.")
        print("No checkpoints found for averaging.")
        return
    # Average or sum weights
    avg_state_dict = OrderedDict()
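
Note: with DeepSpeed, model.save_checkpoint(tag=...) writes a directory per tag, so the weights used for averaging live at <tag>/mp_rank_00_model_states.pt rather than in a single file. A hedged sketch of the intended path selection; the append into checkpoint_paths is assumed here, since the hunk as rendered does not show it:

import os

def nbest_checkpoint_paths(output_dir, sorted_items, last_n=5, use_deepspeed=False):
    paths = []
    for key, _value in sorted_items[:last_n]:
        if use_deepspeed:
            # DeepSpeed layout: one directory per checkpoint tag
            ckpt = os.path.join(output_dir, key, "mp_rank_00_model_states.pt")
        else:
            ckpt = os.path.join(output_dir, key)
        paths.append(ckpt)
    return paths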
funasr/train_utils/trainer_ds.py
@@ -23,12 +23,16 @@
@contextmanager
def maybe_autocast(enabled):
    if enabled:
        with autocast():
def maybe_autocast(dtype=None, use_deepspeed=False):
    if use_deepspeed:
        with torch.cuda.amp.autocast(enabled=True, dtype=dtype, cache_enabled=False):
            yield
    else:
        yield
        if dtype == torch.float16:
            with autocast(enabled=True):
                yield
        else:
            yield
class Trainer:
@@ -78,7 +82,7 @@
        self.world_size = world_size
        self.use_ddp = use_ddp
        self.use_fsdp = use_fsdp
        self.use_deepspeed = use_deepspeed
        self.device = kwargs.get("device", "cuda")
        self.output_dir = output_dir
@@ -91,7 +95,10 @@
        # self.kwargs = kwargs
        self.log_interval = kwargs.get("log_interval", 50)
        self.batch_total = 0
        self.dtype = torch.float32
        self.use_fp16 = use_fp16
        if self.use_fp16:
            self.dtype = torch.float16
        self.save_checkpoint_interval = kwargs.get("save_checkpoint_interval", 5000)
        self.validate_interval = kwargs.get("validate_interval", 5000)
        self.keep_nbest_models = kwargs.get("keep_nbest_models", 500)
@@ -128,6 +135,17 @@
                job_type="training",
                reinit=True,
            )
        tensorboard_dir = os.path.join(output_dir, "tensorboard")
        os.makedirs(tensorboard_dir, exist_ok=True)
        try:
            from tensorboardX import SummaryWriter
            self.writer = SummaryWriter(tensorboard_dir)  # if trainer.rank == 0 else None
        except:
            self.writer = None
        self.use_deepspeed = use_deepspeed
        self.deepspeed_config = kwargs.get("deepspeed_config", "")
    def save_checkpoint(
        self,
@@ -148,9 +166,113 @@
        Args:
            epoch (int): The epoch number at which the checkpoint is being saved.
        """
        step_in_epoch = None if step is None else step_in_epoch
        if self.rank == 0:
        if self.use_deepspeed:
            logging.info(f"Save checkpoint: {epoch}, rank: {self.local_rank}\n")
            # self.step_or_epoch += 1
            state = {
                "epoch": epoch,
                # "state_dict": model.state_dict(),
                # "optimizer": optim.state_dict(),
                # "scheduler": scheduler.state_dict(),
                "saved_ckpts": self.saved_ckpts,
                "val_acc_step_or_eoch": self.val_acc_step_or_eoch,
                "val_loss_step_or_eoch": self.val_loss_step_or_eoch,
                "best_step_or_epoch": self.best_step_or_epoch,
                "avg_keep_nbest_models_type": self.avg_keep_nbest_models_type,
                "step": step,
                "step_in_epoch": step_in_epoch,
                "data_split_i": kwargs.get("data_split_i", 0),
                "data_split_num": kwargs.get("data_split_num", 1),
                "batch_total": self.batch_total,
                "train_loss_avg": kwargs.get("train_loss_avg", 0),
                "train_acc_avg": kwargs.get("train_acc_avg", 0),
            }
            step = step_in_epoch
            if hasattr(model, "module"):
                state["state_dict"] = model.module.state_dict()
            if scaler:
                state["scaler_state"] = scaler.state_dict()
            # Create output directory if it does not exist
            os.makedirs(self.output_dir, exist_ok=True)
            if step is None:
                ckpt_name = f"model.pt.ep{epoch}"
            else:
                ckpt_name = f"model.pt.ep{epoch}.{step}"
            filename = os.path.join(self.output_dir, ckpt_name)
            # torch.save(state, filename)
            with torch.no_grad():
                model.save_checkpoint(save_dir=self.output_dir, tag=ckpt_name, client_state=state)
            logging.info(f"\nCheckpoint saved to {filename}\n")
            latest = Path(os.path.join(self.output_dir, f"model.pt"))
            # torch.save(state, latest)
            with torch.no_grad():
                model.save_checkpoint(save_dir=self.output_dir, tag=f"model.pt", client_state=state)
            if self.best_step_or_epoch == "":
                self.best_step_or_epoch = ckpt_name
            if self.avg_keep_nbest_models_type == "acc":
                if (
                    self.val_acc_step_or_eoch[ckpt_name]
                    >= self.val_acc_step_or_eoch[self.best_step_or_epoch]
                ):
                    self.best_step_or_epoch = ckpt_name
                    best_ckpt = Path(os.path.join(self.output_dir, f"model.pt.best"))
                    # torch.save(state, best_ckpt)
                    with torch.no_grad():
                        model.save_checkpoint(
                            save_dir=self.output_dir, tag=f"model.pt.best", client_state=state
                        )
                    logging.info(
                        f"Update best acc: {self.val_acc_step_or_eoch[self.best_step_or_epoch]:.4f}, {best_ckpt}"
                    )
                else:
                    logging.info(
                        f"No improvement in acc: {self.val_acc_step_or_eoch[ckpt_name]:.4f} < {self.val_acc_step_or_eoch[self.best_step_or_epoch]:.4f}, {os.path.join(self.output_dir, self.best_step_or_epoch)}"
                    )
            elif self.avg_keep_nbest_models_type == "loss":
                if (
                    self.val_loss_step_or_eoch[ckpt_name]
                    <= self.val_loss_step_or_eoch[self.best_step_or_epoch]
                ):
                    self.best_step_or_epoch = ckpt_name
                    best_ckpt = Path(os.path.join(self.output_dir, f"model.pt.best"))
                    # torch.save(state, best_ckpt)
                    with torch.no_grad():
                        model.save_checkpoint(
                            save_dir=self.output_dir, tag=f"model.pt.best", client_state=state
                        )
                    logging.info(
                        f"Update best loss: {self.val_loss_step_or_eoch[self.best_step_or_epoch]:.4f}, {best_ckpt}"
                    )
                else:
                    logging.info(
                        f"No improvement in loss: {self.val_loss_step_or_eoch[ckpt_name]:.4f} > {self.val_loss_step_or_eoch[self.best_step_or_epoch]:.4f}, {os.path.join(self.output_dir, self.best_step_or_epoch)}"
                    )
            else:
                print("Undo")
            self.saved_ckpts[ckpt_name] = getattr(
                self, f"val_{self.avg_keep_nbest_models_type}_step_or_eoch"
            )[ckpt_name]
            if self.keep_nbest_models > 0:
                if len(self.saved_ckpts) > self.keep_nbest_models:
                    if self.avg_keep_nbest_models_type == "acc":
                        key = min(self.saved_ckpts, key=self.saved_ckpts.get)
                    else:
                        key = max(self.saved_ckpts, key=self.saved_ckpts.get)
                    if key in self.saved_ckpts:
                        del self.saved_ckpts[key]
                    filename = os.path.join(self.output_dir, key)
                    logging.info(f"Delete: {filename}")
                    if os.path.exists(filename):
                        os.remove(filename)
        elif self.use_fsdp:
            pass
        elif self.rank == 0:
            logging.info(f"Save checkpoint: {epoch}, rank: {self.local_rank}\n")
            # self.step_or_epoch += 1
            state = {
@@ -258,66 +380,117 @@
            resume_path (str): The file path to the checkpoint to resume from.
        """
        if self.resume:
            ckpt = os.path.join(self.output_dir, "model.pt")
            if os.path.isfile(ckpt):
                checkpoint = torch.load(ckpt, map_location="cpu")
                self.start_epoch = checkpoint["epoch"]
                # self.model.load_state_dict(checkpoint['state_dict'])
                src_state = checkpoint["state_dict"]
                dst_state = model.state_dict()
                for k in dst_state.keys():
                    if not k.startswith("module.") and "module." + k in src_state.keys():
                        k_ddp = "module." + k
                    elif k.startswith("module.") and "module." + k not in src_state.keys():
                        k_ddp = k.replace("module.", "", 1)
                    else:
                        k_ddp = k
                    if k_ddp in src_state.keys():
                        dst_state[k] = src_state[k_ddp]
                    else:
                        print(f"Miss key in ckpt: model: {k}, ckpt: {k_ddp}")
                model.load_state_dict(dst_state)
                optim.load_state_dict(checkpoint["optimizer"])
                scheduler.load_state_dict(checkpoint["scheduler"])
                if scaler is not None and "scaler_state" in checkpoint:
                    scaler.load_state_dict(checkpoint["scaler_state"])
            if self.use_deepspeed:
                ckpt = os.path.join(self.output_dir, "model.pt")
                if os.path.exists(ckpt):
                    _, checkpoint = model.load_checkpoint(self.output_dir, "model.pt")
                self.saved_ckpts = checkpoint["saved_ckpts"]
                self.val_acc_step_or_eoch = (
                    checkpoint["val_acc_step_or_eoch"]
                    if "val_acc_step_or_eoch" in checkpoint
                    else {}
                )
                self.val_loss_step_or_eoch = (
                    checkpoint["val_loss_step_or_eoch"]
                    if "val_loss_step_or_eoch" in checkpoint
                    else {}
                )
                self.best_step_or_epoch = (
                    checkpoint["best_step_or_epoch"] if "best_step_or_epoch" in checkpoint else ""
                )
                self.start_data_split_i = (
                    checkpoint["data_split_i"] if "data_split_i" in checkpoint else 0
                )
                self.batch_total = checkpoint["batch_total"] if "batch_total" in checkpoint else 0
                self.start_step = checkpoint["step"] if "step" in checkpoint else 0
                self.start_step = 0 if self.start_step is None else self.start_step
                self.step_in_epoch = (
                    checkpoint["step_in_epoch"] if "step_in_epoch" in checkpoint else 0
                )
                self.step_in_epoch = 0 if self.step_in_epoch is None else self.step_in_epoch
                print(checkpoint["train_acc_avg"])
                self.train_acc_avg = (
                    checkpoint["train_acc_avg"] if "train_acc_avg" in checkpoint else 0
                )
                self.train_loss_avg = (
                    checkpoint["train_loss_avg"] if "train_loss_avg" in checkpoint else 0
                )
                model.to(self.device)
                print(f"Checkpoint loaded successfully from '{ckpt}'")
                    self.saved_ckpts = checkpoint["saved_ckpts"]
                    self.val_acc_step_or_eoch = (
                        checkpoint["val_acc_step_or_eoch"]
                        if "val_acc_step_or_eoch" in checkpoint
                        else {}
                    )
                    self.val_loss_step_or_eoch = (
                        checkpoint["val_loss_step_or_eoch"]
                        if "val_loss_step_or_eoch" in checkpoint
                        else {}
                    )
                    self.best_step_or_epoch = (
                        checkpoint["best_step_or_epoch"]
                        if "best_step_or_epoch" in checkpoint
                        else ""
                    )
                    self.start_data_split_i = (
                        checkpoint["data_split_i"] if "data_split_i" in checkpoint else 0
                    )
                    self.batch_total = (
                        checkpoint["batch_total"] if "batch_total" in checkpoint else 0
                    )
                    self.start_step = checkpoint["step"] if "step" in checkpoint else 0
                    self.start_step = 0 if self.start_step is None else self.start_step
                    self.step_in_epoch = (
                        checkpoint["step_in_epoch"] if "step_in_epoch" in checkpoint else 0
                    )
                    self.step_in_epoch = 0 if self.step_in_epoch is None else self.step_in_epoch
                    print(checkpoint["train_acc_avg"])
                    self.train_acc_avg = (
                        checkpoint["train_acc_avg"] if "train_acc_avg" in checkpoint else 0
                    )
                    self.train_loss_avg = (
                        checkpoint["train_loss_avg"] if "train_loss_avg" in checkpoint else 0
                    )
                    model.to(self.device)
                    print(f"Checkpoint loaded successfully from '{ckpt}'")
                else:
                    print(f"No checkpoint found at '{ckpt}', does not resume status!")
            else:
                print(f"No checkpoint found at '{ckpt}', does not resume status!")
                ckpt = os.path.join(self.output_dir, "model.pt")
                if os.path.isfile(ckpt):
                    checkpoint = torch.load(ckpt, map_location="cpu")
                    self.start_epoch = checkpoint["epoch"]
                    # self.model.load_state_dict(checkpoint['state_dict'])
                    src_state = checkpoint["state_dict"]
                    dst_state = model.state_dict()
                    for k in dst_state.keys():
                        if not k.startswith("module.") and "module." + k in src_state.keys():
                            k_ddp = "module." + k
                        elif k.startswith("module.") and "module." + k not in src_state.keys():
                            k_ddp = k.replace("module.", "", 1)
                        else:
                            k_ddp = k
                        if k_ddp in src_state.keys():
                            dst_state[k] = src_state[k_ddp]
                        else:
                            print(f"Miss key in ckpt: model: {k}, ckpt: {k_ddp}")
                    model.load_state_dict(dst_state)
                    optim.load_state_dict(checkpoint["optimizer"])
                    scheduler.load_state_dict(checkpoint["scheduler"])
                    if scaler is not None and "scaler_state" in checkpoint:
                        scaler.load_state_dict(checkpoint["scaler_state"])
                    self.saved_ckpts = checkpoint["saved_ckpts"]
                    self.val_acc_step_or_eoch = (
                        checkpoint["val_acc_step_or_eoch"]
                        if "val_acc_step_or_eoch" in checkpoint
                        else {}
                    )
                    self.val_loss_step_or_eoch = (
                        checkpoint["val_loss_step_or_eoch"]
                        if "val_loss_step_or_eoch" in checkpoint
                        else {}
                    )
                    self.best_step_or_epoch = (
                        checkpoint["best_step_or_epoch"]
                        if "best_step_or_epoch" in checkpoint
                        else ""
                    )
                    self.start_data_split_i = (
                        checkpoint["data_split_i"] if "data_split_i" in checkpoint else 0
                    )
                    self.batch_total = (
                        checkpoint["batch_total"] if "batch_total" in checkpoint else 0
                    )
                    self.start_step = checkpoint["step"] if "step" in checkpoint else 0
                    self.start_step = 0 if self.start_step is None else self.start_step
                    self.step_in_epoch = (
                        checkpoint["step_in_epoch"] if "step_in_epoch" in checkpoint else 0
                    )
                    self.step_in_epoch = 0 if self.step_in_epoch is None else self.step_in_epoch
                    print(checkpoint["train_acc_avg"])
                    self.train_acc_avg = (
                        checkpoint["train_acc_avg"] if "train_acc_avg" in checkpoint else 0
                    )
                    self.train_loss_avg = (
                        checkpoint["train_loss_avg"] if "train_loss_avg" in checkpoint else 0
                    )
                    model.to(self.device)
                    print(f"Checkpoint loaded successfully from '{ckpt}'")
                else:
                    print(f"No checkpoint found at '{ckpt}', does not resume status!")
        if self.use_ddp or self.use_fsdp:
            dist.barrier()
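
Note: resuming under DeepSpeed goes through the engine rather than torch.load: load_checkpoint(load_dir, tag) returns (load_path, client_state), and the trainer's bookkeeping is restored from the client_state dict that save_checkpoint stored. A minimal sketch covering a subset of the keys used above (helper name illustrative):

import os

def resume_from_deepspeed(engine, output_dir, trainer, tag="model.pt"):
    if not os.path.exists(os.path.join(output_dir, tag)):
        print(f"No checkpoint found at '{os.path.join(output_dir, tag)}', does not resume status!")
        return
    _, client_state = engine.load_checkpoint(output_dir, tag)
    trainer.saved_ckpts = client_state.get("saved_ckpts", {})
    trainer.best_step_or_epoch = client_state.get("best_step_or_epoch", "")
    trainer.start_data_split_i = client_state.get("data_split_i", 0)
    trainer.batch_total = client_state.get("batch_total", 0)
    trainer.start_step = client_state.get("step") or 0
    trainer.step_in_epoch = client_state.get("step_in_epoch") or 0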
@@ -331,7 +504,6 @@
        dataloader_train=None,
        dataloader_val=None,
        epoch=None,
        writer=None,
        **kwargs,
    ):
        """
@@ -339,7 +511,7 @@
        Args:
            epoch (int): The current epoch number.
        """
        if self.use_ddp or self.use_fsdp:
        if self.use_ddp or self.use_fsdp or self.use_deepspeed:
            dist.barrier()
        logging.info(f"Train epoch: {epoch}, rank: {self.rank}\n")
        model.train()
@@ -356,14 +528,21 @@
        time_beg = time.perf_counter()
        time5 = time_beg
        for batch_idx, batch in enumerate(dataloader_train):
            if self.use_ddp or self.use_fsdp:
                dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
                if iterator_stop > 0:
                    break
            self.batch_total += 1
            self.step_in_epoch += 1
            loss_dict = {
                "speed_stats": {},
                "epoch": epoch,
                "batch_idx": batch_idx,
                "data_split_i": kwargs.get("data_split_i", 0),
                "data_split_num": kwargs.get("data_split_num", 1),
                "log_step": batch_idx + kwargs.get("start_step", 0),
                "batch_total": self.batch_total,
                "step_in_epoch": self.step_in_epoch,
            }
            time1 = time.perf_counter()
            speed_stats["data_load"] = f"{time1-time_beg:0.3f}"
            loss_dict["speed_stats"]["data_load"] = f"{time1-time_beg:0.3f}"
            batch = to_device(batch, self.device)
@@ -372,35 +551,43 @@
                my_context = model.no_sync if batch_idx % accum_grad != 0 else my_context
            with my_context():
                time2 = time.perf_counter()
                loss_dict = {}
                self.forward_step(model, batch, loss_dict=loss_dict)
                time3 = time.perf_counter()
                speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
                loss_dict["speed_stats"]["forward_time"] = f"{time3 - time2:0.3f}"
                self.backward_step(model, scaler, loss_dict=loss_dict)
                time4 = time.perf_counter()
                speed_stats["backward_and_AllReaduce_time"] = f"{time4 - time3:0.3f}"
                loss_dict["speed_stats"]["backward_time"] = f"{time4 - time3:0.3f}"
                # self.train_loss_avg = (
                #     self.train_loss_avg * (batch_idx + kwargs.get("start_step", 0))
                #     + loss.detach().cpu().item()
                # ) / (batch_idx + kwargs.get("start_step", 0) + 1)
                # if "acc" in stats:
                #     self.train_acc_avg = (
                #         self.train_acc_avg * (batch_idx + kwargs.get("start_step", 0))
                #         + stats["acc"].detach().cpu().item()
                #     ) / (batch_idx + kwargs.get("start_step", 0) + 1)
            self.update_step(model, optim, scheduler, scaler, loss_dict=loss_dict)
            total_time = f"{(time.perf_counter() - time5):0.3f}"
            time5 = time.perf_counter()
            self.update_step(model, optim, scheduler, scaler, loss_dict)
            # Perform an optimizer step only after accumulating enough gradients
            loss_dict["speed_stats"]["optim_time"] = f"{time5 - time4:0.3f}"
            loss_dict["speed_stats"]["total_time"] = total_time
            loss_dict["lr"] = scheduler.get_last_lr()[0]
            loss_dict["batch_num_epoch"] = len(dataloader_train)
            self.train_loss_avg = (
                self.train_loss_avg * batch_idx + loss_dict["loss"].detach().cpu().item()
            ) / (batch_idx + 1)
            if "acc" in loss_dict["stats"]:
                self.train_acc_avg = (
                    self.train_acc_avg * batch_idx + loss_dict["stats"]["acc"].detach().cpu().item()
                ) / (batch_idx + 1)
            self.log(loss_dict, tag="train")
            if self.step_in_epoch % self.validate_interval == 0:
                self.validate_epoch(
                    model=model,
                    dataloader_val=dataloader_val,
                    epoch=epoch,
                    writer=writer,
                    writer=self.writer,
                    step=batch_idx + 1,
                    step_in_epoch=self.step_in_epoch,
                )
@@ -421,41 +608,22 @@
                )
            time_beg = time.perf_counter()
        else:
            if self.use_ddp or self.use_fsdp:
                iterator_stop.fill_(1)
                dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
        if self.use_ddp or self.use_fsdp:
            dist.barrier()
            iterator_stop = torch.tensor(0).to(self.device)
        if self.use_ddp or self.use_fsdp or self.use_deepspeed:
            train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(self.device)
            train_acc_avg = torch.tensor(self.train_acc_avg, dtype=torch.float32).to(self.device)
            dist.all_reduce(train_loss_avg, op=dist.ReduceOp.SUM)
            dist.all_reduce(train_acc_avg, op=dist.ReduceOp.SUM)
            self.train_loss_avg = train_loss_avg.detach().cpu().item() / self.world_size
            self.train_acc_avg = train_acc_avg.detach().cpu().item() / self.world_size
    def forward_step(self, model, batch, loss_dict={}):
        with maybe_autocast(self.use_fp16):
        dtype = torch.bfloat16
        with maybe_autocast(dtype=self.dtype, use_deepspeed=self.use_deepspeed):
            retval = model(**batch)
            if (
                self.reset_gpu_cache
                and (torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024) > 70
            ):
                torch.cuda.empty_cache()
        loss, stats, weight = retval
        stats = {k: v for k, v in stats.items() if v is not None}
        # if self.use_ddp or self.use_fsdp:
        #     # Apply weighted averaging for loss and stats
        #     loss = (loss * weight.type(loss.dtype)).sum()
        #     # if distributed, this method can also apply all_reduce()
        #     # stats, weight = recursive_average(stats, weight, distributed=True)
        #     if self.use_ddp or self.use_fsdp:
        #         dist.all_reduce(weight, op=dist.ReduceOp.SUM)
        #     # Now weight is summation over all workers
        #     loss /= weight.sum()  # shape:[1] -> shape:[]
        #     # Multiply world_size because DistributedDataParallel
        #     # automatically normalizes the gradient by world_size.
        #     loss *= self.world_size
        # loss *= self.world_size
        # Scale the loss since we're not updating for every mini-batch
        loss_dict["loss"] = loss
        loss_dict["stats"] = stats
@@ -473,69 +641,37 @@
            else:
                loss.backward()
    def update_step(self, model, optim, scheduler, scaler, batch_idx=0, loss_dict=loss_dict):
        if (batch_idx + 1) % self.accum_grad == 0:
            # Perform gradient clipping if it is set
            if self.grad_clip > 0:
                grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(),
                    max_norm=self.grad_clip,
                    norm_type=self.grad_clip_type,
                )
                if not torch.isfinite(grad_norm):
                    logging.warning(f"The grad norm is {grad_norm}. Skipping updating the model.")
                    optim.zero_grad()  # Reset gradients
                    return
    def update_step(self, model, optim, scheduler, scaler, loss_dict=None):
        batch_idx = loss_dict["batch_idx"]
        if self.use_deepspeed:
            model.step()
        else:
            if (batch_idx + 1) % self.accum_grad == 0:
                # Perform gradient clipping if it is set
                if self.grad_clip > 0:
                    grad_norm = torch.nn.utils.clip_grad_norm_(
                        model.parameters(),
                        max_norm=self.grad_clip,
                        norm_type=self.grad_clip_type,
                    )
                    if not torch.isfinite(grad_norm):
                        logging.warning(
                            f"The grad norm is {grad_norm}. Skipping updating the model."
                        )
                        optim.zero_grad()  # Reset gradients
                        return
            # Execute an optimization step (update model parameters)
            if self.use_ddp or self.use_fsdp:
                dist.barrier()
            if self.use_fp16:
                scaler.step(optim)
                scaler.update()
            else:
                optim.step()
            scheduler.step()
            # Clear gradients for the next accumulation stage
            optim.zero_grad(set_to_none=True)
            if self.use_ddp or self.use_fsdp:
                train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(
                    self.device
                )
                train_acc_avg = torch.tensor(self.train_acc_avg, dtype=torch.float32).to(
                    self.device
                )
                dist.all_reduce(train_loss_avg, op=dist.ReduceOp.SUM)
                dist.all_reduce(train_acc_avg, op=dist.ReduceOp.SUM)
                self.train_loss_avg = train_loss_avg.detach().cpu().item() / self.world_size
                self.train_acc_avg = train_acc_avg.detach().cpu().item() / self.world_size
            total_time = f"{(time.perf_counter() - time5) / accum_grad:0.3f}"
            time5 = time.perf_counter()
            speed_stats["optim_time"] = f"{time5 - time4:0.3f}"
            speed_stats["total_time"] = total_time
            lr = scheduler.get_last_lr()[0]
            batch_num_epoch = 1
            if hasattr(dataloader_train, "__len__"):
                batch_num_epoch = len(dataloader_train)
            self.log(
                epoch,
                batch_idx,
                log_step=batch_idx + kwargs.get("start_step", 0),
                step_in_epoch=self.step_in_epoch,
                batch_num_epoch=batch_num_epoch,
                lr=lr,
                loss=loss.detach().cpu().item(),
                speed_stats=speed_stats,
                stats=stats,
                writer=writer,
                tag="train",
                data_split_i=kwargs.get("data_split_i", 0),
                data_split_num=kwargs.get("data_split_num", 1),
            )
                # Execute an optimization step (update model parameters)
                if self.use_ddp or self.use_fsdp:
                    dist.barrier()
                if self.use_fp16:
                    scaler.step(optim)
                    scaler.update()
                else:
                    optim.step()
                scheduler.step()
                # Clear gradients for the next accumulation stage
                optim.zero_grad(set_to_none=True)
    def validate_epoch(
        self,
@@ -552,7 +688,7 @@
        Args:
            epoch (int): The current epoch number.
        """
        if self.use_ddp or self.use_fsdp:
        if self.use_ddp or self.use_fsdp or self.use_deepspeed:
            dist.barrier()
        logging.info(f"Validate epoch: {epoch}, rank: {self.rank}\n")
        model.eval()
@@ -560,77 +696,61 @@
        with torch.no_grad():
            speed_stats = {}
            time5 = time.perf_counter()
            iterator_stop = torch.tensor(0).to(self.device)
            time_beg = time.perf_counter()
            time5 = time_beg
            dataloader_val.batch_sampler.set_epoch(epoch)
            for batch_idx, batch in enumerate(dataloader_val):
                if self.use_ddp or self.use_fsdp:
                    dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
                    if iterator_stop > 0:
                        break
                loss_dict = {
                    "speed_stats": {},
                    "epoch": epoch,
                    "batch_idx": batch_idx,
                    "data_split_i": kwargs.get("data_split_i", 0),
                    "data_split_num": kwargs.get("data_split_num", 1),
                    "log_step": batch_idx + kwargs.get("start_step", 0),
                    "batch_total": batch_idx,
                    "step_in_epoch": batch_idx,
                    "lr": 0.0,
                }
                time1 = time.perf_counter()
                speed_stats["data_load"] = f"{time1 - time5:0.3f}"
                loss_dict["speed_stats"]["data_load"] = f"{time1 - time_beg:0.3f}"
                batch = to_device(batch, self.device)
                time2 = time.perf_counter()
                retval = model(**batch)
                self.forward_step(model, batch, loss_dict=loss_dict)
                time3 = time.perf_counter()
                speed_stats["forward_time"] = f"{time3 - time2:0.3f}"
                loss, stats, weight = retval
                stats = {k: v for k, v in stats.items() if v is not None}
                if self.use_ddp or self.use_fsdp:
                    # Apply weighted averaging for loss and stats
                    loss = (loss * weight.type(loss.dtype)).sum()
                    # if distributed, this method can also apply all_reduce()
                    # stats, weight = recursive_average(stats, weight, distributed=True)
                    if self.use_ddp or self.use_fsdp:
                        dist.all_reduce(weight, op=dist.ReduceOp.SUM)
                    # Now weight is summation over all workers
                    loss /= weight.sum()  # shape:[1] -> shape:[]
                    # Multiply world_size because DistributedDataParallel
                    # automatically normalizes the gradient by world_size.
                    loss *= self.world_size
                # Scale the loss since we're not updating for every mini-batch
                loss = loss
                time4 = time.perf_counter()
                loss_dict["speed_stats"]["forward_time"] = f"{time3 - time2:0.3f}"
                self.val_loss_avg = (self.val_loss_avg * batch_idx + loss.detach().cpu().item()) / (
                    batch_idx + 1
                )
                if "acc" in stats:
                    self.val_acc_avg = (
                        self.val_acc_avg * batch_idx + stats["acc"].detach().cpu().item()
                    ) / (batch_idx + 1)
                if self.use_ddp or self.use_fsdp:
                    val_loss_avg = torch.tensor(self.val_loss_avg, dtype=torch.float32).to(
                        self.device
                    )
                    val_acc_avg = torch.tensor(self.val_acc_avg, dtype=torch.float32).to(
                        self.device
                    )
                    dist.all_reduce(val_loss_avg, op=dist.ReduceOp.SUM)
                    dist.all_reduce(val_acc_avg, op=dist.ReduceOp.SUM)
                    self.val_loss_avg = val_loss_avg.detach().cpu().item() / self.world_size
                    self.val_acc_avg = val_acc_avg.detach().cpu().item() / self.world_size
                total_time = f"{(time.perf_counter() - time5):0.3f}"
                time5 = time.perf_counter()
                batch_num_epoch = 1
                if hasattr(dataloader_val, "__len__"):
                    batch_num_epoch = len(dataloader_val)
                self.log(
                    epoch,
                    batch_idx,
                    batch_num_epoch=batch_num_epoch,
                    lr=0.0,
                    loss=loss.detach().cpu().item(),
                    speed_stats=speed_stats,
                    stats=stats,
                    writer=writer,
                    tag="val",
                )
            else:
                if self.use_ddp or self.use_fsdp:
                    iterator_stop.fill_(1)
                    dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
                loss_dict["speed_stats"]["total_time"] = total_time
                loss_dict["batch_num_epoch"] = len(dataloader_val)
                self.log(loss_dict, tag="val")
                time_beg = time.perf_counter()
                self.val_loss_avg = (
                    self.val_loss_avg * batch_idx + loss_dict["loss"].detach().cpu().item()
                ) / (batch_idx + 1)
                if "acc" in loss_dict["stats"]:
                    self.val_acc_avg = (
                        self.val_acc_avg * batch_idx
                        + loss_dict["stats"]["acc"].detach().cpu().item()
                    ) / (batch_idx + 1)
            if self.use_ddp or self.use_fsdp or self.use_deepspeed:
                val_loss_avg = torch.tensor(self.val_loss_avg, dtype=torch.float32).to(self.device)
                val_acc_avg = torch.tensor(self.val_acc_avg, dtype=torch.float32).to(self.device)
                dist.all_reduce(val_loss_avg, op=dist.ReduceOp.SUM)
                dist.all_reduce(val_acc_avg, op=dist.ReduceOp.SUM)
                self.val_loss_avg = val_loss_avg.detach().cpu().item() / self.world_size
                self.val_acc_avg = val_acc_avg.detach().cpu().item() / self.world_size
        if kwargs.get("step_in_epoch", None) is None:
            ckpt_name = f"model.pt.ep{epoch}"
@@ -640,27 +760,25 @@
        self.val_loss_step_or_eoch[ckpt_name] = self.val_loss_avg
        model.train()
        if self.use_ddp or self.use_fsdp:
            dist.barrier()
            iterator_stop = torch.tensor(0).to(self.device)
    def log(
        self,
        epoch=0,
        batch_idx=0,
        step_in_epoch=0,
        batch_num_epoch=-1,
        lr=0.0,
        loss=0.0,
        speed_stats=None,
        stats=None,
        writer=None,
        loss_dict: dict = None,
        tag="train",
        data_split_i=0,
        data_split_num=1,
        log_step=None,
        **kwargs,
    ):
        loss = loss_dict["loss"].detach().cpu().item()
        epoch = loss_dict["epoch"]
        batch_idx = loss_dict["batch_idx"]
        step_in_epoch = loss_dict["step_in_epoch"]
        batch_total = loss_dict["batch_total"]
        batch_num_epoch = loss_dict["batch_num_epoch"]
        lr = loss_dict["lr"]
        speed_stats = loss_dict["speed_stats"]
        stats = loss_dict["stats"]
        data_split_i = loss_dict["data_split_i"]
        data_split_num = loss_dict["data_split_num"]
        log_step = loss_dict.get("log_step", None)
        if (batch_idx + 1) % self.log_interval == 0:
            batch_idx = log_step if log_step is not None else batch_idx
@@ -683,7 +801,7 @@
                f"rank: {self.rank}, "
                f"epoch: {epoch}/{self.max_epoch}, "
                f"data_slice: {data_split_i}/{data_split_num}, "
                f"step_in_slice: {batch_idx + 1}/{batch_num_epoch}, step_in_epoch: {step_in_epoch}, total step: {self.batch_total}, "
                f"step_in_slice: {batch_idx + 1}/{batch_num_epoch}, step_in_epoch: {step_in_epoch}, total step: {batch_total}, "
                f"(loss_avg_rank: {loss:.3f}), "
                f"(loss_avg_slice: {loss_avg_epoch:.3f}), "
                f"(ppl_avg_slice: {math.exp(loss_avg_epoch):.3e}), "
@@ -700,23 +818,20 @@
                f"rank{self.rank}_lr/{tag}": lr,
            }
            writer = self.writer
            if writer is not None:
                writer.add_scalar(f"rank{self.rank}_loss/{tag}", loss, self.batch_total)
                writer.add_scalar(f"rank{self.rank}_lr/{tag}", lr, self.batch_total)
                writer.add_scalar(f"rank{self.rank}_loss/{tag}", loss, batch_total)
                writer.add_scalar(f"rank{self.rank}_lr/{tag}", lr, batch_total)
                for key, var in stats.items():
                    writer.add_scalar(
                        f"stats_rank{self.rank}_{key}/{tag}", var.item(), self.batch_total
                    )
                    writer.add_scalar(f"stats_rank{self.rank}_{key}/{tag}", var.item(), batch_total)
                    description_dict[f"stats_rank{self.rank}_{key}/{tag}"] = var.item()
                for key, var in speed_stats.items():
                    writer.add_scalar(
                        f"stats_rank{self.rank}_{key}/{tag}", eval(var), self.batch_total
                    )
                    writer.add_scalar(f"stats_rank{self.rank}_{key}/{tag}", eval(var), batch_total)
                    description_dict[f"stats_rank{self.rank}_{key}/{tag}"] = eval(var)
            if self.use_wandb and wandb is not None:
                wandb.log(
                    description_dict,
                    setp=self.batch_total,
                    setp=batch_total,
                )
    def close(self, writer=None):
@@ -770,31 +885,62 @@
                    "find_unused_parameters", False
                ),
            )
        # elif self.use_fsdp:
        #     # model = FSDP(model).cuda(local_rank)
        #
        #     def custom_auto_wrap_policy(
        #         module: nn.Module,
        #         recurse: bool,
        #         nonwrapped_numel: int,
        #         # Additional custom arguments
        #         min_num_params: int = int(1e8),
        #     ) -> bool:
        #         # Decide whether to wrap the module based on custom logic
        #         is_large = unwrapped_params >= min_num_params
        #         requires_grad_uniform = len({p.requires_grad for p in module.parameters()}) == 1
        #         return is_large and requires_grad_uniform
        #
        #     # Configure a custom `min_num_params`
        #     my_auto_wrap_policy = functools.partial(custom_auto_wrap_policy, min_num_params=int(1e5))
        #     torch.cuda.set_device(local_rank)
        #     model = FSDP(
        #         model,
        #         auto_wrap_policy=custom_auto_wrap_policy,
        #         mixed_precision=None,
        #         device_id=torch.cuda.current_device(),
        #     )
        else:
            model = model.to(device=kwargs.get("device", "cuda"))
        return model
    def warp_optim_scheduler(self, model, **kwargs):
        from funasr.optimizers import optim_classes
        from funasr.schedulers import scheduler_classes
        from omegaconf import OmegaConf, DictConfig
        import json
        # optim
        logging.info("Build optim")
        optim = kwargs.get("optim", "adam")
        assert optim in optim_classes
        optim_class = optim_classes.get(optim)
        optim = optim_class(model.parameters(), **kwargs.get("optim_conf"))
        # scheduler
        logging.info("Build scheduler")
        scheduler = kwargs.get("scheduler", "warmuplr")
        assert scheduler in scheduler_classes
        scheduler_class = scheduler_classes.get(scheduler)
        scheduler = scheduler_class(optim, **kwargs.get("scheduler_conf"))
        if self.use_deepspeed:
            import deepspeed
            args = OmegaConf.create({"deepspeed_config": self.deepspeed_config})
            with open(self.deepspeed_config, "r") as fin:
                ds_configs = json.load(fin)
            if "bf16" in ds_configs and ds_configs["bf16"]["enabled"]:
                self.dtype = torch.bfloat16
            if "fp16" in ds_configs and ds_configs["fp16"]["enabled"]:
                self.dtype = torch.float16
            if "optimizer" in ds_configs:
                # NOTE(xcsong): Disable custom optimizer if it is set in ds_config,
                # extremely useful when enable cpu_offload, DeepspeedCpuAdam
                # could be 4~5x faster than torch native adam
                optim = None
                if "scheduler" in ds_configs:
                    scheduler = None
                else:
                    def scheduler(opt):
                        return scheduler_class(opt, **kwargs.get("scheduler_conf"))
            model, optimizer, _, scheduler = deepspeed.initialize(
                args=args,
                model=model,
                optimizer=optim,
                lr_scheduler=scheduler,
                model_parameters=model.parameters(),
            )
        return model, optim, scheduler
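
Note: warp_optim_scheduler reads the DeepSpeed JSON config to pick the autocast dtype (bf16/fp16) and to decide whether DeepSpeed should own the optimizer and scheduler, which matters with CPU offload where DeepSpeedCPUAdam is much faster than torch's native Adam. An illustrative config consistent with what the code above inspects; values are placeholders, not FunASR defaults:

import json

ds_config = {
    "train_micro_batch_size_per_gpu": 1,
    "gradient_accumulation_steps": 1,
    "bf16": {"enabled": True},      # switches self.dtype to torch.bfloat16
    "fp16": {"enabled": False},     # would switch self.dtype to torch.float16
    "zero_optimization": {"stage": 1},
    # Adding an "optimizer" section makes the trainer pass optimizer=None so that
    # DeepSpeed builds its own (e.g. DeepSpeedCPUAdam when offloading to CPU):
    # "optimizer": {"type": "AdamW", "params": {"lr": 5e-4}},
}

with open("deepspeed_config.json", "w") as fout:
    json.dump(ds_config, fout, indent=2)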