Dev gzf exp (#1670)
* resume from step
* batch
Key hunks from the diff are excerpted below; elided context is marked with `# ...`.

Updated imports for AMP and distributed training:

```python
from contextlib import nullcontext
import torch.distributed as dist
from collections.abc import Sequence

from omegaconf import DictConfig, OmegaConf
from torch.cuda.amp import autocast, GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
```
Freezing selected parameter groups before training:

```python
if freeze_param is not None:
    if "," in freeze_param:
        # The config may pass a tuple literal as a string, e.g. "'enc', 'dec'";
        # eval turns it into a tuple here.
        freeze_param = eval(freeze_param)
    if not isinstance(freeze_param, (list, tuple)):
        freeze_param = (freeze_param,)
    logging.info("freeze_param is not None: %s", freeze_param)
    for t in freeze_param:
        # ...
```
TensorBoard writer setup; the rank guard is commented out here, so every rank creates a writer:

```python
try:
    from tensorboardX import SummaryWriter

    writer = SummaryWriter(tensorboard_dir)  # if trainer.rank == 0 else None
except ImportError:
    writer = None
```
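If per-rank writers are kept, one way to avoid ranks clobbering each other's event files is a per-rank log subdirectory. This is a hypothetical sketch, not part of this PR; `make_writer` and the directory layout are assumptions:

```python
import os


def make_writer(tensorboard_dir, rank):
    # Hypothetical helper: one TensorBoard subdirectory per rank so that
    # concurrent writers do not collide on the same event file.
    try:
        from tensorboardX import SummaryWriter
    except ImportError:
        return None
    return SummaryWriter(os.path.join(tensorboard_dir, f"rank{rank}"))
```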
Per-epoch driver: the dataloader starts from `trainer.start_step` on the first (resumed) epoch only, and a full checkpoint is written at the end of each epoch:

```python
# ...
    epoch, data_split_i=data_split_i, start_step=trainer.start_step
)
trainer.start_step = 0  # only the first epoch after a resume skips steps

trainer.train_epoch(
    model=model,
    optim=optim,
    # ...
)
# ...
    model=model, dataloader_val=dataloader_val, epoch=epoch, writer=writer
)
scheduler.step()

trainer.step_cur_in_epoch = 0  # epoch finished; reset the in-epoch counter
trainer.save_checkpoint(epoch, model=model, optim=optim, scheduler=scheduler, scaler=scaler)

time2 = time.perf_counter()
```
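To make the resume semantics concrete, here is a minimal, self-contained sketch of the idea; `ToyDataset` and `batches` are hypothetical names, not part of this PR:

```python
from torch.utils.data import DataLoader, Dataset


class ToyDataset(Dataset):
    def __len__(self):
        return 100

    def __getitem__(self, i):
        return i


def batches(dataset, start_step, batch_size=10):
    # Skip the first `start_step` batches when resuming mid-epoch.
    for step, batch in enumerate(DataLoader(dataset, batch_size=batch_size)):
        if step < start_step:
            continue
        yield step, batch


start_step = 3  # e.g. restored from checkpoint["step_cur_in_epoch"]
for step, batch in batches(ToyDataset(), start_step):
    pass  # one training step per batch
start_step = 0  # subsequent epochs start from the beginning
```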
Dataset construction: batching knobs plus a retry budget for unusable samples:

```python
self.batch_size = kwargs.get("batch_size")
self.batch_type = kwargs.get("batch_type")
self.prompt_ids_len = 0
self.retry = kwargs.get("retry", 5)  # max attempts to fetch a usable sample

def get_source_len(self, index):
    item = self.index_ds[index]
    # ...

# ...
    return len(self.index_ds)
```
`__getitem__` now retries nearby or random indices instead of returning early when a sample is too long:

```python
def __getitem__(self, index):
    item = self.index_ds[index]

    output = None
    for idx in range(self.retry):
        if idx == 0:
            index_cur = index
        else:
            # Retry with a neighboring index for small indices, otherwise
            # with a random earlier index.
            if index <= self.retry:
                index_cur = index + idx
            else:
                index_cur = torch.randint(0, index, ()).item()

        item = self.index_ds[index_cur]

        source = item["source"]
        data_src = load_audio_text_image_video(source, fs=self.fs)
        if self.preprocessor_speech:
            # ...
        )  # speech: [b, T, d]

        if speech_lengths > self.batch_size:
            continue  # speech too long for the batch budget; retry
        speech = speech.permute(0, 2, 1)
        target = item["target"]
        if self.preprocessor_text:
            # ...

        target_ids = self.tokenizer.encode(target, allowed_special="all")
        target_ids_len = len(target_ids) + 1  # [lid, text]
        if target_ids_len > 200:
            continue  # target too long; retry

        eos = self.tokenizer.encode(self.eos, allowed_special="all")  # [eos]
        # ...
        target_mask_lengths = len(target_mask)
        target_mask = torch.tensor(target_mask, dtype=torch.float32)
        target_mask_lengths = torch.tensor([target_mask_lengths], dtype=torch.int32)
        output = {
            "speech": speech[0, :, :],
            "speech_lengths": speech_lengths,
            "text": text,
            # ...
            "target_mask": target_mask,
            "target_mask_lengths": target_mask_lengths,
        }
        break

    return output
```
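Because `__getitem__` can still return `None` after exhausting its retries, the downstream collator has to tolerate missing samples. A minimal sketch of that contract; `collate_skip_none` is a hypothetical helper, not the PR's collator:

```python
def collate_skip_none(samples):
    # Drop samples for which __getitem__ gave up and returned None.
    samples = [s for s in samples if s is not None]
    outputs = {}
    for sample in samples:
        for key, value in sample.items():
            outputs.setdefault(key, []).append(value)
    return outputs
```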
The collator logs an error and substitutes a dummy batch (now with an explicit batch dimension) when every sample was dropped:

```python
def collator(self, samples: list = None):
    outputs = {}
    # ...
            outputs[key].append(sample[key])

    if len(outputs) < 1:
        logging.error("ERROR: data is empty!")
        # Fall back to a minimal dummy batch so the training step can proceed.
        outputs = {
            "speech": torch.rand((10, 128), dtype=torch.float32)[None, :, :],
            "speech_lengths": torch.tensor([10], dtype=torch.int32)[:, None],
            "text": torch.tensor([58836], dtype=torch.int32)[None, :],
            "text_lengths": torch.tensor([1], dtype=torch.int32)[:, None],
            "target_mask": torch.tensor([[0] * self.prompt_ids_len + [1] + [1]])[None, :],
        }
        return outputs

    # ...
    )
    for key, data_list in outputs.items():
        outputs[key] = outputs[key][beg : beg + b : 2]

    # Trim padding down to the longest speech sequence in the batch.
    speech_lengths_max = outputs["speech_lengths"].max().item()
    outputs["speech"] = outputs["speech"][:, :speech_lengths_max, :]
```
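The `[None, ...]` indexing in the fallback adds the leading batch dimension; for example:

```python
import torch

speech = torch.rand((10, 128), dtype=torch.float32)[None, :, :]
speech_lengths = torch.tensor([10], dtype=torch.int32)[:, None]
assert speech.shape == (1, 10, 128)    # [b, T, d]
assert speech_lengths.shape == (1, 1)  # [b, 1]
```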
Trainer construction: resume bookkeeping and optional wandb:

```python
self.reset_gpu_cache = kwargs.get("reset_gpu_cache", False)
self.start_data_split_i = 0
self.start_step = 0
self.step_cur_in_epoch = 0
self.use_wandb = kwargs.get("use_wandb", False)
if self.use_wandb:
    wandb.login(key=kwargs.get("wandb_token"))
```
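Only `wandb.login` appears in this hunk; a run would typically also be initialized afterwards. A hedged sketch, with the helper name and project string purely illustrative:

```python
import wandb


def init_wandb(token, project="my-asr-exp"):
    # Illustrative only: log in with the configured token, then open a run.
    wandb.login(key=token)
    return wandb.init(project=project)
```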
`save_checkpoint` now records the in-epoch step and data-split position:

```python
def save_checkpoint(
    self,
    epoch,
    model=None,
    step=None,
    optim=None,
    scheduler=None,
    scaler=None,
    step_cur_in_epoch=None,
    **kwargs,
):
    """
    Saves a checkpoint containing the model's state, the optimizer's state,
    and the scheduler's state.

    Args:
        epoch (int): The epoch number at which the checkpoint is being saved.
    """

    step_cur_in_epoch = None if step is None else step_cur_in_epoch
    if self.rank == 0:
        logging.info(f"Save checkpoint: {epoch}, rank: {self.local_rank}\n")
        state = {
            # ...
            "best_step_or_epoch": self.best_step_or_epoch,
            "avg_keep_nbest_models_type": self.avg_keep_nbest_models_type,
            "step": step,
            "step_cur_in_epoch": step_cur_in_epoch,
            "data_split_i": kwargs.get("data_split_i", 0),
            "data_split_num": kwargs.get("data_split_num", 1),
            "batch_total": self.batch_total,
        }
        step = step_cur_in_epoch
        if hasattr(model, "module"):
            # Unwrap DDP so the checkpoint loads without the wrapper.
            state["state_dict"] = model.module.state_dict()
```
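A quick round-trip of such a state dict; the path and values are illustrative only:

```python
import torch

state = {"step": 100, "step_cur_in_epoch": 100, "batch_total": 5000}
torch.save(state, "/tmp/model.pt.ep0.100")
checkpoint = torch.load("/tmp/model.pt.ep0.100", map_location="cpu")
assert checkpoint["step_cur_in_epoch"] == 100
```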
On load, the resume fields tolerate older checkpoints that lack them:

```python
# Restore resume bookkeeping, defaulting to 0 for missing or None fields.
self.batch_total = checkpoint.get("batch_total", 0)
self.start_step = checkpoint.get("step") or 0
self.step_cur_in_epoch = checkpoint.get("step_cur_in_epoch") or 0

model.to(self.device)
print(f"Checkpoint loaded successfully from '{ckpt}'")
```
`train_epoch` counts both the global batch total and the step within the current epoch:

```python
    """
    if self.use_ddp or self.use_fsdp:
        dist.barrier()
    logging.info(f"Train epoch: {epoch}, rank: {self.rank}\n")
    model.train()

    # Set the number of steps for gradient accumulation
    # ...
        if iterator_stop > 0:
            break
        self.batch_total += 1
        self.step_cur_in_epoch += 1
        time1 = time.perf_counter()
        speed_stats["data_load"] = f"{time1 - time_beg:0.3f}"
```
Inside the loop, progress is logged and a mid-epoch checkpoint is written every `save_checkpoint_interval` batches:

```python
        # ...
        self.log(
            epoch,
            batch_idx,
            step_cur_in_epoch=self.step_cur_in_epoch,
            batch_num_epoch=batch_num_epoch,
            lr=lr,
            loss=loss.detach().cpu().item(),
            # ...
        )
        # ...
            epoch=epoch,
            writer=writer,
            step=batch_idx + 1,
            step_cur_in_epoch=self.step_cur_in_epoch,
        )

        if (batch_idx + 1) % self.save_checkpoint_interval == 0:
            # Mid-epoch checkpoint so an interrupted run can resume here.
            self.save_checkpoint(
                epoch,
                model=model,
                optim=optim,
                scheduler=scheduler,
                scaler=scaler,
                step=batch_idx + 1,
                step_cur_in_epoch=self.step_cur_in_epoch,
                data_split_i=kwargs.get("data_split_i", 0),
                data_split_num=kwargs.get("data_split_num", 1),
            )

        time_beg = time.perf_counter()
```
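The hunk above mentions gradient accumulation, which the imported `autocast`/`GradScaler` pair supports. A minimal, runnable sketch of that pattern, assuming an `accum_grad` hyperparameter (not named in this diff):

```python
import torch

model = torch.nn.Linear(4, 2)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(enabled=False)  # set enabled=True on CUDA
accum_grad = 4  # assumed number of accumulation steps

for batch_idx in range(8):
    x = torch.randn(3, 4)
    with torch.cuda.amp.autocast(enabled=False):
        loss = model(x).sum() / accum_grad  # scale loss by accumulation steps
    scaler.scale(loss).backward()
    if (batch_idx + 1) % accum_grad == 0:
        scaler.step(optim)
        scaler.update()
        optim.zero_grad()
```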
`validate_epoch` names mid-epoch validation results after `step_cur_in_epoch` rather than the raw step:

```python
    """
    if self.use_ddp or self.use_fsdp:
        dist.barrier()
    logging.info(f"Validate epoch: {epoch}, rank: {self.rank}\n")
    model.eval()

    with torch.no_grad():
        # ...
            iterator_stop.fill_(1)
            dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)

    if kwargs.get("step_cur_in_epoch", None) is None:
        ckpt_name = f"model.pt.ep{epoch}"
    else:
        ckpt_name = f'model.pt.ep{epoch}.{kwargs.get("step_cur_in_epoch")}'
    self.val_acc_step_or_eoch[ckpt_name] = self.val_acc_avg
    self.val_loss_step_or_eoch[ckpt_name] = self.val_loss_avg
    model.train()
```
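These per-checkpoint metrics allow picking a best model later. A sketch of that selection (the dictionary name is copied from the diff, including its spelling; the values are illustrative):

```python
val_acc_step_or_eoch = {
    "model.pt.ep0": 0.81,
    "model.pt.ep0.2000": 0.84,
    "model.pt.ep1": 0.83,
}
best_ckpt = max(val_acc_step_or_eoch, key=val_acc_step_or_eoch.get)
assert best_ckpt == "model.pt.ep0.2000"
```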
The `log` helper reports both counters, plus the data slice, in one line:

```python
def log(
    self,
    epoch=0,
    batch_idx=0,
    step_cur_in_epoch=0,
    batch_num_epoch=-1,
    lr=0.0,
    loss=0.0,
    # ...
):
    # ...
        f"{tag}, "
        f"rank: {self.rank}, "
        f"epoch: {epoch}/{self.max_epoch}, "
        f"step_cur_in_epoch: {step_cur_in_epoch}, "
        f"data_slice: {data_split_i}/{data_split_num}, "
        f"step: {batch_idx + 1}/{batch_num_epoch}, total step: {self.batch_total}, "
        f"(loss_avg_rank: {loss:.3f}), "
```