游雁
2024-02-21 497edf4c9d6c1565a4bcf1a3edfcd47ffec8c10d
funasr/train_utils/trainer.py
@@ -128,7 +128,20 @@
        if os.path.isfile(ckpt):
            checkpoint = torch.load(ckpt)
            self.start_epoch = checkpoint['epoch'] + 1
            self.model.load_state_dict(checkpoint['state_dict'])
            # self.model.load_state_dict(checkpoint['state_dict'])
            src_state = checkpoint['state_dict']
            dst_state = self.model.state_dict()
            for k in dst_state.keys():
                if not k.startswith("module.") and "module."+k in src_state.keys():
                    k_ddp = "module."+k
                else:
                    k_ddp = k
                if k_ddp in src_state.keys():
                    dst_state[k] = src_state[k_ddp]
                else:
                    print(f"Miss key in ckpt: model: {k}, ckpt: {k_ddp}")
            self.model.load_state_dict(dst_state)
            self.optim.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            print(f"Checkpoint loaded successfully from '{ckpt}'")
@@ -273,11 +286,13 @@
                                             torch.cuda.memory_reserved()/1024/1024/1024,
                                             torch.cuda.max_memory_reserved()/1024/1024/1024,
                                             )
                # Current learning rate; get_last_lr() returns one value per
                # param group — only the first group is reported here.
                lr = self.scheduler.get_last_lr()[0]
                # Per-step progress line: rank / epoch / step counters, loss,
                # lr, auxiliary stats (moved to CPU for printing), throughput
                # and GPU-memory info assembled above.
                description = (
                    f"rank: {self.local_rank}, "
                    f"epoch: {epoch}/{self.max_epoch}, "
                    f"step: {batch_idx+1}/{len(self.dataloader_train)}, total: {self.batch_total}, "
                    f"(loss: {loss.detach().cpu().item():.3f}), "
                    f"(lr: {lr:.3e}), "
                    f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}, "
                    f"{speed_stats}, "
                    f"{gpu_info}"