| | |
| | | LFR_outputs = torch.vstack(LFR_inputs) |
| | | return LFR_outputs.type(torch.float32) |
| | | |
| | | @tables.register("frontend_classes", "wav_frontend") |
| | | @tables.register("frontend_classes", "WavFrontend") |
| | | class WavFrontend(nn.Module): |
| | | """Conventional frontend structure for ASR. |
| | |
| | | """ |
# Resume training state from a previously saved checkpoint, if one exists.
# NOTE(review): this is the interior of a trainer method — `self`,
# `resume_path`, and `dist` come from the enclosing scope (confirm upstream).
ckpt = os.path.join(resume_path, "model.pt")
if os.path.isfile(ckpt):
    # Load onto CPU regardless of the device the checkpoint was saved on;
    # a plain torch.load(ckpt) would try to restore tensors to their
    # original (possibly unavailable) GPU. The model is moved to
    # self.device below, so loading to CPU first is always safe.
    checkpoint = torch.load(ckpt, map_location="cpu")
    # Checkpoint stores the last completed epoch; resume from the next one.
    self.start_epoch = checkpoint['epoch'] + 1
    src_state = checkpoint['state_dict']

else:
    print(f"No checkpoint found at '{ckpt}', does not resume status!")

self.model.to(self.device)
if self.use_ddp or self.use_fsdp:
    # Keep all ranks in sync before training proceeds.
    dist.barrier()