游雁
2024-02-20 58b6154a73331a8807127d4579ed473432ce88de
update
2 files changed, 28 lines changed
funasr/bin/train.py            4 lines changed
funasr/train_utils/trainer.py  24 lines changed
funasr/bin/train.py
@@ -44,14 +44,16 @@
 def main(**kwargs):
     print(kwargs)
     # set random seed
-    tables.print()
     set_all_random_seed(kwargs.get("seed", 0))
     torch.backends.cudnn.enabled = kwargs.get("cudnn_enabled", torch.backends.cudnn.enabled)
     torch.backends.cudnn.benchmark = kwargs.get("cudnn_benchmark", torch.backends.cudnn.benchmark)
     torch.backends.cudnn.deterministic = kwargs.get("cudnn_deterministic", True)
 
+    local_rank = int(os.environ.get('LOCAL_RANK', 0))
+    if local_rank == 0:
+        tables.print()
     # Check if we are using DDP or FSDP
     use_ddp = 'WORLD_SIZE' in os.environ and int(os.environ["WORLD_SIZE"]) > 1
     use_fsdp = kwargs.get("use_fsdp", None)
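Note on the hunk above: the registry dump `tables.print()` is now gated so that only the rank-0 process emits it; under multi-process launchers such as torchrun, which export LOCAL_RANK for every worker, this avoids N identical dumps. A minimal sketch of the same gating pattern, independent of FunASR:

import os

# torchrun / torch.distributed launchers export LOCAL_RANK per worker;
# a plain single-process run has no such variable, so default to rank 0.
local_rank = int(os.environ.get("LOCAL_RANK", 0))

if local_rank == 0:
    # Only one process per node prints the (long) output.
    print("registered tables ...")  # stands in for tables.print()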
funasr/train_utils/trainer.py
@@ -69,6 +69,7 @@
         self.device = next(model.parameters()).device
         self.avg_nbest_model = kwargs.get("avg_nbest_model", 5)
         self.kwargs = kwargs
+        self.log_interval = kwargs.get("log_interval", 50)
 
 
         try:
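The new `log_interval` knob (default 50) controls how often the loops below refresh the progress bar and write TensorBoard scalars; like the trainer's other options it is read from the kwargs. A trivial sketch of the lookup pattern (function name hypothetical):

def resolve_log_interval(**kwargs):
    # Mirrors the trainer: log every 50 steps unless the caller overrides it.
    return kwargs.get("log_interval", 50)

assert resolve_log_interval() == 50                  # default
assert resolve_log_interval(log_interval=10) == 10   # user override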
@@ -274,8 +275,8 @@
 
-            if self.local_rank == 0:
-                pbar.update(1)
+            if batch_idx % self.log_interval == 0 or batch_idx == len(self.dataloader_train) - 1:
+                pbar.update(self.log_interval)
                 gpu_info = "GPU, memory: {:.3f} GB, " \
                            "{:.3f} GB, "\
                            "{:.3f} GB, "\
@@ -285,23 +286,23 @@
                                              torch.cuda.max_memory_reserved()/1024/1024/1024,
                                              )
                 description = (
+                    f"rank: {self.local_rank}, "
                     f"Train epoch: {epoch}/{self.max_epoch}, "
                     f"step {batch_idx}/{len(self.dataloader_train)}, "
                     f"{speed_stats}, "
                     f"(loss: {loss.detach().cpu().item():.3f}), "
                     f"{[(k, round(v.cpu().item(), 3)) for k, v in stats.items()]}"
                     f"{gpu_info}"
-                    f"rank: {self.local_rank}"
                 )
                 pbar.set_description(description)
                 if self.writer:
-                    self.writer.add_scalar('Loss/train', loss.item(),
+                    self.writer.add_scalar(f'rank{self.local_rank}, Loss/train', loss.item(),
                                            epoch*len(self.dataloader_train) + batch_idx)
                     for key, var in stats.items():
-                        self.writer.add_scalar(f'{key}/train', var.item(),
+                        self.writer.add_scalar(f'rank{self.local_rank}, {key}/train', var.item(),
                                                epoch * len(self.dataloader_train) + batch_idx)
                     for key, var in speed_stats.items():
-                        self.writer.add_scalar(f'{key}/train', eval(var),
+                        self.writer.add_scalar(f'rank{self.local_rank}, {key}/train', eval(var),
                                                epoch * len(self.dataloader_train) + batch_idx)
 
             # if batch_idx == 2:
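Because the old rank-0 guard is gone, every worker now writes scalars, so the commit prefixes each tag with the rank (and moves the rank to the front of the progress description) to keep the curves distinguishable. A minimal sketch of per-rank tagging with torch.utils.tensorboard (rank, log dir, step, and values are illustrative stand-ins):

from torch.utils.tensorboard import SummaryWriter

local_rank = 0                    # would come from the launcher in real use
writer = SummaryWriter("exp/tb")  # hypothetical log dir

global_step = 123                 # epoch * len(dataloader) + batch_idx above
# The rank-prefixed tag keeps each worker's curve separate in the dashboard.
writer.add_scalar(f"rank{local_rank}, Loss/train", 0.42, global_step)
writer.close()

A common alternative is giving each rank its own log_dir instead of encoding the rank in the tag. Separately, the `eval(var)` calls suggest the speed stats are stored as formatted strings; `float(var)` would be a narrower way to convert them.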
@@ -347,9 +348,10 @@
                 time4 = time.perf_counter()
 
-                if self.local_rank == 0:
-                    pbar.update(1)
+                if batch_idx % self.log_interval == 0 or batch_idx == len(self.dataloader_train) - 1:
+                    pbar.update(self.log_interval)
                     description = (
+                        f"rank: {self.local_rank}, "
                         f"validation epoch: {epoch}/{self.max_epoch}, "
                         f"step {batch_idx}/{len(self.dataloader_train)}, "
                         f"{speed_stats}, "
@@ -359,11 +361,11 @@
                     )
                     pbar.set_description(description)
                     if self.writer:
-                        self.writer.add_scalar('Loss/val', loss.item(),
+                        self.writer.add_scalar(f"rank{self.local_rank}, Loss/val", loss.item(),
                                                epoch*len(self.dataloader_train) + batch_idx)
                         for key, var in stats.items():
-                            self.writer.add_scalar(f'{key}/val', var.item(),
+                            self.writer.add_scalar(f'rank{self.local_rank}, {key}/val', var.item(),
                                                    epoch * len(self.dataloader_train) + batch_idx)
                         for key, var in speed_stats.items():
-                            self.writer.add_scalar(f'{key}/val', eval(var),
+                            self.writer.add_scalar(f'rank{self.local_rank}, {key}/val', eval(var),
                                                    epoch * len(self.dataloader_train) + batch_idx)
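Usage note: the validation loop mirrors the training changes, so a run launched via e.g. `torchrun --nproc_per_node=2 funasr/bin/train.py ...` (command illustrative) produces one progress description and one set of `rank{n}, .../train` and `rank{n}, .../val` curves per worker, while a single-process run behaves as before since `LOCAL_RANK` defaults to 0.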