游雁
2024-05-10 9be30f99dd09cfe0de929266ec43c1b95abb6d96
update avg slice
2 files changed
55 lines changed
funasr/bin/train.py 2
funasr/train_utils/trainer.py 53
funasr/bin/train.py
@@ -241,6 +241,8 @@
            f"estimated to finish {trainer.max_epoch} "
            f"epoch: {(trainer.max_epoch - epoch) * time_escaped:.3f} hours\n"
        )
+        trainer.train_acc_avg = 0.0
+        trainer.train_loss_avg = 0.0
    if trainer.rank == 0:
        average_checkpoints(trainer.output_dir, trainer.avg_nbest_model)
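
The two added lines reset the per-epoch running averages so each new epoch starts accumulating from zero; once training finishes, rank 0 averages the n-best checkpoints. As a rough sketch of what n-best weight averaging computes (a hypothetical helper, not the actual `average_checkpoints` implementation):

```python
import torch

def average_nbest_checkpoints(ckpt_paths):
    """Hypothetical sketch of n-best checkpoint averaging: element-wise
    mean of the model weights stored in each checkpoint file."""
    avg = None
    for path in ckpt_paths:
        state = torch.load(path, map_location="cpu")  # assumed flat state_dict
        if avg is None:
            avg = {k: v.clone().float() for k, v in state.items()}
        else:
            for k, v in state.items():
                avg[k] += v.float()
    return {k: v / len(ckpt_paths) for k, v in avg.items()}
```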
funasr/train_utils/trainer.py
@@ -384,19 +384,19 @@
                loss, stats, weight = retval
                stats = {k: v for k, v in stats.items() if v is not None}
-                # if self.use_ddp or self.use_fsdp:
-                #     # Apply weighted averaging for loss and stats
-                #     loss = (loss * weight.type(loss.dtype)).sum()
-                #     # if distributed, this method can also apply all_reduce()
-                #     # stats, weight = recursive_average(stats, weight, distributed=True)
-                #     if self.use_ddp or self.use_fsdp:
-                #         dist.all_reduce(weight, op=dist.ReduceOp.SUM)
-                #     # Now weight is summation over all workers
-                #     loss /= weight.sum()  # shape:[1] -> shape:[]
-                #     # Multiply world_size because DistributedDataParallel
-                #     # automatically normalizes the gradient by world_size.
-                #     loss *= self.world_size
+                if self.use_ddp or self.use_fsdp:
+                    # Apply weighted averaging for loss and stats
+                    loss = (loss * weight.type(loss.dtype)).sum()
+                    # if distributed, this method can also apply all_reduce()
+                    # stats, weight = recursive_average(stats, weight, distributed=True)
+                    if self.use_ddp or self.use_fsdp:
+                        dist.all_reduce(weight, op=dist.ReduceOp.SUM)
+                    # Now weight is summation over all workers
+                    loss /= weight.sum()  # shape:[1] -> shape:[]
+                    # Multiply world_size because DistributedDataParallel
+                    # automatically normalizes the gradient by world_size.
+                loss *= self.world_size
                # loss *= self.world_size
                # Scale the loss since we're not updating for every mini-batch
                loss = loss / accum_grad
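
The block that replaces the commented-out version applies weighted loss averaging across ranks: the loss is weighted and summed locally, the weights are summed over all workers via `all_reduce`, the result is normalized by the global weight, and finally scaled by `world_size` (per the source comment, DistributedDataParallel normalizes gradients by `world_size`, so the scaling compensates). A minimal sketch of the same arithmetic, assuming `dist.init_process_group()` has already been called, as the trainer guarantees when `use_ddp` is set:

```python
import torch
import torch.distributed as dist

def ddp_weighted_loss(loss: torch.Tensor, weight: torch.Tensor, world_size: int):
    # Sketch of the weighted averaging enabled above; requires an
    # initialized process group.
    loss = (loss * weight.type(loss.dtype)).sum()  # local weighted sum
    dist.all_reduce(weight, op=dist.ReduceOp.SUM)  # weight -> sum over workers
    loss = loss / weight.sum()                     # normalize by global weight
    return loss * world_size                       # undo DDP's grad averaging
```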
@@ -417,6 +417,17 @@
                        self.train_acc_avg * (self.step_in_epoch - 1)
                        + stats["acc"].detach().cpu().item()
                    ) / self.step_in_epoch
+                if self.use_ddp or self.use_fsdp:
+                    train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(
+                        self.device
+                    )
+                    train_acc_avg = torch.tensor(self.train_acc_avg, dtype=torch.float32).to(
+                        self.device
+                    )
+                    dist.all_reduce(train_loss_avg, op=dist.ReduceOp.SUM)
+                    dist.all_reduce(train_acc_avg, op=dist.ReduceOp.SUM)
+                    self.train_loss_avg = train_loss_avg.detach().cpu().item() / self.world_size
+                    self.train_acc_avg = train_acc_avg.detach().cpu().item() / self.world_size
            # Perform an optimizer step only after accumulating enough gradients
            if (batch_idx + 1) % accum_grad == 0:
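
The context lines above maintain an incremental running average, avg_n = (avg_{n-1} * (n-1) + x_n) / n; the inserted block then averages those per-rank values across workers on every mini-batch. A minimal sketch, again assuming an initialized process group:

```python
import torch
import torch.distributed as dist

def sync_scalar_mean(local_value: float, device, world_size: int) -> float:
    # Cross-rank mean of a per-rank scalar (e.g. a running loss average):
    # sum over all ranks with all_reduce, then divide by the rank count.
    t = torch.tensor(local_value, dtype=torch.float32, device=device)
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    return t.item() / world_size
```

Note that the synced means are written back into `self.train_loss_avg` / `self.train_acc_avg`, so the next incremental update builds on the globally averaged value rather than the purely local one.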
@@ -447,18 +458,6 @@
                optim.zero_grad(set_to_none=True)
                total_time = f"{(time.perf_counter() - time5)/accum_grad:0.3f}"
                time5 = time.perf_counter()
-                if self.use_ddp or self.use_fsdp:
-                    train_loss_avg = torch.tensor(self.train_loss_avg, dtype=torch.float32).to(
-                        self.device
-                    )
-                    train_acc_avg = torch.tensor(self.train_acc_avg, dtype=torch.float32).to(
-                        self.device
-                    )
-                    dist.all_reduce(train_loss_avg, op=dist.ReduceOp.SUM)
-                    dist.all_reduce(train_acc_avg, op=dist.ReduceOp.SUM)
-                    self.train_loss_avg = train_loss_avg.detach().cpu().item() / self.world_size
-                    self.train_acc_avg = train_acc_avg.detach().cpu().item() / self.world_size
                speed_stats["optim_time"] = f"{time5 - time4:0.3f}"
@@ -666,9 +665,9 @@
                f"data_slice: {data_split_i}/{data_split_num}, "
                f"step_in_slice: {batch_idx + 1}/{batch_num_epoch}, step_in_epoch: {step_in_epoch}, total step: {self.batch_total}, "
                f"(loss_avg_rank: {loss:.3f}), "
                f"(loss_avg_epoch: {loss_avg_epoch:.3f}), "
                f"(ppl_avg_epoch: {math.exp(loss_avg_epoch):.3e}), "
                f"(acc_avg_epoch: {acc_avg_epoch:.3f}), "
                f"(loss_avg_slice: {loss_avg_epoch:.3f}), "
                f"(ppl_avg_slice: {math.exp(loss_avg_epoch):.3e}), "
                f"(acc_avg_slice: {acc_avg_epoch:.3f}), "
                f"(lr: {lr:.3e}), "
                f"{[(k, round(v.detach().cpu().item(), 3)) for k, v in stats.items()]}, "
                f"{speed_stats}, "