funasr/bin/train_ds.py
@@ -81,7 +81,10 @@
         deepspeed.init_distributed(dist_backend=kwargs.get("backend", "nccl"))
     elif use_ddp or use_fsdp:
         logging.info(f"use_ddp: {use_ddp}, use_fsdp: {use_fsdp}")
-        dist.init_process_group(backend=kwargs.get("backend", "nccl"), init_method="env://")
+        dist.init_process_group(
+            backend=kwargs.get("backend", "nccl"),
+            init_method="env://",
+        )
         torch.cuda.set_device(local_rank)
     # rank = dist.get_rank()
@@ -131,7 +134,7 @@
         **kwargs.get("train_conf"),
     )
-    model = trainer.warp_model(model)
+    model = trainer.warp_model(model, **kwargs)
     kwargs["device"] = int(os.environ.get("LOCAL_RANK", 0))
     trainer.device = int(os.environ.get("LOCAL_RANK", 0))
@@ -181,7 +184,10 @@
             )
             trainer.start_step = 0
-            torch.cuda.empty_cache()
+            device = next(model.parameters()).device
+            if device.type == "cuda":
+                with torch.cuda.device(device):
+                    torch.cuda.empty_cache()
             time_escaped = (time.perf_counter() - time_slice_i) / 3600.0
             logging.info(