游雁
2025-02-13 604ae30fdbe96185282e6c83134e11217f3acd20
oom fix
3个文件已修改
24 ■■■■ 已修改文件
funasr/auto/auto_model.py 8 ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/bin/train.py 8 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/bin/train_ds.py 8 ●●●● 补丁 | 查看 | 原始文档 | blame | 历史
funasr/auto/auto_model.py
@@ -147,7 +147,9 @@
        # if spk_model is not None, build spk model else None
        spk_model = kwargs.get("spk_model", None)
        spk_kwargs = {} if kwargs.get("spk_kwargs", {}) is None else kwargs.get("spk_kwargs", {})
-        cb_kwargs = {} if spk_kwargs.get("cb_kwargs", {}) is None else spk_kwargs.get("cb_kwargs", {})
+        cb_kwargs = (
+            {} if spk_kwargs.get("cb_kwargs", {}) is None else spk_kwargs.get("cb_kwargs", {})
+        )
        if spk_model is not None:
            logging.info("Building SPK model.")
            spk_kwargs["model"] = spk_model
@@ -368,8 +370,8 @@
            pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}")
        device = next(model.parameters()).device
-        if device.type == 'cuda':
-            with torch.cuda.device():
+        if device.type == "cuda":
+            with torch.cuda.device(device):
                torch.cuda.empty_cache()
        return asr_result_list
funasr/bin/train.py
@@ -221,10 +221,10 @@
            )
            trainer.start_step = 0
-            # device = next(model.parameters()).device
-            # if device.type == 'cuda':
-            #     with torch.cuda.device():
-            #         torch.cuda.empty_cache()
+            device = next(model.parameters()).device
+            if device.type == "cuda":
+                with torch.cuda.device(device):
+                    torch.cuda.empty_cache()
            time_escaped = (time.perf_counter() - time_slice_i) / 3600.0
            logging.info(
funasr/bin/train_ds.py
@@ -184,10 +184,10 @@
            )
            trainer.start_step = 0
-            # device = next(model.parameters()).device
-            # if device.type == 'cuda':
-            #     with torch.cuda.device():
-            #         torch.cuda.empty_cache()
+            device = next(model.parameters()).device
+            if device.type == "cuda":
+                with torch.cuda.device(device):
+                    torch.cuda.empty_cache()
            time_escaped = (time.perf_counter() - time_slice_i) / 3600.0
            logging.info(