From 2779602177ae5374547c7a7e17de0b11a166326d Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 29 Apr 2024 15:08:46 +0800
Subject: [PATCH] Merge branch 'dev_gzf_exp' of github.com:alibaba-damo-academy/FunASR into dev_gzf_exp merge
---
funasr/train_utils/average_nbest_models.py | 28 +++++++++++++++-------------
1 files changed, 15 insertions(+), 13 deletions(-)
diff --git a/funasr/train_utils/average_nbest_models.py b/funasr/train_utils/average_nbest_models.py
index 013a719..0f08804 100644
--- a/funasr/train_utils/average_nbest_models.py
+++ b/funasr/train_utils/average_nbest_models.py
@@ -16,32 +16,35 @@
from functools import cmp_to_key
-
-def _get_checkpoint_paths(output_dir: str, last_n: int=5):
+def _get_checkpoint_paths(output_dir: str, last_n: int = 5):
"""
Get the paths of the last 'last_n' checkpoints by parsing filenames
in the output directory.
"""
try:
- checkpoint = torch.load(os.path.exists(os.path.join(output_dir, "model.pt")), map_location="cpu")
+ checkpoint = torch.load(os.path.join(output_dir, "model.pt"), map_location="cpu")
avg_keep_nbest_models_type = checkpoint["avg_keep_nbest_models_type"]
val_step_or_eoch = checkpoint[f"val_{avg_keep_nbest_models_type}_step_or_eoch"]
- sorted_items = sorted(saved_ckpts.items(), key=lambda x: x[1], reverse=True)
- sorted_items = sorted_items[:last_n] if avg_keep_nbest_models_type == "acc" else sorted_items[-last_n:]
+ sorted_items = sorted(val_step_or_eoch.items(), key=lambda x: x[1], reverse=True)
+ sorted_items = (
+ sorted_items[:last_n] if avg_keep_nbest_models_type == "acc" else sorted_items[-last_n:]
+ )
checkpoint_paths = [os.path.join(output_dir, key) for key, value in sorted_items[:last_n]]
except:
+ print(f"{checkpoint} does not exist, avg the lastet checkpoint.")
# List all files in the output directory
files = os.listdir(output_dir)
# Filter out checkpoint files and extract epoch numbers
checkpoint_files = [f for f in files if f.startswith("model.pt.e")]
# Sort files by epoch number in descending order
- checkpoint_files.sort(key=lambda x: int(re.search(r'(\d+)', x).group()), reverse=True)
+ checkpoint_files.sort(key=lambda x: int(re.search(r"(\d+)", x).group()), reverse=True)
# Get the last 'last_n' checkpoint paths
checkpoint_paths = [os.path.join(output_dir, f) for f in checkpoint_files[:last_n]]
return checkpoint_paths
+
@torch.no_grad()
-def average_checkpoints(output_dir: str, last_n: int=5, **kwargs):
+def average_checkpoints(output_dir: str, last_n: int = 5, **kwargs):
"""
Average the last 'last_n' checkpoints' model state_dicts.
If a tensor is of type torch.int, perform sum instead of average.
@@ -53,13 +56,12 @@
# Load state_dicts from checkpoints
for path in checkpoint_paths:
if os.path.isfile(path):
- state_dicts.append(torch.load(path, map_location='cpu')['state_dict'])
+ state_dicts.append(torch.load(path, map_location="cpu")["state_dict"])
else:
print(f"Checkpoint file {path} not found.")
- continue
# Check if we have any state_dicts to average
- if not state_dicts:
+ if len(state_dicts) < 1:
raise RuntimeError("No checkpoints found for averaging.")
# Average or sum weights
@@ -75,6 +77,6 @@
# Perform average for other types of tensors
stacked_tensors = torch.stack(tensors)
avg_state_dict[key] = torch.mean(stacked_tensors, dim=0)
-
- torch.save({'state_dict': avg_state_dict}, os.path.join(output_dir, f"model.pt.avg{last_n}"))
- return avg_state_dict
\ No newline at end of file
+ checkpoint_outpath = os.path.join(output_dir, f"model.pt.avg{last_n}")
+ torch.save({"state_dict": avg_state_dict}, checkpoint_outpath)
+ return checkpoint_outpath
--
Gitblit v1.9.1