From ec42889511b121230e97bbcdf05f4d517f95d7ba Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 23 Feb 2024 15:09:35 +0800
Subject: [PATCH] Guard init_param loading and relax strict state dict loading
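
Guard optional inputs and relax checkpoint loading:

- funasr/auto/auto_model.py: call load_pretrained_model() only when the
  init_param path exists on disk; log an error and skip loading otherwise.
- funasr/models/llm_asr/model.py: treat audio_mask as optional when
  computing audio_token_lengths, and encode the prompt with the local
  tokenizer instead of self.tokenizer.
- funasr/train_utils/load_pretrained_model.py: load state dicts with
  strict=False so checkpoints with missing or unexpected keys can still
  be applied.

A minimal sketch of the intended call path (the model name and
checkpoint path below are hypothetical):

    from funasr import AutoModel

    # init_param is loaded only when the file exists on disk;
    # key mismatches are tolerated via ignore_init_mismatch.
    model = AutoModel(
        model="paraformer-zh",
        init_param="/path/to/finetuned_checkpoint.pt",
        ignore_init_mismatch=True,
    )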
---
funasr/auto/auto_model.py                   | 21 ++++++++++++---------
funasr/models/llm_asr/model.py              |  4 ++--
funasr/train_utils/load_pretrained_model.py |  2 +-
3 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 3b70ad6..48a983c 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -181,15 +181,18 @@
# init_param
init_param = kwargs.get("init_param", None)
if init_param is not None:
- logging.info(f"Loading pretrained params from {init_param}")
- load_pretrained_model(
- model=model,
- path=init_param,
- ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
- oss_bucket=kwargs.get("oss_bucket", None),
- scope_map=kwargs.get("scope_map", None),
- excludes=kwargs.get("excludes", None),
- )
+ if os.path.exists(init_param):
+ logging.info(f"Loading pretrained params from {init_param}")
+ load_pretrained_model(
+ model=model,
+ path=init_param,
+ ignore_init_mismatch=kwargs.get("ignore_init_mismatch", False),
+ oss_bucket=kwargs.get("oss_bucket", None),
+ scope_map=kwargs.get("scope_map", None),
+ excludes=kwargs.get("excludes", None),
+ )
+ else:
+            logging.error(f"init_param does not exist: {init_param}")
return model, kwargs
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index e3cf551..937c8e1 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -217,7 +217,7 @@
) -> Tuple[torch.Tensor, torch.Tensor]:
audio_mask = kwargs.get("audio_mask")
- audio_token_lengths = audio_mask.sum(-1)
+        audio_token_lengths = audio_mask.sum(-1) if audio_mask is not None else None
batch = {"speech": speech, "speech_lengths": speech_lengths}
enc, enc_lens = self.audio_encoder.encode(**batch)
@@ -279,7 +279,7 @@
prompt_pre = "USER: \nINSTRUCTION: {}\nINPUT: ".format(prompt)
- prompt_ids = self.tokenizer.encode(prompt_pre)
+ prompt_ids = tokenizer.encode(prompt_pre)
prompt_length = len(prompt_ids)
prompt_ids = torch.tensor(prompt_ids, dtype=torch.int64).to(kwargs["device"])
diff --git a/funasr/train_utils/load_pretrained_model.py b/funasr/train_utils/load_pretrained_model.py
index 8493bf5..03a6ff5 100644
--- a/funasr/train_utils/load_pretrained_model.py
+++ b/funasr/train_utils/load_pretrained_model.py
@@ -118,7 +118,7 @@
else:
print(f"Warning, miss key in ckpt: {k}, mapped: {k_ddp}")
- flag = obj.load_state_dict(dst_state, strict=True)
+ flag = obj.load_state_dict(dst_state, strict=False)
# print(flag)
# def load_pretrained_model(
--
Gitblit v1.9.1