From c553a8db1712c2a5deeef5bbb68bd1fdf8d61ab7 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 13 Jun 2024 17:38:01 +0800
Subject: [PATCH] decoding: configurable LLM dtype and audio-encoder layer freezing
---
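Notes (kept above the diffstat, so not part of the commit message): this patch
makes the LLM's compute dtype configurable. A module-level dtype_map translates
the "bf16"/"fp16"/"fp32" config strings into torch dtypes, the frozen LLM
weights are cast once at load time via model.to(...), and the loss computation
is wrapped in torch.cuda.amp.autocast with the same dtype; negative
attention-mask entries are zeroed before the mask is handed to the LLM. A
minimal, self-contained sketch of the pattern (the Linear layer and the
llm_conf dict below are stand-ins for the real LLM and the FunASR YAML config,
not code from this repository):

    import torch

    dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}

    llm_conf = {"llm_dtype": "bf16"}        # stand-in for the YAML llm_conf
    llm_dtype = llm_conf.get("llm_dtype", "fp32")

    model = torch.nn.Linear(8, 8)           # stand-in for the loaded LLM
    model = model.to(dtype_map[llm_dtype])  # cast the frozen weights once

    x = torch.randn(2, 8)
    with torch.cuda.amp.autocast(enabled=llm_dtype != "fp32", dtype=dtype_map[llm_dtype]):
        y = model(x.to(dtype_map[llm_dtype]))  # inputs_embeds is cast the same way
    print(y.dtype)  # torch.bfloat16 when llm_dtype == "bf16"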
funasr/models/llm_asr/model.py | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)
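The audio-encoder freeze logic also changes meaning: freeze_layer_num is no
longer expanded into range(...); it now acts as a count, freezing every layer
whose index (parsed out of the parameter name) is below it, freezing non-layer
parameters unless their name starts with "audio_encoder.ln_post", and freezing
everything when freeze_layer_num <= 0. A quick illustration of the layer-id
extraction the new code relies on (the parameter names are hypothetical
examples):

    import re

    for name in ["encoders.3.self_attn.linear_q.weight", "ln_post.bias"]:
        idx = re.search(r"\.\d+\.", name)  # same pattern as in the patch
        if idx is not None:
            beg, end = idx.regs[0]         # span of the matched ".3."
            print(name, "-> layer", int(name[beg + 1 : end - 1]))
        else:
            print(name, "-> no layer index; frozen unless it is ln_post")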
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index fb0bee3..6e7939b 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -21,6 +21,8 @@
 from funasr.train_utils.device_funcs import to_device
 import traceback
 
+dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
+
 
 @tables.register("model_classes", "LLMASR")
 class LLMASR(nn.Module):
@@ -408,20 +410,23 @@
         audio_encoder_output_size = audio_encoder.output_size()
         freeze = audio_encoder_conf.get("freeze", True)
         freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
-        if freeze_layer_num > 0:
-            freeze_layer_num = range(freeze_layer_num)
+        # if freeze_layer_num > 0:
+        #     freeze_layer_num = range(freeze_layer_num)
         if freeze:
             for name, param in audio_encoder.named_parameters():
-                idx = re.search(r"\.\d+\.", name)
-                if idx is not None:
-                    beg, end = idx.regs[0]
-                    layer_id = int(name[beg + 1 : end - 1])
-                    if isinstance(freeze_layer_num, (list, tuple)):
-                        if layer_id in freeze_layer_num:
+                if freeze_layer_num > 0:
+                    idx = re.search(r"\.\d+\.", name)
+                    if idx is not None:
+                        beg, end = idx.regs[0]
+                        layer_id = int(name[beg + 1 : end - 1])
+                        if layer_id < freeze_layer_num:
                             param.requires_grad = False
-                    else:
+                    elif not name.startswith("audio_encoder.ln_post"):
                         param.requires_grad = False
+                else:
+                    param.requires_grad = False
+
             audio_encoder.eval()
         self.audio_encoder = audio_encoder
@@ -444,7 +449,8 @@
         for name, param in model.named_parameters():
             param.requires_grad = False
         model.eval()
-        self.llm = model
+        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
+        self.llm = model.to(dtype_map[self.llm_dtype])
         llm_dim = model.get_input_embeddings().weight.shape[-1]
 
         # adaptor
@@ -524,12 +530,17 @@
                 batch_idx, :min_len, :
             ]
 
-        labels_ids[labels_ids == -1] = -100
-
-        model_outputs = self.llm(
-            inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
-        )
-        loss = model_outputs.loss
+        with torch.cuda.amp.autocast(
+            enabled=True if self.llm_dtype != "fp32" else False, dtype=dtype_map[self.llm_dtype]
+        ):
+            labels_ids[labels_ids == -1] = -100
+            attention_mask[attention_mask < 0] = 0
+            model_outputs = self.llm(
+                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
+                attention_mask=attention_mask,
+                labels=labels_ids,
+            )
+            loss = model_outputs.loss
 
         stats = {}
         with torch.no_grad():
@@ -734,7 +745,6 @@
         llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
         llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
-        dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
 
         with torch.cuda.amp.autocast(
             enabled=True if llm_dtype != "fp32" else False, dtype=dtype_map[llm_dtype]
         ):
--
Gitblit v1.9.1