From d43f77408b8f3e169c59dfb6b6d82e45e6b91714 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 11 Jun 2024 19:19:06 +0800
Subject: [PATCH] decoding: fix dtype handling on the LLM-ASR inference path
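
Fix mixed-precision handling on the LLM-ASR decoding path:

- drop the float16/bfloat16 casts on encoder_out_lens: sequence lengths are
  integer tensors and should not be cast to floating point;
- when llm_dtype is left at its "fp32" default, derive it from the fp16/bf16
  flags so the LLM runs at the same precision as the audio encoder;
- enable torch.cuda.amp.autocast only when a reduced-precision dtype is
  actually requested;
- pass attention_mask to the LLM unchanged instead of casting it to the LLM
  dtype.

A minimal standalone sketch of the resulting dtype resolution (illustration
only, not part of the patch; the flag names mirror the kwargs used in
funasr/models/llm_asr/model.py):

    import torch

    dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}

    def resolve_llm_dtype(llm_dtype="fp32", fp16=False, bf16=False):
        # Fall back to the encoder precision flags when no explicit LLM dtype is set.
        if llm_dtype == "fp32":
            llm_dtype = "fp16" if fp16 else llm_dtype
            llm_dtype = "bf16" if bf16 else llm_dtype
        return llm_dtype

    llm_dtype = resolve_llm_dtype(fp16=True)  # -> "fp16"
    # autocast stays disabled for full precision, avoiding needless casts
    with torch.cuda.amp.autocast(enabled=llm_dtype != "fp32", dtype=dtype_map[llm_dtype]):
        pass  # the LLM generate()/forward() call runs here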
---
funasr/models/llm_asr/model.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index f72b2c8..dd806cf 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -687,10 +687,8 @@
# fp16
if kwargs.get("fp16", False):
speech = speech.to(torch.float16)
- encoder_out_lens = encoder_out_lens.to(torch.float16)
elif kwargs.get("bf16", False):
speech = speech.to(torch.bfloat16)
- encoder_out_lens = encoder_out_lens.to(torch.bfloat16)
encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
# audio_adaptor
@@ -714,12 +712,18 @@
]
llm_dtype = kwargs.get("llm_dtype", "fp32")
+ if llm_dtype == "fp32":
+ llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
+ llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
+
dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
- with torch.cuda.amp.autocast(dtype=dtype_map[llm_dtype]):
+ with torch.cuda.amp.autocast(
+ enabled=llm_dtype != "fp32", dtype=dtype_map[llm_dtype]
+ ):
label = contents["assistant"][0]
self.llm = self.llm.to(dtype_map[llm_dtype])
inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
- attention_mask = attention_mask.to(dtype_map[llm_dtype])
+
if not kwargs.get("tearchforing", False):
generated_ids = self.llm.generate(
@@ -739,6 +743,7 @@
labels_ids = batch["labels_ids"]
labels_ids[labels_ids == -1] = -100
attention_mask = batch.get("attention_mask", None)
+ # attention_mask is passed to the LLM as-is; it is no longer cast to dtype_map[llm_dtype]
model_outputs = self.llm(
inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
)
--
Gitblit v1.9.1
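
As a usage note (assuming this file is saved as decoding.patch inside a FunASR
checkout; the filename is illustrative), the change can be inspected and
applied with:

    git apply --stat decoding.patch
    git apply --check decoding.patch
    git am decoding.patch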