From a8653d897db4872055632e1fd9f3595291e787b0 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 11 Jun 2024 13:56:24 +0800
Subject: [PATCH] fix dtype cast bug in llm_asr model
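
Inside the torch.cuda.amp.autocast(dtype=dtype_map[llm_dtype]) region, the
manual .to(dtype_map[llm_dtype]) casts on self.llm, inputs_embeds and
attention_mask in the generate path are dropped: autocast already runs the
eligible ops in the requested dtype, and the attention mask keeps its
original dtype when passed to self.llm / self.llm.generate. A commented-out
attention_mask cast is likewise left as a note in the teacher-forcing
forward branch.

For reference, a minimal sketch of the autocast pattern this change relies
on (a hedged illustration, not code from model.py; llm is a dummy stand-in
for self.llm and the tensor shapes are invented):

    import torch
    import torch.nn as nn

    # requires a CUDA device
    llm = nn.Linear(16, 16).cuda()                        # stand-in for self.llm
    inputs_embeds = torch.randn(2, 8, 16, device="cuda")  # deliberately left in fp32
    llm_dtype = "bf16"

    dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
    with torch.cuda.amp.autocast(dtype=dtype_map[llm_dtype]):
        # autocast runs matmul/linear ops in bf16 on the fly, so neither the
        # module weights nor inputs_embeds need an explicit .to(); a boolean
        # or integer attention mask can likewise keep its original dtype.
        out = llm(inputs_embeds)
    print(out.dtype)  # torch.bfloat16 under autocast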
---
funasr/models/llm_asr/model.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index f72b2c8..519918c 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -717,9 +717,9 @@
dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
with torch.cuda.amp.autocast(dtype=dtype_map[llm_dtype]):
label = contents["assistant"][0]
- self.llm = self.llm.to(dtype_map[llm_dtype])
- inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
- attention_mask = attention_mask.to(dtype_map[llm_dtype])
+ # self.llm = self.llm.to(dtype_map[llm_dtype])
+ # inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
+
if not kwargs.get("tearchforing", False):
generated_ids = self.llm.generate(
@@ -739,6 +739,7 @@
labels_ids = batch["labels_ids"]
labels_ids[labels_ids == -1] = -100
attention_mask = batch.get("attention_mask", None)
+ # attention_mask = attention_mask.to(dtype_map[llm_dtype])
model_outputs = self.llm(
inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
)
--
Gitblit v1.9.1