From 664c400545b3acb0272f7ef299196be817f93c5f Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 13 Jun 2024 16:59:40 +0800
Subject: [PATCH] decoding

---
 funasr/models/llm_asr/model.py |    8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index e1e331d..6e7939b 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -449,9 +449,9 @@
             for name, param in model.named_parameters():
                 param.requires_grad = False
             model.eval()
-        self.llm = model
-        llm_dim = model.get_input_embeddings().weight.shape[-1]
         self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
+        self.llm = model.to(dtype_map[self.llm_dtype])
+        llm_dim = model.get_input_embeddings().weight.shape[-1]
 
         # adaptor
         adaptor_class = tables.adaptor_classes.get(audio_adaptor)
@@ -536,7 +536,9 @@
             labels_ids[labels_ids == -1] = -100
             attention_mask[attention_mask < 0] = 0
             model_outputs = self.llm(
-                inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
+                attention_mask=attention_mask,
+                labels=labels_ids,
             )
             loss = model_outputs.loss
 

--
Gitblit v1.9.1