From 60b3c42d6d3d90b97918b10d506efd6c471e1ba8 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 07 Jun 2024 02:40:31 +0800
Subject: [PATCH] auto frontend
---
funasr/models/llm_asr/model.py | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 411b59d..0d9bf7f 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -476,14 +476,8 @@
# audio_adaptor
encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
- input_ids[input_ids == -1] = 0
- input_ids[input_ids == -100] = 0
- if hasattr(self.llm.model, "embed_tokens"):
- inputs_embeds = self.llm.model.embed_tokens(input_ids)
- elif hasattr(self.llm.model.model, "embed_tokens"):
- inputs_embeds = self.llm.model.model.embed_tokens(input_ids)
- else:
- inputs_embeds = self.llm.model.model.model.embed_tokens(input_ids)
+ input_ids[input_ids < 0] = 0
+ inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
batch_size, token_num, dims = inputs_embeds.shape
_, l, _ = encoder_out.shape
@@ -492,7 +486,7 @@
inputs_embeds[batch_idx, fbank_beg_idx : fbank_beg_idx + l, :] = encoder_out[
batch_idx, :l, :
]
-
+ labels_ids[labels_ids == -1] = -100
model_outputs = self.llm(
inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
)
--
Gitblit v1.9.1