From 7355e2050321bc1778fb44bdebded6c25861a36c Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 13 Jun 2024 17:40:40 +0800
Subject: [PATCH] decoding

---
 funasr/models/llm_asr/model.py |   11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 6e7939b..80c33fc 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -496,11 +496,14 @@
 
         batch_size, frames, _ = speech.shape
 
-        # audio encoder
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+        with torch.cuda.amp.autocast(enabled=False):
+            # audio encoder
+            encoder_out, encoder_out_lens = self.audio_encoder(
+                speech.permute(0, 2, 1), speech_lengths
+            )
 
-        # audio_adaptor
-        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+            # audio_adaptor
+            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
 
         input_ids[input_ids < 0] = 0
         inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)

--
Gitblit v1.9.1