From 5ebcba3d611cc8caca3c25ccbc937b60487f12fb Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 13 Jun 2024 19:16:19 +0800
Subject: [PATCH] llm_asr: fix encoder layer freezing and run the LLM in its configured dtype
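
Rework how the audio encoder is partially frozen and make the LLM dtype
explicit end to end:

* freeze_layer_num is now used directly as an integer threshold instead
  of being expanded into a range(): parameters whose names carry a layer
  index (matched by r"\.\d+\.") are frozen when that index is below
  freeze_layer_num, and parameters without a layer index are frozen
  unless their name contains ".ln_post.", so the encoder's final
  post-LayerNorm stays trainable.
* the LLM is cast to the dtype configured by llm_dtype when it is
  loaded, and inputs_embeds is cast to the same dtype before the LLM
  forward so the fused audio/text embeddings match the LLM weights.
  dtype_map is the existing module-level mapping in model.py, assumed
  to map "fp32"/"fp16"/"bf16" to the corresponding torch dtypes.
* the audio encoder and adaptor now run under
  torch.cuda.amp.autocast(enabled=False), so they stay in full
  precision even when the training step otherwise uses automatic mixed
  precision.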

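For reference, a minimal sketch of the new freezing rule, assuming
encoder parameter names of the usual "blocks.<idx>." form (the names
below are hypothetical, for illustration only):

    import re

    freeze_layer_num = 24  # freeze transformer layers 0..23
    for name in ["blocks.3.attn.qkv.weight",
                 "encoder.ln_post.weight",
                 "conv1.weight"]:
        m = re.search(r"\.\d+\.", name)  # first ".<digits>." in the name
        if m is not None:
            beg, end = m.regs[0]
            layer_id = int(name[beg + 1 : end - 1])
            frozen = layer_id < freeze_layer_num  # below threshold -> frozen
        else:
            # no layer index: freeze everything except the post-LayerNorm
            frozen = ".ln_post." not in name
        print(name, "frozen" if frozen else "trainable")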
---
 funasr/models/llm_asr/model.py |   29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/funasr/models/llm_asr/model.py b/funasr/models/llm_asr/model.py
index 15969e3..2d229b9 100644
--- a/funasr/models/llm_asr/model.py
+++ b/funasr/models/llm_asr/model.py
@@ -410,19 +410,19 @@
             audio_encoder_output_size = audio_encoder.output_size()
         freeze = audio_encoder_conf.get("freeze", True)
         freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
-        if freeze_layer_num > 0:
-            freeze_layer_num = range(freeze_layer_num)
+        # if freeze_layer_num > 0:
+        #     freeze_layer_num = range(freeze_layer_num)
 
         if freeze:
             for name, param in audio_encoder.named_parameters():
-                if isinstance(freeze_layer_num, (list, tuple)):
+                if freeze_layer_num > 0:
                     idx = re.search(r"\.\d+\.", name)
                     if idx is not None:
                         beg, end = idx.regs[0]
                         layer_id = int(name[beg + 1 : end - 1])
-                        if layer_id in freeze_layer_num:
+                        if layer_id < freeze_layer_num:
                             param.requires_grad = False
-                    else:
+                    elif ".ln_post." not in name:
                         param.requires_grad = False
                 else:
                     param.requires_grad = False
@@ -449,9 +449,9 @@
             for name, param in model.named_parameters():
                 param.requires_grad = False
             model.eval()
-        self.llm = model
-        llm_dim = model.get_input_embeddings().weight.shape[-1]
         self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
+        self.llm = model.to(dtype_map[self.llm_dtype])
+        llm_dim = model.get_input_embeddings().weight.shape[-1]
 
         # adaptor
         adaptor_class = tables.adaptor_classes.get(audio_adaptor)
@@ -496,11 +496,14 @@
 
         batch_size, frames, _ = speech.shape
 
-        # audio encoder
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+        with torch.cuda.amp.autocast(enabled=False):
+            # audio encoder
+            encoder_out, encoder_out_lens = self.audio_encoder(
+                speech.permute(0, 2, 1), speech_lengths
+            )
 
-        # audio_adaptor
-        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
+            # audio_adaptor
+            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
 
         input_ids[input_ids < 0] = 0
         inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
@@ -536,7 +539,9 @@
             labels_ids[labels_ids == -1] = -100
             attention_mask[attention_mask < 0] = 0
             model_outputs = self.llm(
-                inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
+                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
+                attention_mask=attention_mask,
+                labels=labels_ids,
             )
             loss = model_outputs.loss
 

--
Gitblit v1.9.1