游雁
2024-06-13 c553a8db1712c2a5deeef5bbb68bd1fdf8d61ab7
funasr/models/llm_asr/model.py
@@ -21,6 +21,8 @@
from funasr.train_utils.device_funcs import to_device
import traceback
+dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
@tables.register("model_classes", "LLMASR")
class LLMASR(nn.Module):
@@ -408,18 +410,20 @@
            audio_encoder_output_size = audio_encoder.output_size()
        freeze = audio_encoder_conf.get("freeze", True)
        freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
-        if freeze_layer_num > 0:
-            freeze_layer_num = range(freeze_layer_num)
+        # if freeze_layer_num > 0:
+        #     freeze_layer_num = range(freeze_layer_num)
        if freeze:
            for name, param in audio_encoder.named_parameters():
-                if isinstance(freeze_layer_num, (list, tuple)):
+                if freeze_layer_num > 0:
                    idx = re.search(r"\.\d+\.", name)
                    if idx is not None:
                        beg, end = idx.regs[0]
                        layer_id = int(name[beg + 1 : end - 1])
-                        if layer_id in freeze_layer_num:
+                        if layer_id < freeze_layer_num:
                            param.requires_grad = False
                    elif not name.startswith("audio_encoder.ln_post"):
                        param.requires_grad = False
                else:
                    param.requires_grad = False
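
For reference, a standalone sketch of the layer-freezing rule in the new branch above: when freeze_layer_num > 0, the numeric block index is parsed out of each parameter name and only blocks below that index are frozen. ToyEncoder and freeze_first_n_layers are illustrative names, not part of FunASR.

import re
import torch.nn as nn

class ToyEncoder(nn.Module):
    def __init__(self, num_layers=4, dim=8):
        super().__init__()
        self.blocks = nn.ModuleList(nn.Linear(dim, dim) for _ in range(num_layers))
        self.ln_post = nn.LayerNorm(dim)

def freeze_first_n_layers(encoder: nn.Module, freeze_layer_num: int) -> None:
    # Mirrors the new branch: parse the numbered-block index from the parameter
    # name and freeze blocks whose index is below freeze_layer_num.
    for name, param in encoder.named_parameters():
        if freeze_layer_num > 0:
            idx = re.search(r"\.\d+\.", name)
            if idx is not None:
                beg, end = idx.regs[0]
                layer_id = int(name[beg + 1 : end - 1])
                if layer_id < freeze_layer_num:
                    param.requires_grad = False
            elif not name.startswith("ln_post"):  # the real code checks "audio_encoder.ln_post"
                param.requires_grad = False
        else:
            param.requires_grad = False

enc = ToyEncoder()
freeze_first_n_layers(enc, freeze_layer_num=2)
print([(n, p.requires_grad) for n, p in enc.named_parameters()])
# blocks.0.* and blocks.1.* are frozen; blocks.2.*, blocks.3.* and ln_post.* still train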
@@ -445,7 +449,8 @@
            for name, param in model.named_parameters():
                param.requires_grad = False
            model.eval()
-        self.llm = model
+        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
+        self.llm = model.to(dtype_map[self.llm_dtype])
        llm_dim = model.get_input_embeddings().weight.shape[-1]
        # adaptor
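
A minimal sketch of the new llm_dtype handling, with a plain nn.Linear standing in for the causal LM and llm_conf written as a literal dict:

import torch
import torch.nn as nn

dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}

llm_conf = {"llm_dtype": "bf16"}        # stand-in for the llm_conf section of the config
model = nn.Linear(16, 16)               # stand-in for the HuggingFace causal LM
llm_dtype = llm_conf.get("llm_dtype", "fp32")
model = model.to(dtype_map[llm_dtype])  # cast the weights once at build time
print(next(model.parameters()).dtype)   # torch.bfloat16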
@@ -525,12 +530,17 @@
                    batch_idx, :min_len, :
                ]
-        labels_ids[labels_ids == -1] = -100
-        model_outputs = self.llm(
-            inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels_ids
-        )
-        loss = model_outputs.loss
+        with torch.cuda.amp.autocast(
+            enabled=True if self.llm_dtype != "fp32" else False, dtype=dtype_map[self.llm_dtype]
+        ):
+            labels_ids[labels_ids == -1] = -100
+            attention_mask[attention_mask < 0] = 0
+            model_outputs = self.llm(
+                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
+                attention_mask=attention_mask,
+                labels=labels_ids,
+            )
+            loss = model_outputs.loss
        stats = {}
        with torch.no_grad():
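
The same autocast pattern in isolation: the forward pass is wrapped in torch.cuda.amp.autocast only when a reduced-precision llm_dtype is configured, and the fp32 adaptor output is cast to that dtype before it enters the LLM. The snippet uses a stand-in linear layer and assumes a CUDA device is available.

import torch
import torch.nn as nn

dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
llm_dtype = "bf16"

llm = nn.Linear(32, 32, device="cuda", dtype=dtype_map[llm_dtype])  # stand-in LLM
inputs_embeds = torch.randn(2, 10, 32, device="cuda")               # fp32 adaptor output

with torch.cuda.amp.autocast(enabled=llm_dtype != "fp32", dtype=dtype_map[llm_dtype]):
    # cast the fp32 embeddings to the LLM dtype before the forward pass
    out = llm(inputs_embeds.to(dtype_map[llm_dtype]))
print(out.dtype)  # torch.bfloat16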
@@ -735,7 +745,6 @@
            llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
            llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
-        dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
        with torch.cuda.amp.autocast(
            enabled=True if llm_dtype != "fp32" else False, dtype=dtype_map[llm_dtype]
        ):
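
The inference-time dtype selection above, reduced to its core: fp16/bf16 flags passed in kwargs override the configured dtype before the autocast context is entered (kwargs is a plain dict here, standing in for the inference arguments).

kwargs = {"bf16": True}    # e.g. passed to inference()
llm_dtype = "fp32"         # configured default
llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype
print(llm_dtype)           # -> "bf16"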