游雁
2024-06-12 9afcf0ea7d2877ddbbafec5b1a77f5cf025dab17
funasr/models/llm_asr/model.py
@@ -407,9 +407,21 @@
            audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
            audio_encoder_output_size = audio_encoder.output_size()
        freeze = audio_encoder_conf.get("freeze", True)
        freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
        if freeze_layer_num > 0:
            # freeze only the first `freeze_layer_num` layers
            freeze_layer_num = range(freeze_layer_num)
        if freeze:
            for name, param in audio_encoder.named_parameters():
                if isinstance(freeze_layer_num, (list, tuple, range)):
                    # selective freeze: extract the layer index from the parameter
                    # name, e.g. "encoders.3.self_attn.linear_q.weight" -> 3
                    idx = re.search(r"\.\d+\.", name)
                    if idx is not None:
                        beg, end = idx.span()
                        layer_id = int(name[beg + 1 : end - 1])
                        if layer_id in freeze_layer_num:
                            param.requires_grad = False
                else:
                    # no layer subset configured: freeze the whole encoder
                    param.requires_grad = False
            audio_encoder.eval()
        self.audio_encoder = audio_encoder
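
A minimal, self-contained sketch of the layer-selective freezing above, runnable outside FunASR (freeze_encoder_layers and ToyEncoder are illustrative names, not part of the repo):

import re
import torch.nn as nn

def freeze_encoder_layers(encoder: nn.Module, freeze_layer_num: int = -1):
    # Freeze everything by default; with freeze_layer_num > 0, freeze only
    # parameters whose name carries a layer index below that bound.
    layer_ids = range(freeze_layer_num) if freeze_layer_num > 0 else None
    for name, param in encoder.named_parameters():
        if layer_ids is None:
            param.requires_grad = False
            continue
        m = re.search(r"\.\d+\.", name)  # "encoders.3.linear.weight" -> ".3."
        if m is not None and int(m.group(0).strip(".")) in layer_ids:
            param.requires_grad = False
    encoder.eval()

class ToyEncoder(nn.Module):
    def __init__(self, num_layers: int = 4):
        super().__init__()
        self.encoders = nn.ModuleList(nn.Linear(8, 8) for _ in range(num_layers))

toy = ToyEncoder()
freeze_encoder_layers(toy, freeze_layer_num=2)
# encoders.0.* and encoders.1.* are frozen; encoders.2.* / encoders.3.* still train.
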
@@ -687,10 +699,8 @@
        # fp16 / bf16: cast the input features only; encoder_out_lens does not
        # exist yet at this point, and length tensors must stay integer-typed
        if kwargs.get("fp16", False):
            speech = speech.to(torch.float16)
        elif kwargs.get("bf16", False):
            speech = speech.to(torch.bfloat16)
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
        # audio_adaptor
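
Half precision here should apply to the input features only: encoder_out_lens is produced by the encoder call on the preceding line and is an integer frame count. A minimal sketch of this pattern (shapes and the cast_features helper name are illustrative):

import torch

def cast_features(speech: torch.Tensor, fp16: bool = False, bf16: bool = False) -> torch.Tensor:
    # Half-precision applies to the feature tensor only; lengths stay int64.
    if fp16:
        return speech.to(torch.float16)
    if bf16:
        return speech.to(torch.bfloat16)
    return speech

speech = torch.randn(2, 80, 100)          # (batch, n_mels, frames)
speech_lengths = torch.tensor([100, 87])  # int64 lengths; never cast to float
speech = cast_features(speech, bf16=True)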