游雁
2024-06-14 08114ae27d85949106aeab03b3fa5d764d100b33
funasr/models/llm_asr/model.py
@@ -398,7 +398,9 @@
            # frontend = model.kwargs.get("frontend")
            audio_encoder_output_size = model.model.encoder_output_size
            audio_encoder = model.model.model.encoder
            audio_encoder = (
                model.model.model.encoder if hasattr(model.model, "model") else model.model.encoder
            )
            # self.frontend = frontend
@@ -422,7 +424,7 @@
                        layer_id = int(name[beg + 1 : end - 1])
                        if layer_id < freeze_layer_num:
                            param.requires_grad = False
                    elif ".ln_post." not in name:
                    elif "ln_post." not in name:
                        param.requires_grad = False
                else:
                    param.requires_grad = False
@@ -498,9 +500,7 @@
        with torch.cuda.amp.autocast(enabled=False):
            # audio encoder
            encoder_out, encoder_out_lens = self.audio_encoder(
                speech.permute(0, 2, 1), speech_lengths
            )
            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
            # audio_adaptor
            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -565,6 +565,12 @@
            batch_size = int((labels_ids > 0 + 1).sum())
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
    def encode(self, speech, speech_lengths):
        # audio encoder
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
        return encoder_out, encoder_out_lens
    def data_template(self, data):
        system, user, assistant = [], [], []
@@ -721,7 +727,8 @@
            speech = speech.to(torch.float16)
        elif kwargs.get("bf16", False):
            speech = speech.to(torch.bfloat16)
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
        # audio encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        # audio_adaptor
        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -806,3 +813,21 @@
            ibest_writer["text_tn"][key[0]] = response_clean
        return results, meta_data
@tables.register("model_classes", "LLMASR3")
class LLMASR3(LLMASR2):
    """LLMASR2 variant whose audio encoder consumes speech features as-is.

    Differs from LLMASR2 only in :meth:`encode`: the speech tensor is passed
    to the audio encoder without the ``permute(0, 2, 1)`` axis swap that the
    parent class applies, for encoders that already expect the frontend's
    native feature layout.
    """

    # NOTE: the former ``__init__`` only forwarded ``*args, **kwargs`` to
    # ``super().__init__`` and has been removed; construction behavior is
    # unchanged (the inherited LLMASR2 initializer runs either way).

    def encode(self, speech, speech_lengths):
        """Run the audio encoder on the speech features without permuting axes.

        Args:
            speech: batched speech feature tensor, fed to the encoder unchanged.
            speech_lengths: per-utterance valid lengths.

        Returns:
            Tuple of (encoder_out, encoder_out_lens) from the audio encoder.
        """
        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
        return encoder_out, encoder_out_lens