        audio_encoder_output_size = audio_encoder.output_size()
        freeze = audio_encoder_conf.get("freeze", True)
        freeze_layer_num = int(audio_encoder_conf.get("freeze_layer_num", -1))
        # if freeze_layer_num > 0:
        #     freeze_layer_num = range(freeze_layer_num)

        if freeze:
            for name, param in audio_encoder.named_parameters():
                if freeze_layer_num > 0:
                    # freeze only the first `freeze_layer_num` encoder layers
                    idx = re.search(r"\.\d+\.", name)
                    if idx is not None:
                        beg, end = idx.regs[0]
                        layer_id = int(name[beg + 1 : end - 1])
                        if layer_id < freeze_layer_num:
                            param.requires_grad = False
                    elif "ln_post." not in name:
                        # parameters without a layer index are frozen too, except ln_post
                        param.requires_grad = False
                else:
                    # freeze the whole audio encoder
                    param.requires_grad = False
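        # For reference, the layer-index regex above maps a parameter name such as
        # "blocks.3.mlp.fc1.weight" to layer_id == 3; the example name is illustrative
        # and not taken from any particular encoder.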

        # the LLM is fully frozen and kept in eval mode; only its dtype is adjusted
        for name, param in model.named_parameters():
            param.requires_grad = False
        model.eval()
        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
        self.llm = model.to(dtype_map[self.llm_dtype])
        llm_dim = model.get_input_embeddings().weight.shape[-1]
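        # NOTE: `dtype_map` is defined elsewhere in the module; it is assumed to be a
        # plain lookup along the lines of
        #     {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}
        # so that the `llm_dtype` string from the config selects a torch dtype.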

        # adaptor
        adaptor_class = tables.adaptor_classes.get(audio_adaptor)
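        # The adaptor projects the audio encoder's output features into the LLM
        # embedding space (llm_dim); its instantiation is outside this excerpt and is
        # presumably driven by an adaptor config dict.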

        batch_size, frames, _ = speech.shape

        with torch.cuda.amp.autocast(enabled=False):
            # audio encoder (autocast disabled so it runs in its own precision)
            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

            # audio_adaptor
            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)

        # clamp negative (non-vocabulary) ids to 0 so the embedding lookup is valid
        input_ids[input_ids < 0] = 0
        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
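        # The step that merges the audio features computed above into `inputs_embeds`
        # is presumably performed here; it is not part of this excerpt.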

        # -100 is the ignore index of the Hugging Face LM loss
        labels_ids[labels_ids == -1] = -100
        attention_mask[attention_mask < 0] = 0
        model_outputs = self.llm(
            inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
            attention_mask=attention_mask,
            labels=labels_ids,
        )
        loss = model_outputs.loss

        # note: `0 + 1` binds before `>`, so this counts label tokens with id > 1
        batch_size = int((labels_ids > 0 + 1).sum())
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight

    def encode(self, speech, speech_lengths):
        # audio encoder; it consumes (batch, feat_dim, frames), hence the permute
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)

        return encoder_out, encoder_out_lens

    def data_template(self, data):
        system, user, assistant = [], [], []
            speech = speech.to(torch.float16)
        elif kwargs.get("bf16", False):
            speech = speech.to(torch.bfloat16)

        # audio encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

        # audio_adaptor
        encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)

        ibest_writer["text_tn"][key[0]] = response_clean

        return results, meta_data


@tables.register("model_classes", "LLMASR3")
class LLMASR3(nn.Module):
    """ """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

    def encode(self, speech, speech_lengths):
        # audio encoder
        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
        return encoder_out, encoder_out_lens
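    # Note: this encode() differs from the one earlier in the excerpt only in that the
    # speech features are passed without the (0, 2, 1) permute, i.e. the encoder used
    # here is assumed to consume (batch, frames, feat_dim) directly.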