batch_size_scale_ratio_max: 2
num_workers: 4
audio_adaptor_downsample_rate: ${audio_adaptor_conf.downsample_rate}
audio_encoder_downsample_rate: 4
data_split_num: 512
batch_size_sample_max: 15
retry: 20
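
# Note: ${audio_adaptor_conf.downsample_rate} above is OmegaConf-style
# interpolation, so the value resolves from a sibling config block. A minimal
# sketch of what that block could look like (the key layout here is an
# assumption, not the repository's actual config):
# audio_adaptor_conf:
#   downsample_rate: 2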

self.max_token_length = kwargs.get("max_token_length", 1024)
self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
self.audio_adaptor_downsample_rate = kwargs.get("audio_adaptor_downsample_rate", 2)
self.audio_encoder_downsample_rate = kwargs.get("audio_encoder_downsample_rate", 4)
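
# These knobs suggest token-count bucketing: pack samples until
# batch_size_token_max tokens or batch_size_sample_max items are reached,
# whichever comes first. A minimal standalone sketch of that policy (the
# function name and exact tie-breaking are assumptions, not the repository's
# actual sampler):
def _sketch_pack_batches(lengths, batch_size_token_max=2500, batch_size_sample_max=15):
    batches, cur, cur_tokens = [], [], 0
    for idx, n in enumerate(lengths):
        # start a new batch once the token budget or sample cap would be exceeded
        if cur and (cur_tokens + n > batch_size_token_max or len(cur) >= batch_size_sample_max):
            batches.append(cur)
            cur, cur_tokens = [], 0
        cur.append(idx)
        cur_tokens += n
    if cur:
        batches.append(cur)
    return batches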

def get_source_len(self, index):
    item = self.index_ds[index]

speech = speech.permute(0, 2, 1)
if self.audio_encoder_downsample_rate == 4:
    # two kernel-3 / stride-2 / padding-1 convs: L_out = 1 + (L_in - 3 + 2) // 2 per conv
    olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
    olens = 1 + (olens - 3 + 2 * 1) // 2
elif self.audio_encoder_downsample_rate == 1:
    olens = speech_lengths[0].item()

# one placeholder token per audio_adaptor_downsample_rate encoder frames
sub_token_len = (olens - 1) // self.audio_adaptor_downsample_rate + 1
sub_token = [0] * sub_token_len
fbank_beg_i = [len(source_ids)]
source_ids += sub_token
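
# Worked example of the bookkeeping above, assuming a 100-frame fbank input and
# the defaults (encoder downsample 4 via two kernel-3/stride-2/padding-1 convs,
# adaptor downsample 2):
#   olens         = 1 + (100 - 3 + 2) // 2 = 50   # after the first conv
#   olens         = 1 + (50  - 3 + 2) // 2 = 25   # after the second conv
#   sub_token_len = (25 - 1) // 2 + 1      = 13   # after the adaptor
# so 100 frames reserve 13 placeholder token slots in source_ids.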

with torch.cuda.amp.autocast(enabled=False):
    # audio encoder (self.encode applies the (B, T, D) -> (B, D, T) permute)
    encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

    # audio_adaptor
    encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
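
# self.audio_adaptor is not shown in this excerpt; below is a minimal sketch of
# a conv-based adaptor consistent with the length math above (kernel 3, stride
# equal to the downsample rate, padding 1, then a projection to the LLM width).
# The class name and dimensions are illustrative assumptions, not the actual module:
import torch
import torch.nn as nn

class SketchAudioAdaptor(nn.Module):
    def __init__(self, enc_dim=512, llm_dim=4096, downsample_rate=2):
        super().__init__()
        self.conv = nn.Conv1d(enc_dim, enc_dim, kernel_size=3, stride=downsample_rate, padding=1)
        self.proj = nn.Linear(enc_dim, llm_dim)
        self.downsample_rate = downsample_rate

    def forward(self, x, lens):
        # x: (B, T, D) -> conv over time -> (B, T', D), with T' = (T - 1) // downsample_rate + 1
        x = self.conv(x.transpose(1, 2)).transpose(1, 2)
        lens = (lens - 1) // self.downsample_rate + 1
        return self.proj(x), lens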

# weight for loss gathering: count of label tokens with id > 1
batch_size = int((labels_ids > 1).sum())
loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
return loss, stats, weight

def encode(self, speech, speech_lengths):
    # audio encoder; permute (B, T, D) fbank to the (B, D, T) layout the encoder expects
    encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
    return encoder_out, encoder_out_lens
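
# Usage sketch for encode(), assuming (B, T, D) fbank input; the shapes below
# are illustrative, not from the repository:
#   speech = torch.randn(2, 100, 80)           # batch of 2, 100 frames, 80 mel bins
#   speech_lengths = torch.tensor([100, 80])
#   encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)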

def data_template(self, data):
    system, user, assistant = [], [], []

if kwargs.get("fp16", False):
    speech = speech.to(torch.float16)
elif kwargs.get("bf16", False):
    speech = speech.to(torch.bfloat16)
# audio encoder (self.encode applies the permute)
encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

# audio_adaptor
encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)

ibest_writer["text_tn"][key[0]] = response_clean

return results, meta_data


@tables.register("model_classes", "LLMASR3")
class LLMASR3(nn.Module):
    """LLM-based ASR model (variant 3), registered under model_classes."""

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)

    def encode(self, speech, speech_lengths):
        # audio encoder; this variant feeds speech as-is, without the permute used above
        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
        return encoder_out, encoder_out_lens
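
# Registry usage sketch: @tables.register makes the class discoverable by name.
# Assuming the registry exposes a symmetric lookup (the accessor name here is an
# assumption, not a confirmed API):
#   model_class = tables.model_classes.get("LLMASR3")
#   model = model_class(**model_conf)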

self.length_normalized_loss = length_normalized_loss
self.beam_search = None
self.activation_checkpoint = kwargs.get("activation_checkpoint", False)
self.encoder_output_size = encoder_output_size

def forward(
    self,

self.ctc = ctc

self.length_normalized_loss = length_normalized_loss
self.encoder_output_size = encoder_output_size

def forward(
    self,