
        if self.crit_attn_weight > 0:
            self.attn_loss = torch.nn.L1Loss()
        self.crit_attn_smooth = crit_attn_smooth
        self.length_normalized_loss = length_normalized_loss
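        # NOTE (an assumption from the surrounding names): the L1 criterion
        # above presumably scores the decoder's hotword-biasing attention map
        # against the `ideal_attn` target passed to forward, yielding
        # `loss_ideal`, while `crit_attn_smooth` would smooth that target.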

    def forward(
        self,

        # 2b. Attention decoder branch
        if self.ctc_weight != 1.0:
            loss_att, acc_att, cer_att, wer_att, loss_pre, loss_ideal = self._calc_att_clas_loss(
                encoder_out, encoder_out_lens, text, text_lengths, hotword_pad, hotword_lengths, ideal_attn
            )
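            # Returned terms: loss_att/acc_att plus cer_att/wer_att are the
            # attention branch's loss, accuracy, and error-rate stats;
            # loss_pre is presumably the predictor's token-count loss and
            # loss_ideal the attention-guidance term set up in __init__
            # (both assumptions from the surrounding names).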

        # 3. CTC-Att loss definition

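        # A minimal sketch of the interpolation the elided body presumably
        # performs (an assumption following the common ESPnet-style pattern;
        # `loss_ctc` would come from the elided CTC branch and
        # `self.predictor_weight` is a hypothetical name, so the exact
        # weighting of loss_pre / loss_ideal may differ):
        #
        #   if self.ctc_weight == 0.0:
        #       loss = loss_att
        #   else:
        #       loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
        #   loss = loss + self.predictor_weight * loss_pre + self.crit_attn_weight * loss_ideal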
        stats["loss"] = torch.clone(loss.detach())

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        # (when length_normalized_loss is False, batch_size presumably keeps
        # an utterance count set earlier in forward)
        if self.length_normalized_loss:
            batch_size = int((text_lengths + self.predictor_bias).sum())
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
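    # Illustrative behavior of force_gatherable (a sketch, assuming the
    # ESPnet-style helper the comment above refers to): scalars are wrapped
    # into tensors on the target device so DataParallel can gather them, e.g.
    #   loss, stats, weight = force_gatherable((loss, stats, 16), loss.device)
    #   # weight -> tensor(16., device=loss.device)
    # `weight` is then used to weight-average the per-GPU losses.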

    def _calc_att_clas_loss(
        self,
        encoder_out: torch.Tensor,
        encoder_out_lens: torch.Tensor,
        ys_pad: torch.Tensor,
        ys_pad_lens: torch.Tensor,
        hotword_pad: torch.Tensor,
        hotword_lengths: torch.Tensor,
        ideal_attn: torch.Tensor,
    ):
        encoder_out_mask = (
            ~make_pad_mask(encoder_out_lens, maxlen=encoder_out.size(1))[:, None, :]
        ).to(encoder_out.device)
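        # For example (assuming ESPnet-style make_pad_mask semantics, where
        # padded positions are True): encoder_out_lens [2, 3] with maxlen 4
        # gives make_pad_mask -> [[F, F, T, T], [F, F, F, T]]; `~` flips it to
        # mark valid frames, and [:, None, :] shapes it as [B, 1, T] so it
        # broadcasts over attention queries.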