Update the paraformer_tiny model export (scale the macaron and final feed-forward residuals by 0.5).
| | |
| | | speech: torch.Tensor, |
| | | speech_lengths: torch.Tensor, |
| | | ): |
| | | speech = speech * self._output_size ** 0.5 |
| | | mask = self.make_pad_mask(speech_lengths) |
| | | mask = self.prepare_mask(mask) |
| | | if self.embed is None: |
| | |
| | | |
| | | def forward(self, tgt, tgt_mask, memory, memory_mask, cache=None): |
| | | residual = tgt |
| | | tgt = self.norm1(tgt) |
| | | tgt_q = tgt |
| | | tgt_q_mask = tgt_mask |
| | | x = residual + self.self_attn(tgt_q, tgt, tgt, tgt_q_mask) |
| | |
| | | if self.feed_forward_macaron is not None: |
| | | residual = x |
| | | x = self.norm_ff_macaron(x) |
| | | x = residual + self.feed_forward_macaron(x) |
| | | x = residual + self.feed_forward_macaron(x) * 0.5 |
| | | |
| | | residual = x |
| | | x = self.norm_mha(x) |
| | |
| | | |
| | | residual = x |
| | | x = self.norm_ff(x) |
| | | x = residual + self.feed_forward(x) |
| | | x = residual + self.feed_forward(x) * 0.5 |
| | | |
| | | x = self.norm_final(x) |
| | | |