from typing import Optional

import torch
import torch.nn as nn


# NOTE: the head of this function was lost in extraction; the signature and
# the base-mask computation are a hedged reconstruction around the surviving
# 3-D branch below.
def make_pad_mask(lengths, xs=None, length_dim=-1):
    max_len = int(lengths.max())
    seq_range = torch.arange(max_len, device=lengths.device)
    mask = seq_range.unsqueeze(0) >= lengths.unsqueeze(1)  # (B, T), True at pad

    if xs is not None and len(xs.shape) == 3:
        # Broadcast lengths over the non-length axis of the 3-D input xs.
        if length_dim == 1:
            lengths = lengths.unsqueeze(1).expand(*xs.transpose(1, 2).shape[:2])
        else:
            lengths = lengths.unsqueeze(1).expand(*xs.shape[:2])
        seq_range = torch.arange(xs.size(length_dim), device=lengths.device)
        mask = seq_range >= lengths.unsqueeze(-1)
        if length_dim == 1:
            mask = mask.transpose(1, 2)  # back to the (B, T, C) layout of xs
        return mask
    else:
        return mask
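

# Example (assumed semantics of the reconstructed head above): with
# lengths = torch.tensor([2, 3]) and xs=None, make_pad_mask returns
#   tensor([[False, False,  True],
#           [False, False, False]])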


class sequence_mask(nn.Module):
    def __init__(self, max_seq_len=512, flip=True):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.flip = flip

    # NOTE: the forward body was lost in extraction; all but the surviving
    # return line is a hedged reconstruction (flip assumed to invert the
    # mask from True-at-valid to True-at-pad).
    def forward(self, lengths, dtype=torch.bool, device=None):
        seq_range = torch.arange(self.max_seq_len, device=lengths.device)
        mask = seq_range.unsqueeze(0) < lengths.unsqueeze(1)  # True at valid steps
        if self.flip:
            mask = ~mask
        return mask.type(dtype).to(device) if device is not None else mask.type(dtype)
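
# Sketch of intended use (hypothetical values, relying on the reconstructed
# forward above):
#   masker = sequence_mask(max_seq_len=5, flip=True)
#   masker(torch.tensor([2, 4]))  # -> (2, 5) bool mask, True at padding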


def normalize(
    input: torch.Tensor, p: float = 2.0, dim: int = 1, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
    # Compute the p-norm along `dim` once and broadcast it over `input`
    # (the original duplicated this line in both branches).
    denom = input.norm(p, dim, keepdim=True).expand_as(input)
    if out is None:
        return input / denom
    # Write the quotient into the caller-provided output tensor.
    return torch.div(input, denom, out=out)
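
# Example: row-wise L2 normalization with the default dim=1:
#   normalize(torch.tensor([[3.0, 4.0]]))  # -> tensor([[0.6000, 0.8000]])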


def subsequent_mask(size: int) -> torch.Tensor:
    # Lower-triangular (size, size) matrix: position i may attend to j <= i.
    # `size` is an int, not a Tensor, since it parameterizes torch.ones.
    return torch.ones(size, size).tril()
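
# Example: subsequent_mask(3) ->
#   tensor([[1., 0., 0.],
#           [1., 1., 0.],
#           [1., 1., 1.]])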


# NOTE: MakePadMask_test is called under __main__ below, but its definition
# did not survive extraction; this minimal smoke test is an assumption,
# rebuilt around the stray `print(mask)` that was left behind.
def MakePadMask_test():
    lengths = torch.tensor([2, 3, 1])
    mask = make_pad_mask(lengths)
    print(mask)
if __name__ == "__main__":
    MakePadMask_test()