from typing import List, Optional, Tuple

import numpy as np
import torch
import torch.nn as nn
| | | |
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    """Factory for a standard :class:`torch.nn.LayerNorm`.

    Args:
        normalized_shape: int or shape over which the last dims are normalized.
        eps: value added to the denominator for numerical stability.
        elementwise_affine: whether to learn per-element gain and bias.
        export: accepted for API compatibility with callers that request an
            export-friendly variant; it is ignored here — a plain
            ``torch.nn.LayerNorm`` is always returned.

    Returns:
        A ``torch.nn.LayerNorm`` module.
    """
    # Pass by keyword so the call stays correct even if the constructor
    # signature gains parameters between torch versions.
    return torch.nn.LayerNorm(
        normalized_shape, eps=eps, elementwise_affine=elementwise_affine
    )
| | | |
| | | |
class SamePad(nn.Module):
    # NOTE(review): extraction fragment — the body of ``__init__`` (which must
    # compute ``self.remove`` from kernel_size/causal) and the
    # ``def forward(self, x):`` header are missing from this chunk; the three
    # trailing lines below clearly belong to ``forward``. Confirm against the
    # complete file before relying on this class.
    def __init__(self, kernel_size, causal=False):

        # Drop the last ``self.remove`` timesteps (last dim) so a convolution
        # padded for "same" output keeps length equal to the input length.
        if self.remove > 0:
            x = x[:, :, : -self.remove]
        return x
| | | |
| | | |
class TransposeLast(nn.Module):
    # NOTE(review): extraction fragment — the ``__init__`` body, the
    # ``def forward(self, x):`` header, and the transpose operation itself are
    # missing from this chunk; only the final ``return x`` survives.
    # Presumably forward swaps the last two dimensions of ``x`` (and
    # ``deconstruct_idx`` selects an element of a tuple input first) —
    # TODO confirm against the complete file.
    def __init__(self, deconstruct_idx=None):


        return x
| | | |
| | | |
def compute_mask_indices(
    shape: Tuple[int, int],
    padding_mask: Optional[torch.Tensor],
    # NOTE(review): extraction fragment — the rest of the signature (mask
    # probability, span length, dropout fraction, ...) and large parts of the
    # body (the per-row loop, ``lengths``/``min_len`` computation, the final
    # ``mask`` construction) are missing from this chunk.

    # Sample ``num_mask`` random span-start indices in [0, sz - min_len).
    mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)

    # Expand each start index into a full span of ``lengths[j]`` positions.
    mask_idc = np.asarray(
        [
            mask_idc[j] + offset
            for j in range(len(mask_idc))
            for offset in range(lengths[j])
        ]
        # NOTE(review): the line below is a duplicated copy of the
        # comprehension above. As written, the second list literal INDEXES the
        # first ([...][...]) — a TypeError at runtime. One of the two copies
        # must be deleted in the real file.
        [mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j])]
    )

    # Keep only unique, in-bounds positions for this batch row.
    mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))

    # Subsample each row down to ``min_len`` positions so every row masks the
    # same number of timesteps.
    mask_idc = np.random.choice(mask_idc, min_len, replace=False)
    if mask_dropout > 0:
        # Randomly un-mask ("punch holes in") a ``mask_dropout`` fraction of
        # the selected positions.
        num_holes = np.rint(len(mask_idc) * mask_dropout).astype(int)
        mask_idc = np.random.choice(
            mask_idc, len(mask_idc) - num_holes, replace=False
        )
        # NOTE(review): duplicated copy of the call above — if both survive in
        # the real file, holes are punched twice (length shrinks by roughly
        # 2 * num_holes). One of the two copies must be deleted.
        mask_idc = np.random.choice(mask_idc, len(mask_idc) - num_holes, replace=False)

    # Mark the chosen positions for batch row ``i`` in the boolean mask.
    mask[i, mask_idc] = True