aky15
2023-04-12 7d1efe158eda74dc847c397db906f6cb77ac0f84
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
"""LinearInput block for Transducer encoder."""
 
from typing import Optional, Tuple, Union
 
import torch
 
class LinearInput(torch.nn.Module):
    """LinearInput module definition.

    Projects input features to the block output dimension with a linear
    layer followed by layer normalization and dropout. This input block
    performs no temporal subsampling.

    Args:
        input_size: Input size.
        output_size: Block output dimension. Defaults to ``input_size``
            when None.
        subsampling_factor: Subsampling factor (kept for interface parity
            with other input blocks; this block does not subsample).
        dropout_rate: Dropout probability applied after layer normalization.

    """

    def __init__(
        self,
        input_size: int,
        output_size: Optional[int] = None,
        subsampling_factor: int = 1,
        dropout_rate: float = 0.1,
    ) -> None:
        """Construct a LinearInput object."""
        super().__init__()

        if output_size is None:
            # Fall back to the input dimension so the documented default
            # (output_size=None) works instead of failing inside nn.Linear.
            output_size = input_size

        self.embed = torch.nn.Sequential(
            torch.nn.Linear(input_size, output_size),
            torch.nn.LayerNorm(output_size),
            torch.nn.Dropout(dropout_rate),
        )

        # No temporal subsampling is performed; the factor is stored so
        # callers can treat all input blocks uniformly.
        self.subsampling_factor = subsampling_factor
        self.min_frame_length = 1

    def forward(
        self, x: torch.Tensor, mask: Optional[torch.Tensor]
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Encode input sequences.

        Args:
            x: Input sequences. (B, T, input_size)
            mask: Mask of input sequences. Passed through unchanged.

        Returns:
            x: Encoded sequences. (B, T, output_size)
            mask: Mask of encoded sequences (unchanged).

        """
        x = self.embed(x)

        return x, mask

    def get_size_before_subsampling(self, size: int) -> int:
        """Return the original size before subsampling for a given size.

        Since this block does not subsample, the size is returned as-is.

        Args:
            size: Number of frames after subsampling.

        Returns:
            : Number of frames before subsampling.

        """
        return size