        self.affine = AffineTransform(proj_dim, linear_dim)
        self.relu = RectifiedLinear(linear_dim, linear_dim)

    def forward(self, input: torch.Tensor, cache: Dict[str, torch.Tensor]):
        x1 = self.linear(input)  # (B, T, D)
        cache_layer_name = 'cache_layer_{}'.format(self.stack_layer)
        if cache_layer_name not in cache:
            # Lazily create this layer's left-context memory: (lorder - 1) * lstride
            # past frames, zero-initialized for the first streaming chunk.
            cache[cache_layer_name] = torch.zeros(
                x1.shape[0], x1.shape[-1], (self.lorder - 1) * self.lstride, 1)
        x2, cache[cache_layer_name] = self.fsmn_block(x1, cache[cache_layer_name])
        x3 = self.affine(x2)
        x4 = self.relu(x3)
        return x4
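
# A minimal sketch of the lazy cache creation above, assuming this file's
# existing `torch` and `typing.Dict` imports. The concrete sizes
# (B=1, T=4, D=8, lorder=3, lstride=1) are assumptions for illustration only.
def _example_cache_shape():
    B, T, D, lorder, lstride = 1, 4, 8, 3, 1
    cache: Dict[str, torch.Tensor] = {}  # empty dict on the first streaming chunk
    x1 = torch.randn(B, T, D)
    name = 'cache_layer_0'
    if name not in cache:
        # Mirrors BasicBlock.forward: shape (B, D, (lorder - 1) * lstride, 1)
        cache[name] = torch.zeros(x1.shape[0], x1.shape[-1], (lorder - 1) * lstride, 1)
    assert cache[name].shape == (1, 8, 2, 1)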

class FsmnStack(nn.Sequential):
    def __init__(self, *args):
        super(FsmnStack, self).__init__(*args)

    def forward(self, input: torch.Tensor, cache: Dict[str, torch.Tensor]):
        x = input
        for module in self._modules.values():
            # Every block shares the same cache dict, keyed by its stack_layer index.
            x = module(x, cache)
        return x
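
# A minimal sketch of how FsmnStack threads one shared cache dict through its
# layers: each block writes under its own 'cache_layer_{i}' key. _DummyBlock is
# a hypothetical stand-in for BasicBlock, used only to show the cache contract;
# it assumes this file's `torch.nn as nn` import.
class _DummyBlock(nn.Module):
    def __init__(self, idx: int):
        super().__init__()
        self.idx = idx

    def forward(self, x: torch.Tensor, cache: Dict[str, torch.Tensor]):
        cache['cache_layer_{}'.format(self.idx)] = x.detach()
        return x


def _example_stack_cache():
    stack = FsmnStack(_DummyBlock(0), _DummyBlock(1))
    cache: Dict[str, torch.Tensor] = {}
    _ = stack(torch.randn(1, 4, 8), cache)
    assert sorted(cache) == ['cache_layer_0', 'cache_layer_1']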


    def forward(
        self,
        input: torch.Tensor,
        cache: Dict[str, torch.Tensor]
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Args:
            input (torch.Tensor): Input tensor (B, T, D).
            cache: when cache is not None, the forward pass runs in streaming mode.
                cache is a dict, e.g. {'cache_layer_1': torch.Tensor(B, T1, D)},
                where T1 equals self.lorder. It is {} for the first frame.
        """
        x1 = self.in_linear1(input)
        x2 = self.in_linear2(x1)
        x3 = self.relu(x2)
        x4 = self.fsmn(x3, cache)  # the cache dict is updated in place by self.fsmn
        x5 = self.out_linear1(x4)
        x6 = self.out_linear2(x5)
        x7 = self.softmax(x6)
        return x7, cache
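
# A minimal streaming sketch, assuming `model` is an instance of the FSMN model
# whose forward is shown above; the default chunk length of 16 frames is an
# assumption for illustration, not a value taken from this file.
def _stream(model, frames: torch.Tensor, chunk_size: int = 16) -> torch.Tensor:
    cache: Dict[str, torch.Tensor] = {}  # {} for the first chunk, per the docstring
    outputs = []
    for start in range(0, frames.shape[1], chunk_size):
        piece = frames[:, start:start + chunk_size, :]  # (B, chunk, D) slice
        scores, cache = model(piece, cache)  # cache carries left context forward
        outputs.append(scores)
    return torch.cat(outputs, dim=1)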