huangmingming
2023-01-30 adcee8828ef5d78b575043954deb662a35e318f7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import torch
from typing import Tuple
from typing import Union
from funasr.modules.nets_utils import make_non_pad_mask
 
 
class StatisticPooling(torch.nn.Module):
    """Pool features to (mean, stddev) statistics over one or more axes.

    Typically used in speaker-embedding front-ends: given activations of
    shape (Batch, Channel, Time, Frequency), pooling over the time axis
    yields a fixed-size utterance-level statistic vector.

    Args:
        pooling_dim: Axis (or tuple of axes) to pool over. Default 2 (time).
        eps: Floor applied to the variance before the square root, to keep
            the gradient of ``sqrt`` finite for (near-)constant inputs.
    """

    def __init__(self, pooling_dim: Union[int, Tuple] = 2, eps=1e-12):
        super(StatisticPooling, self).__init__()
        if isinstance(pooling_dim, int):
            pooling_dim = (pooling_dim, )
        self.pooling_dim = pooling_dim
        self.eps = eps

    def forward(self, xs_pad, ilens=None):
        """Compute masked mean/stddev statistics of ``xs_pad``.

        Args:
            xs_pad: Padded input, e.g. (Batch, Channel, Time, Frequency).
            ilens: Valid lengths along dim 2; ``None`` means no padding.
                NOTE(review): assumes ``make_non_pad_mask(..., length_dim=2)``
                returns a broadcastable 0/1 mask with 1 at valid positions —
                confirm against the helper's contract.

        Returns:
            ``cat([mean, stddev], dim=1)`` with the pooled dims squeezed out.
        """
        if ilens is None:
            # No padding information: treat every position as valid.
            masks = torch.ones_like(xs_pad)
        else:
            masks = make_non_pad_mask(ilens, xs_pad, length_dim=2).to(xs_pad)

        # Shared normalizer: number of valid positions per pooled slice.
        denom = torch.sum(masks, dim=self.pooling_dim, keepdim=True)

        # Mask the data before summing so padded frames do not bias the
        # statistics. (Previously the mask was computed but never applied:
        # every padded frame leaked (pad - mean)^2 into the variance.)
        mean = torch.sum(xs_pad * masks, dim=self.pooling_dim, keepdim=True) / denom
        squared_difference = torch.pow((xs_pad - mean) * masks, 2.0)
        variance = torch.sum(squared_difference, dim=self.pooling_dim, keepdim=True) / denom

        # Squeeze from the highest dim down so earlier squeezes cannot shift
        # the indices of dims still to be squeezed.
        for i in sorted(self.pooling_dim, reverse=True):
            mean, variance = torch.squeeze(mean, dim=i), torch.squeeze(variance, dim=i)

        # Floor the variance so sqrt stays differentiable at zero.
        stddev = torch.sqrt(torch.clamp(variance, min=self.eps))

        stat_pooling = torch.cat([mean, stddev], dim=1)

        return stat_pooling