From 23e7ddebccd3b05cf7ef89809bcfe565ad6dfa1f Mon Sep 17 00:00:00 2001
From: majic31 <majic31@163.com>
Date: Tue, 24 Dec 2024 10:00:14 +0800
Subject: [PATCH] Fix the variable name (#2328)
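
Default the cache argument of FSMNBlock.forward, BasicBlock.forward and
FSMN.forward to None so the encoder can also run offline (non-streaming):
when no cache dict is passed, the left context is zero-padded instead of
being read from and written back to the cache. Also add a use_softmax flag
to FSMN (raw logits are returned when it is False), add an output_size()
helper returning output_dim, reformat the constructors, and drop the unused
DFSMN classes and the __main__ test block.

A minimal offline-usage sketch (constructor arguments copied from the removed
__main__ block; the shapes below are illustrative only, not part of this patch):

    import torch
    from funasr.models.fsmn_vad_streaming.encoder import FSMN

    fsmn = FSMN(400, 140, 4, 250, 128, 10, 2, 1, 1, 140, 2599)
    x = torch.zeros(8, 200, 400)  # batch x time x input_dim
    y = fsmn(x)                   # cache defaults to None -> offline padding path
    print(y.shape)                # (8, 200, 2599); per-frame softmax since use_softmax=True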
---
funasr/models/fsmn_vad_streaming/encoder.py | 241 +++++++++++++++++++-----------------------------
 1 file changed, 96 insertions(+), 145 deletions(-)
diff --git a/funasr/models/fsmn_vad_streaming/encoder.py b/funasr/models/fsmn_vad_streaming/encoder.py
index bc51a6f..14c2f5f 100755
--- a/funasr/models/fsmn_vad_streaming/encoder.py
+++ b/funasr/models/fsmn_vad_streaming/encoder.py
@@ -9,6 +9,7 @@
from funasr.register import tables
+
class LinearTransform(nn.Module):
def __init__(self, input_dim, output_dim):
@@ -53,13 +54,13 @@
class FSMNBlock(nn.Module):
def __init__(
- self,
- input_dim: int,
- output_dim: int,
- lorder=None,
- rorder=None,
- lstride=1,
- rstride=1,
+ self,
+ input_dim: int,
+ output_dim: int,
+ lorder=None,
+ rorder=None,
+ lstride=1,
+ rstride=1,
):
super(FSMNBlock, self).__init__()
@@ -74,28 +75,34 @@
self.rstride = rstride
self.conv_left = nn.Conv2d(
- self.dim, self.dim, [lorder, 1], dilation=[lstride, 1], groups=self.dim, bias=False)
+ self.dim, self.dim, [lorder, 1], dilation=[lstride, 1], groups=self.dim, bias=False
+ )
if self.rorder > 0:
self.conv_right = nn.Conv2d(
- self.dim, self.dim, [rorder, 1], dilation=[rstride, 1], groups=self.dim, bias=False)
+ self.dim, self.dim, [rorder, 1], dilation=[rstride, 1], groups=self.dim, bias=False
+ )
else:
self.conv_right = None
- def forward(self, input: torch.Tensor, cache: torch.Tensor):
+ def forward(self, input: torch.Tensor, cache: torch.Tensor = None):
x = torch.unsqueeze(input, 1)
x_per = x.permute(0, 3, 2, 1) # B D T C
-
- cache = cache.to(x_per.device)
- y_left = torch.cat((cache, x_per), dim=2)
- cache = y_left[:, :, -(self.lorder - 1) * self.lstride:, :]
+
+ if cache is not None:
+ cache = cache.to(x_per.device)
+ y_left = torch.cat((cache, x_per), dim=2)
+ cache = y_left[:, :, -(self.lorder - 1) * self.lstride :, :]
+ else:
+ y_left = F.pad(x_per, [0, 0, (self.lorder - 1) * self.lstride, 0])
+
y_left = self.conv_left(y_left)
out = x_per + y_left
if self.conv_right is not None:
# maybe need to check
y_right = F.pad(x_per, [0, 0, 0, self.rorder * self.rstride])
- y_right = y_right[:, :, self.rstride:, :]
+ y_right = y_right[:, :, self.rstride :, :]
y_right = self.conv_right(y_right)
out += y_right
@@ -106,15 +113,16 @@
class BasicBlock(nn.Module):
- def __init__(self,
- linear_dim: int,
- proj_dim: int,
- lorder: int,
- rorder: int,
- lstride: int,
- rstride: int,
- stack_layer: int
- ):
+ def __init__(
+ self,
+ linear_dim: int,
+ proj_dim: int,
+ lorder: int,
+ rorder: int,
+ lstride: int,
+ rstride: int,
+ stack_layer: int,
+ ):
super(BasicBlock, self).__init__()
self.lorder = lorder
self.rorder = rorder
@@ -126,19 +134,28 @@
self.affine = AffineTransform(proj_dim, linear_dim)
self.relu = RectifiedLinear(linear_dim, linear_dim)
- def forward(self, input: torch.Tensor, cache: Dict[str, torch.Tensor]):
+ def forward(self, input: torch.Tensor, cache: Dict[str, torch.Tensor] = None):
x1 = self.linear(input) # B T D
- cache_layer_name = 'cache_layer_{}'.format(self.stack_layer)
- if cache_layer_name not in cache:
- cache[cache_layer_name] = torch.zeros(x1.shape[0], x1.shape[-1], (self.lorder - 1) * self.lstride, 1)
- x2, cache[cache_layer_name] = self.fsmn_block(x1, cache[cache_layer_name])
+
+ if cache is not None:
+ cache_layer_name = 'cache_layer_{}'.format(self.stack_layer)
+ if cache_layer_name not in cache:
+ cache[cache_layer_name] = torch.zeros(
+ x1.shape[0], x1.shape[-1], (self.lorder - 1) * self.lstride, 1
+ )
+ x2, cache[cache_layer_name] = self.fsmn_block(x1, cache[cache_layer_name])
+ else:
+ x2, _ = self.fsmn_block(x1, None)
x3 = self.affine(x2)
x4 = self.relu(x3)
return x4
+
+
class BasicBlock_export(nn.Module):
- def __init__(self,
- model,
- ):
+ def __init__(
+ self,
+ model,
+ ):
super(BasicBlock_export, self).__init__()
self.linear = model.linear
self.fsmn_block = model.fsmn_block
@@ -167,7 +184,7 @@
return x
-'''
+"""
FSMN net for keyword spotting
input_dim: input dimension
linear_dim: fsmn input dimension
@@ -176,23 +193,25 @@
rorder: fsmn right order
num_syn: output dimension
fsmn_layers: no. of sequential fsmn layers
-'''
+"""
+
@tables.register("encoder_classes", "FSMN")
class FSMN(nn.Module):
def __init__(
- self,
- input_dim: int,
- input_affine_dim: int,
- fsmn_layers: int,
- linear_dim: int,
- proj_dim: int,
- lorder: int,
- rorder: int,
- lstride: int,
- rstride: int,
- output_affine_dim: int,
- output_dim: int
+ self,
+ input_dim: int,
+ input_affine_dim: int,
+ fsmn_layers: int,
+ linear_dim: int,
+ proj_dim: int,
+ lorder: int,
+ rorder: int,
+ lstride: int,
+ rstride: int,
+ output_affine_dim: int,
+ output_dim: int,
+ use_softmax: bool = True,
):
super().__init__()
@@ -207,19 +226,29 @@
self.in_linear1 = AffineTransform(input_dim, input_affine_dim)
self.in_linear2 = AffineTransform(input_affine_dim, linear_dim)
self.relu = RectifiedLinear(linear_dim, linear_dim)
- self.fsmn = FsmnStack(*[BasicBlock(linear_dim, proj_dim, lorder, rorder, lstride, rstride, i) for i in
- range(fsmn_layers)])
+ self.fsmn = FsmnStack(
+ *[
+ BasicBlock(linear_dim, proj_dim, lorder, rorder, lstride, rstride, i)
+ for i in range(fsmn_layers)
+ ]
+ )
self.out_linear1 = AffineTransform(linear_dim, output_affine_dim)
self.out_linear2 = AffineTransform(output_affine_dim, output_dim)
- self.softmax = nn.Softmax(dim=-1)
-
+
+ self.use_softmax = use_softmax
+ if self.use_softmax:
+ self.softmax = nn.Softmax(dim=-1)
+
def fuse_modules(self):
pass
+ def output_size(self) -> int:
+ return self.output_dim
+
def forward(
- self,
- input: torch.Tensor,
- cache: Dict[str, torch.Tensor]
+ self,
+ input: torch.Tensor,
+ cache: Dict[str, torch.Tensor] = None
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Args:
@@ -234,18 +263,23 @@
x4 = self.fsmn(x3, cache) # self.cache will update automatically in self.fsmn
x5 = self.out_linear1(x4)
x6 = self.out_linear2(x5)
- x7 = self.softmax(x6)
- return x7
+ if self.use_softmax:
+ x7 = self.softmax(x6)
+ return x7
+
+ return x6
@tables.register("encoder_classes", "FSMNExport")
class FSMNExport(nn.Module):
def __init__(
- self, model, **kwargs,
+ self,
+ model,
+ **kwargs,
):
super().__init__()
-
+
# self.input_dim = input_dim
# self.input_affine_dim = input_affine_dim
# self.fsmn_layers = fsmn_layers
@@ -262,6 +296,7 @@
# self.out_linear1 = AffineTransform(linear_dim, output_affine_dim)
# self.out_linear2 = AffineTransform(output_affine_dim, output_dim)
# self.softmax = nn.Softmax(dim=-1)
+
self.in_linear1 = model.in_linear1
self.in_linear2 = model.in_linear2
self.relu = model.relu
@@ -273,10 +308,10 @@
for i, d in enumerate(model.fsmn):
if isinstance(d, BasicBlock):
self.fsmn[i] = BasicBlock_export(d)
-
+
def fuse_modules(self):
pass
-
+
def forward(
self,
input: torch.Tensor,
@@ -288,7 +323,7 @@
in_cache: when in_cache is not None, the forward is in streaming. The type of in_cache is a dict, egs,
{'cache_layer_1': torch.Tensor(B, T1, D)}, T1 is equal to self.lorder. It is {} for the 1st frame
"""
-
+
x = self.in_linear1(input)
x = self.in_linear2(x)
x = self.relu(x)
@@ -301,89 +336,5 @@
x = self.out_linear1(x)
x = self.out_linear2(x)
x = self.softmax(x)
-
+
return x, out_caches
-
-
-'''
-one deep fsmn layer
-dimproj: projection dimension, input and output dimension of memory blocks
-dimlinear: dimension of mapping layer
-lorder: left order
-rorder: right order
-lstride: left stride
-rstride: right stride
-'''
-
-@tables.register("encoder_classes", "DFSMN")
-class DFSMN(nn.Module):
-
- def __init__(self, dimproj=64, dimlinear=128, lorder=20, rorder=1, lstride=1, rstride=1):
- super(DFSMN, self).__init__()
-
- self.lorder = lorder
- self.rorder = rorder
- self.lstride = lstride
- self.rstride = rstride
-
- self.expand = AffineTransform(dimproj, dimlinear)
- self.shrink = LinearTransform(dimlinear, dimproj)
-
- self.conv_left = nn.Conv2d(
- dimproj, dimproj, [lorder, 1], dilation=[lstride, 1], groups=dimproj, bias=False)
-
- if rorder > 0:
- self.conv_right = nn.Conv2d(
- dimproj, dimproj, [rorder, 1], dilation=[rstride, 1], groups=dimproj, bias=False)
- else:
- self.conv_right = None
-
- def forward(self, input):
- f1 = F.relu(self.expand(input))
- p1 = self.shrink(f1)
-
- x = torch.unsqueeze(p1, 1)
- x_per = x.permute(0, 3, 2, 1)
-
- y_left = F.pad(x_per, [0, 0, (self.lorder - 1) * self.lstride, 0])
-
- if self.conv_right is not None:
- y_right = F.pad(x_per, [0, 0, 0, (self.rorder) * self.rstride])
- y_right = y_right[:, :, self.rstride:, :]
- out = x_per + self.conv_left(y_left) + self.conv_right(y_right)
- else:
- out = x_per + self.conv_left(y_left)
-
- out1 = out.permute(0, 3, 2, 1)
- output = input + out1.squeeze(1)
-
- return output
-
-
-'''
-build stacked dfsmn layers
-'''
-
-
-def buildDFSMNRepeats(linear_dim=128, proj_dim=64, lorder=20, rorder=1, fsmn_layers=6):
- repeats = [
- nn.Sequential(
- DFSMN(proj_dim, linear_dim, lorder, rorder, 1, 1))
- for i in range(fsmn_layers)
- ]
-
- return nn.Sequential(*repeats)
-
-
-if __name__ == '__main__':
- fsmn = FSMN(400, 140, 4, 250, 128, 10, 2, 1, 1, 140, 2599)
- print(fsmn)
-
- num_params = sum(p.numel() for p in fsmn.parameters())
- print('the number of model params: {}'.format(num_params))
- x = torch.zeros(128, 200, 400) # batch-size * time * dim
- y, _ = fsmn(x) # batch-size * time * dim
- print('input shape: {}'.format(x.shape))
- print('output shape: {}'.format(y.shape))
-
- print(fsmn.to_kaldi_net())
--
Gitblit v1.9.1