From 1cdb3cc28d4d89a576cc06e5cd8eb80da1f3a3aa Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Fri, 26 Apr 2024 11:27:39 +0800
Subject: [PATCH] Dev gzf exp (#1665)
---
funasr/utils/torch_function.py | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/funasr/utils/torch_function.py b/funasr/utils/torch_function.py
index a078a7e..f637bbf 100644
--- a/funasr/utils/torch_function.py
+++ b/funasr/utils/torch_function.py
@@ -13,7 +13,7 @@
             self.mask_pad = torch.Tensor(1 - np.tri(max_seq_len)).type(torch.bool)
         else:
             self.mask_pad = torch.Tensor(np.tri(max_seq_len)).type(torch.bool)
-
+
     def forward(self, lengths, xs=None, length_dim=-1, maxlen=None):
         """Make mask tensor containing indices of padded part.
         This implementation creates the same mask tensor with original make_pad_mask,
@@ -25,8 +25,7 @@
 
         if xs is not None and len(xs.shape) == 3:
             if length_dim == 1:
-                lengths = lengths.unsqueeze(1).expand(
-                    *xs.transpose(1, 2).shape[:2])
+                lengths = lengths.unsqueeze(1).expand(*xs.transpose(1, 2).shape[:2])
             else:
                 lengths = lengths.unsqueeze(1).expand(*xs.shape[:2])
 
@@ -44,26 +43,31 @@
         else:
             return mask
 
+
 class sequence_mask(nn.Module):
     def __init__(self, max_seq_len=512, flip=True):
         super().__init__()
-
+
     def forward(self, lengths, max_seq_len=None, dtype=torch.float32, device=None):
         if max_seq_len is None:
             max_seq_len = lengths.max()
         row_vector = torch.arange(0, max_seq_len, 1).to(lengths.device)
         matrix = torch.unsqueeze(lengths, dim=-1)
         mask = row_vector < matrix
-
+
         return mask.type(dtype).to(device) if device is not None else mask.type(dtype)
 
-def normalize(input: torch.Tensor, p: float = 2.0, dim: int = 1, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+
+def normalize(
+    input: torch.Tensor, p: float = 2.0, dim: int = 1, out: Optional[torch.Tensor] = None
+) -> torch.Tensor:
     if out is None:
         denom = input.norm(p, dim, keepdim=True).expand_as(input)
         return input / denom
     else:
         denom = input.norm(p, dim, keepdim=True).expand_as(input)
         return torch.div(input, denom, out=out)
 
+
 def subsequent_mask(size: torch.Tensor):
     return torch.ones(size, size).tril()
@@ -76,5 +80,5 @@
     print(mask)
-if __name__ == '__main__':
-    MakePadMask_test()
\ No newline at end of file
+if __name__ == "__main__":
+    MakePadMask_test()
--
Gitblit v1.9.1
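
Usage note (illustrative, not part of the patch): a minimal sketch of how the
helpers touched above behave after the reformatting, assuming funasr is
importable from the working tree; the tensor values are made up for
demonstration.

    # Minimal sketch: exercises the helpers changed by this patch.
    import torch
    from funasr.utils.torch_function import sequence_mask, normalize, subsequent_mask

    lengths = torch.tensor([3, 5, 2])               # illustrative sequence lengths
    mask = sequence_mask()(lengths)                 # (3, 5) float mask: 1.0 where pos < length
    x = normalize(torch.randn(2, 4), p=2.0, dim=1)  # each row rescaled to unit L2 norm
    causal = subsequent_mask(4)                     # 4x4 lower-triangular causal mask
    print(mask, x.norm(dim=1), causal)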