From bcf6be4c902bda2b2ae16ee018bf223d7bf7b590 Mon Sep 17 00:00:00 2001
From: Lizerui9926 <110582652+Lizerui9926@users.noreply.github.com>
Date: Wed, 08 Feb 2023 19:13:57 +0800
Subject: [PATCH] Merge pull request #74 from alibaba-damo-academy/dev_gzf

---
 funasr/export/utils/torch_function.py |   80 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+), 0 deletions(-)

diff --git a/funasr/export/utils/torch_function.py b/funasr/export/utils/torch_function.py
new file mode 100644
index 0000000..a078a7e
--- /dev/null
+++ b/funasr/export/utils/torch_function.py
@@ -0,0 +1,80 @@
+from typing import Optional
+
+import torch
+import torch.nn as nn
+
+import numpy as np
+
+
+class MakePadMask(nn.Module):
+    def __init__(self, max_seq_len=512, flip=True):
+        super().__init__()
+        # Precompute a (max_seq_len, max_seq_len) boolean triangular table so the
+        # mask can later be built by simple indexing, which keeps the graph ONNX-exportable.
+        if flip:
+            # flip=True: True marks padded positions, as in the original make_pad_mask.
+            self.mask_pad = torch.Tensor(1 - np.tri(max_seq_len)).type(torch.bool)
+        else:
+            self.mask_pad = torch.Tensor(np.tri(max_seq_len)).type(torch.bool)
+    
+    def forward(self, lengths, xs=None, length_dim=-1, maxlen=None):
+        """Make mask tensor containing indices of padded part.
+        This implementation creates the same mask tensor with original make_pad_mask,
+        which can be converted into onnx format.
+        Dimension length of xs should be 2 or 3.
+        """
+        if length_dim == 0:
+            raise ValueError("length_dim cannot be 0: {}".format(length_dim))
+
+        # For 3-D xs, broadcast lengths over the extra (non-length) dimension so
+        # that one mask row is produced per (batch, feature) pair.
+        if xs is not None and len(xs.shape) == 3:
+            if length_dim == 1:
+                lengths = lengths.unsqueeze(1).expand(
+                    *xs.transpose(1, 2).shape[:2])
+            else:
+                lengths = lengths.unsqueeze(1).expand(*xs.shape[:2])
+
+        if maxlen is not None:
+            m = maxlen
+        elif xs is not None:
+            m = xs.shape[-1]
+        else:
+            m = torch.max(lengths)
+
+        # Look up row (length - 1) of the precomputed table and keep the first m columns.
+        mask = self.mask_pad[lengths - 1][..., :m].type(torch.float32)
+
+        if length_dim == 1:
+            return mask.transpose(1, 2)
+        else:
+            return mask
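+
+# A minimal usage sketch: with the default flip=True, MakePadMask marks padded
+# positions with 1.0 and valid positions with 0.0, matching the convention of the
+# original make_pad_mask. The helper below is purely illustrative.
+def _make_pad_mask_example():
+    make_pad_mask = MakePadMask(max_seq_len=512, flip=True)
+    lengths = torch.tensor([3, 5], dtype=torch.long)
+    mask = make_pad_mask(lengths, maxlen=5)
+    # mask -> tensor([[0., 0., 0., 1., 1.],
+    #                 [0., 0., 0., 0., 0.]])
+    return mask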
+
+class sequence_mask(nn.Module):
+    def __init__(self, max_seq_len=512, flip=True):
+        # max_seq_len and flip are accepted for interface compatibility with
+        # MakePadMask but are not used here.
+        super().__init__()
+    
+    def forward(self, lengths, max_seq_len=None, dtype=torch.float32, device=None):
+        if max_seq_len is None:
+            max_seq_len = lengths.max()
+        # True where the position index is smaller than the sequence length.
+        row_vector = torch.arange(0, max_seq_len, 1).to(lengths.device)
+        matrix = torch.unsqueeze(lengths, dim=-1)
+        mask = row_vector < matrix
+
+        return mask.type(dtype).to(device) if device is not None else mask.type(dtype)
+
+def normalize(input: torch.Tensor, p: float = 2.0, dim: int = 1, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    """L^p-normalize `input` along `dim` (no eps clamping of the denominator)."""
+    denom = input.norm(p, dim, keepdim=True).expand_as(input)
+    if out is None:
+        return input / denom
+    return torch.div(input, denom, out=out)
+
+def subsequent_mask(size: torch.Tensor):
+    """Lower-triangular (causal) mask of shape (size, size)."""
+    return torch.ones(size, size).tril()
+
+
+def MakePadMask_test():
+    feats_length = torch.tensor([10]).type(torch.long)
+    mask_fn = MakePadMask()
+    mask = mask_fn(feats_length)
+    print(mask)
+
+
+if __name__ == '__main__':
+    MakePadMask_test()
\ No newline at end of file

--
Gitblit v1.9.1