From 3cd71a385a31f987f2db99df902ca36ee02b1813 Mon Sep 17 00:00:00 2001
From: 九耳 <mengzhe.cmz@alibaba-inc.com>
Date: Thu, 30 Mar 2023 17:29:12 +0800
Subject: [PATCH] pass sub_masks into the SANM encoder export instead of computing subsequent_mask internally
---
funasr/export/models/encoder/sanm_encoder.py | 26 ++++----------------------
 1 file changed, 4 insertions(+), 22 deletions(-)
diff --git a/funasr/export/models/encoder/sanm_encoder.py b/funasr/export/models/encoder/sanm_encoder.py
index 118e240..8390f68 100644
--- a/funasr/export/models/encoder/sanm_encoder.py
+++ b/funasr/export/models/encoder/sanm_encoder.py
@@ -9,20 +9,6 @@
from funasr.modules.positionwise_feed_forward import PositionwiseFeedForward
from funasr.export.models.modules.feedforward import PositionwiseFeedForward as PositionwiseFeedForward_export
-def subsequent_mask(size, device="cpu", dtype=torch.bool):
- """Create mask for subsequent steps (size, size).
-
- :param int size: size of mask
- :param str device: "cpu" or "cuda" or torch.Tensor.device
- :param torch.dtype dtype: result dtype
- :rtype: torch.Tensor
- >>> subsequent_mask(3)
- [[1, 0, 0],
- [1, 1, 0],
- [1, 1, 1]]
- """
- ret = torch.ones(size, size, device=device, dtype=dtype)
- return torch.tril(ret, out=ret)
class SANMEncoder(nn.Module):
def __init__(
@@ -163,14 +149,9 @@
self.num_heads = model.encoders[0].self_attn.h
self.hidden_size = model.encoders[0].self_attn.linear_out.out_features
- def prepare_mask(self, mask):
+ def prepare_mask(self, mask, sub_masks):
mask_3d_btd = mask[:, :, None]
- sub_masks = subsequent_mask(mask.size(-1))
- if len(mask.shape) == 2:
- mask_4d_bhlt = 1 - sub_masks[:, None, None, :]
- elif len(mask.shape) == 3:
- mask_4d_bhlt = 1 - sub_masks[:, None, :]
- mask_4d_bhlt = mask_4d_bhlt * -10000.0
+ mask_4d_bhlt = (1 - sub_masks) * -10000.0
return mask_3d_btd, mask_4d_bhlt
@@ -178,10 +159,11 @@
speech: torch.Tensor,
speech_lengths: torch.Tensor,
vad_mask: torch.Tensor,
+ sub_masks: torch.Tensor,
):
speech = speech * self._output_size ** 0.5
mask = self.make_pad_mask(speech_lengths)
- mask = self.prepare_mask(mask)
+ mask = self.prepare_mask(mask, sub_masks)
if self.embed is None:
xs_pad = speech
else:
--
Gitblit v1.9.1
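
Note (not part of the patch): after this change the exported SANMEncoder no longer builds the subsequent mask internally; the caller is expected to construct sub_masks and feed it as an additional forward input. Below is a minimal caller-side sketch that mirrors the removed subsequent_mask helper; the variable names, dummy shapes, and the exact dtype/broadcast shape the exported graph expects are assumptions, not taken from the patch.

import torch

def build_sub_masks(size: int, dtype=torch.float32) -> torch.Tensor:
    # Lower-triangular (size, size) matrix: 1 where a step may attend, 0 for
    # future steps. Same construction as the deleted subsequent_mask helper,
    # kept as float here so that (1 - sub_masks) * -10000.0 inside
    # prepare_mask yields a float attention bias.
    return torch.tril(torch.ones(size, size, dtype=dtype))

# Dummy inputs for illustration only.
speech = torch.randn(1, 93, 560)             # (batch, time, feat)
speech_lengths = torch.tensor([93])
sub_masks = build_sub_masks(speech.size(1))  # (time, time)

# encoder(speech, speech_lengths, vad_mask, sub_masks)
# where `encoder` and `vad_mask` come from the surrounding export setup.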