From a016617c7ec98ab9c7475ff7d3b6150b98d5beeb Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 28 Feb 2023 18:36:52 +0800
Subject: [PATCH] Merge pull request #165 from alibaba-damo-academy/dev_cmz
---
funasr/modules/attention.py | 12 ++++++++++++
 1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/funasr/modules/attention.py b/funasr/modules/attention.py
index c47d96d..6277005 100644
--- a/funasr/modules/attention.py
+++ b/funasr/modules/attention.py
@@ -439,6 +439,18 @@
         att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
         return att_outs + fsmn_memory
+class MultiHeadedAttentionSANMwithMask(MultiHeadedAttentionSANM):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
+        q_h, k_h, v_h, v = self.forward_qkv(x)
+        fsmn_memory = self.forward_fsmn(v, mask[0], mask_shfit_chunk)
+        q_h = q_h * self.d_k ** (-0.5)
+        scores = torch.matmul(q_h, k_h.transpose(-2, -1))
+        att_outs = self.forward_attention(v_h, scores, mask[1], mask_att_chunk_encoder)
+        return att_outs + fsmn_memory
+
 class MultiHeadedAttentionSANMDecoder(nn.Module):
     """Multi-Head Attention layer.
--
Gitblit v1.9.1
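
For reference, the new MultiHeadedAttentionSANMwithMask differs from its parent only in how the mask argument is interpreted: it is expected to be an indexable pair, with mask[0] applied to the FSMN memory branch and mask[1] applied to the attention scores. Below is a minimal usage sketch of that calling convention; the constructor arguments and tensor shapes are illustrative assumptions, not taken from the patch.

    # Hypothetical usage sketch. The constructor arguments (n_head, n_feat,
    # dropout_rate, kernel_size) and the mask shapes are assumptions; only the
    # forward(x, mask, ...) signature and the mask[0]/mask[1] split come from the patch.
    import torch
    from funasr.modules.attention import MultiHeadedAttentionSANMwithMask

    attn = MultiHeadedAttentionSANMwithMask(4, 256, 0.1, 11)  # assumed signature

    x = torch.randn(2, 50, 256)        # (batch, time, feature)
    fsmn_mask = torch.ones(2, 50, 1)   # mask[0]: applied to the FSMN memory branch
    att_mask = torch.ones(2, 1, 50)    # mask[1]: applied to the attention scores

    out = attn(x, (fsmn_mask, att_mask))
    print(out.shape)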