From 4ace5a95b052d338947fc88809a440ccd55cf6b4 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 16 Nov 2023 16:39:52 +0800
Subject: [PATCH] funasr pages

---
 funasr/export/models/modules/multihead_att.py |   14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/funasr/export/models/modules/multihead_att.py b/funasr/export/models/modules/multihead_att.py
index 0a56676..4885c4e 100644
--- a/funasr/export/models/modules/multihead_att.py
+++ b/funasr/export/models/modules/multihead_att.py
@@ -64,19 +64,21 @@
         return self.linear_out(context_layer)  # (batch, time1, d_model)
 
 
-def preprocess_for_attn(x, mask, cache, pad_fn):
+def preprocess_for_attn(x, mask, cache, pad_fn, kernel_size):
     x = x * mask
     x = x.transpose(1, 2)
     if cache is None:
         x = pad_fn(x)
     else:
-        x = torch.cat((cache[:, :, 1:], x), dim=2)
-        cache = x
+        x = torch.cat((cache, x), dim=2)
+        cache = x[:, :, -(kernel_size-1):]
     return x, cache
 
 
-import torch.fx
-torch.fx.wrap('preprocess_for_attn')
+torch_version = tuple([int(i) for i in torch.__version__.split(".")[:2]])
+if torch_version >= (1, 8):
+    import torch.fx
+    torch.fx.wrap('preprocess_for_attn')
 
 
 class MultiHeadedAttentionSANMDecoder(nn.Module):
@@ -88,7 +90,7 @@
         self.attn = None
 
     def forward(self, inputs, mask, cache=None):
-        x, cache = preprocess_for_attn(inputs, mask, cache, self.pad_fn)
+        x, cache = preprocess_for_attn(inputs, mask, cache, self.pad_fn, self.kernel_size)
         x = self.fsmn_block(x)
         x = x.transpose(1, 2)
 

--
Gitblit v1.9.1