From 3f8294b9d7deaa0cbdb0b2ef6f3802d46ae133a9 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Wed, 25 Dec 2024 17:16:11 +0800
Subject: [PATCH] Revert "shfit to shift (#2266)" (#2336)
---
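Note (below the "---" line, so `git am` ignores it): this revert restores the
historical parameter name `mask_shfit_chunk`, undoing the spelling correction
from #2266. A plausible reason, assumed here rather than stated in the patch,
is keyword compatibility: callers elsewhere in FunASR pass the mask by its
original, misspelled keyword, so renaming the parameter alone breaks them.
A minimal sketch of that failure mode, using a hypothetical caller that is
not taken from this patch:

    def forward(x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
        # Historical signature: the misspelled keyword is part of the public API.
        ...

    # A caller written against the historical API:
    forward(x, mask, mask_shfit_chunk=chunk_mask)   # works
    # After the rename in #2266 (mask_shift_chunk=None), the same call raises:
    #   TypeError: forward() got an unexpected keyword argument 'mask_shfit_chunk'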
funasr/models/ct_transformer_streaming/attention.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/funasr/models/ct_transformer_streaming/attention.py b/funasr/models/ct_transformer_streaming/attention.py
index be7113f..97e770b 100644
--- a/funasr/models/ct_transformer_streaming/attention.py
+++ b/funasr/models/ct_transformer_streaming/attention.py
@@ -11,9 +11,9 @@
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    def forward(self, x, mask, mask_shift_chunk=None, mask_att_chunk_encoder=None):
+    def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
         q_h, k_h, v_h, v = self.forward_qkv(x)
-        fsmn_memory = self.forward_fsmn(v, mask[0], mask_shift_chunk)
+        fsmn_memory = self.forward_fsmn(v, mask[0], mask_shfit_chunk)
         q_h = q_h * self.d_k ** (-0.5)
         scores = torch.matmul(q_h, k_h.transpose(-2, -1))
         att_outs = self.forward_attention(v_h, scores, mask[1], mask_att_chunk_encoder)
--
Gitblit v1.9.1