From 0efc87352ce7d3903dbdedbfa5d01ca5e1cb19e7 Mon Sep 17 00:00:00 2001
From: Shi Xian <40013335+R1ckShi@users.noreply.github.com>
Date: Thu, 05 Dec 2024 15:15:38 +0800
Subject: [PATCH] Merge pull request #2267 from modelscope/dev_sx2
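
Keep the attention weights of forward_attention in a local variable
instead of caching them on self.attn. The weights are still returned
to the caller, so the interface is unchanged; only the stored module
attribute is dropped. The likely motivation (not stated in the patch
itself): assigning to self.attn mutates module state inside the
forward pass, which retains the last attention tensor between calls
and can interfere with graph export or concurrent use of the module.

A minimal runnable sketch of the resulting pattern. MiniAttention is
a hypothetical standalone class for illustration only; the real
module in funasr/models/lcbnet/attention.py carries more machinery:

    import torch
    import torch.nn as nn

    class MiniAttention(nn.Module):
        """Hypothetical minimal attention head illustrating the change."""

        def __init__(self, d_model: int, n_head: int, dropout: float = 0.1):
            super().__init__()
            assert d_model % n_head == 0
            self.d_k = d_model // n_head
            self.h = n_head
            self.linear_out = nn.Linear(d_model, d_model)
            self.dropout = nn.Dropout(dropout)

        def forward_attention(self, value, scores, mask=None):
            n_batch = value.size(0)
            if mask is not None:
                mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
                scores = scores.masked_fill(mask, torch.finfo(scores.dtype).min)
                # local variable, not self.attn: no state survives the call
                attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
            else:
                attn = torch.softmax(scores, dim=-1)
            x = torch.matmul(self.dropout(attn), value)  # (batch, head, time1, d_k)
            x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
            return self.linear_out(x), attn  # weights go back to the caller

    # usage sketch
    m = MiniAttention(d_model=8, n_head=2)
    v = torch.randn(1, 2, 5, 4)  # (batch, head, time2, d_k)
    s = torch.randn(1, 2, 5, 5)  # (batch, head, time1, time2)
    out, attn = m.forward_attention(v, s)
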
---
funasr/models/lcbnet/attention.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/funasr/models/lcbnet/attention.py b/funasr/models/lcbnet/attention.py
index 05a5041..83753ed 100644
--- a/funasr/models/lcbnet/attention.py
+++ b/funasr/models/lcbnet/attention.py
@@ -78,19 +78,19 @@
             mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
             min_value = torch.finfo(scores.dtype).min
             scores = scores.masked_fill(mask, min_value)
-            self.attn = torch.softmax(scores, dim=-1).masked_fill(
+            attn = torch.softmax(scores, dim=-1).masked_fill(
                 mask, 0.0
             )  # (batch, head, time1, time2)
         else:
-            self.attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
+            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
 
-        p_attn = self.dropout(self.attn)
+        p_attn = self.dropout(attn)
         x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
         x = (
             x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
         )  # (batch, time1, d_model)
 
-        return self.linear_out(x), self.attn  # (batch, time1, d_model)
+        return self.linear_out(x), attn  # (batch, time1, d_model)
 
     def forward(self, query, key, value, mask):
         """Compute scaled dot product attention.
--
Gitblit v1.9.1