From fc547e14e818772811c3dccd9bb09e45e35df168 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 25 Sep 2024 15:26:14 +0800
Subject: [PATCH] bugfix: memory leak in attention modules
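
Storing the attention weights on the module (self.attn = ...) keeps the
(batch, head, time1, time2) tensor -- and, when grad is enabled, the autograd
graph it references -- alive on the module after forward() returns, so the
memory stays allocated between calls. Replacing the attribute with a local
variable lets the tensor be freed as soon as it goes out of scope.

A minimal sketch of the pattern (hypothetical module names, not the FunASR
classes):

    import torch
    import torch.nn as nn

    class LeakyAttn(nn.Module):
        def forward(self, scores, value):
            # Kept as an attribute: retained until the next forward() call.
            self.attn = torch.softmax(scores, dim=-1)
            return torch.matmul(self.attn, value)

    class FixedAttn(nn.Module):
        def forward(self, scores, value):
            # Local variable: released when forward() returns.
            attn = torch.softmax(scores, dim=-1)
            return torch.matmul(attn, value)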
---
funasr/models/transformer/attention.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/funasr/models/transformer/attention.py b/funasr/models/transformer/attention.py
index 6e6f754..2844333 100644
--- a/funasr/models/transformer/attention.py
+++ b/funasr/models/transformer/attention.py
@@ -87,13 +87,13 @@
"inf"
) # min_value = float(np.finfo(torch.tensor(0, dtype=qk.dtype).numpy().dtype).min)
scores = scores.masked_fill(mask, min_value)
- self.attn = torch.softmax(scores, dim=-1).masked_fill(
+ attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
- self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
+ attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
- p_attn = self.dropout(self.attn)
+ p_attn = self.dropout(attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
@@ -154,8 +154,8 @@
def forward_attention(self, value, scores, mask):
scores = scores + mask
- self.attn = torch.softmax(scores, dim=-1)
- context_layer = torch.matmul(self.attn, value) # (batch, head, time1, d_k)
+ attn = torch.softmax(scores, dim=-1)
+ context_layer = torch.matmul(attn, value) # (batch, head, time1, d_k)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
@@ -209,8 +209,8 @@
def forward_attention(self, value, scores, mask):
scores = scores + mask
- self.attn = torch.softmax(scores, dim=-1)
- context_layer = torch.matmul(self.attn, value) # (batch, head, time1, d_k)
+ attn = torch.softmax(scores, dim=-1)
+ context_layer = torch.matmul(attn, value) # (batch, head, time1, d_k)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
@@ -575,9 +575,9 @@
if chunk_mask is not None:
mask = chunk_mask.unsqueeze(0).unsqueeze(1) | mask
scores = scores.masked_fill(mask, float("-inf"))
- self.attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
+ attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
- attn_output = self.dropout(self.attn)
+ attn_output = self.dropout(attn)
attn_output = torch.matmul(attn_output, value)
attn_output = self.linear_out(
--
Gitblit v1.9.1