From f920ca62984a6b73b8d755b906c8bbda18d8e275 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 21 Dec 2023 13:30:27 +0800
Subject: [PATCH] Merge branch 'dev_gzf_funasr2' of github.com:alibaba-damo-academy/FunASR into dev_gzf_funasr2 add
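
Adds an optional ret_attn flag to forward() and forward_attention() in the SANM
attention module, so callers can retrieve the per-head attention weights
alongside the projected attention output.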
---
funasr/models/sanm/attention.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/funasr/models/sanm/attention.py b/funasr/models/sanm/attention.py
index f48617c..10f0a3b 100644
--- a/funasr/models/sanm/attention.py
+++ b/funasr/models/sanm/attention.py
@@ -449,7 +449,7 @@
return q_h, k_h, v_h
- def forward_attention(self, value, scores, mask):
+ def forward_attention(self, value, scores, mask, ret_attn=False):
"""Compute attention context vector.
Args:
@@ -476,16 +476,16 @@
) # (batch, head, time1, time2)
else:
self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
-
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
) # (batch, time1, d_model)
-
+ if ret_attn:
+ return self.linear_out(x), self.attn # (batch, time1, d_model), (batch, head, time1, time2)
return self.linear_out(x) # (batch, time1, d_model)
- def forward(self, x, memory, memory_mask):
+ def forward(self, x, memory, memory_mask, ret_attn=False):
"""Compute scaled dot product attention.
Args:
@@ -502,7 +502,7 @@
q_h, k_h, v_h = self.forward_qkv(x, memory)
q_h = q_h * self.d_k ** (-0.5)
scores = torch.matmul(q_h, k_h.transpose(-2, -1))
- return self.forward_attention(v_h, scores, memory_mask)
+ return self.forward_attention(v_h, scores, memory_mask, ret_attn=ret_attn)
def forward_chunk(self, x, memory, cache=None, chunk_size=None, look_back=0):
"""Compute scaled dot product attention.
--
Gitblit v1.9.1
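
For readers outside the diff context: the patch threads an optional ret_attn
flag through forward() and forward_attention() so a caller can receive the
per-head attention map alongside the projected output. Below is a minimal
self-contained sketch of that pattern; TinyCrossAttention, its layer layout,
and the tensor names are illustrative assumptions, not FunASR's actual classes.

    # Sketch of the ret_attn pattern introduced by this patch.
    # TinyCrossAttention is a stand-in, not FunASR's real API.
    import torch
    import torch.nn as nn

    class TinyCrossAttention(nn.Module):
        def __init__(self, d_model: int, n_head: int, dropout: float = 0.1):
            super().__init__()
            assert d_model % n_head == 0
            self.h, self.d_k = n_head, d_model // n_head
            self.linear_q = nn.Linear(d_model, d_model)
            self.linear_kv = nn.Linear(d_model, 2 * d_model)  # fused k/v projection
            self.linear_out = nn.Linear(d_model, d_model)
            self.dropout = nn.Dropout(dropout)

        def forward(self, x, memory, memory_mask=None, ret_attn=False):
            # x: (batch, time1, d_model); memory: (batch, time2, d_model)
            # memory_mask (optional): (batch, 1, time2), nonzero = keep
            n_batch = x.size(0)
            q = self.linear_q(x).view(n_batch, -1, self.h, self.d_k).transpose(1, 2)
            k, v = self.linear_kv(memory).chunk(2, dim=-1)
            k = k.view(n_batch, -1, self.h, self.d_k).transpose(1, 2)
            v = v.view(n_batch, -1, self.h, self.d_k).transpose(1, 2)
            # Scale queries, as the patched forward() does with q_h * d_k ** -0.5.
            scores = torch.matmul(q * self.d_k ** -0.5, k.transpose(-2, -1))
            if memory_mask is not None:
                scores = scores.masked_fill(memory_mask.unsqueeze(1) == 0, float("-inf"))
            attn = torch.softmax(scores, dim=-1)     # (batch, head, time1, time2)
            out = torch.matmul(self.dropout(attn), v)  # (batch, head, time1, d_k)
            out = out.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
            out = self.linear_out(out)               # (batch, time1, d_model)
            # The patch's change: optionally expose the attention map as well.
            return (out, attn) if ret_attn else out

    if __name__ == "__main__":
        layer = TinyCrossAttention(d_model=256, n_head=4)
        x = torch.randn(2, 10, 256)       # e.g. decoder states
        memory = torch.randn(2, 20, 256)  # e.g. encoder outputs
        out, attn = layer(x, memory, ret_attn=True)
        print(out.shape, attn.shape)      # (2, 10, 256) and (2, 4, 10, 20)

Returning the map from the same call that computes the output (rather than
reading a stored self.attn attribute afterwards) keeps the weights paired with
the exact forward pass that produced them, which is handy for visualization or
alignment-style diagnostics.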