wuhongsheng
2024-07-05 3a4281f4959534b1bf5d01acf0085f4f8e6f2ec8
funasr/models/lcbnet/attention.py
@@ -11,6 +11,7 @@
import math

import torch
from torch import nn


class MultiHeadedAttentionReturnWeight(nn.Module):
    """Multi-Head Attention layer.
@@ -108,5 +109,3 @@
        # Project query/key/value and split them into per-head subspaces.
        q, k, v = self.forward_qkv(query, key, value)
        # Scaled dot-product scores: QK^T / sqrt(d_k).
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        # Apply the mask and softmax, then combine the values.
        return self.forward_attention(v, scores, mask)
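
For context, the hunk above is the standard scaled dot-product step of multi-head attention; the class name suggests this variant returns the attention weights alongside the attended output. Below is a minimal usage sketch, assuming an ESPnet-style constructor (n_head, n_feat, dropout_rate), mask shape, and an (output, weights) return order; none of these are confirmed by this diff.

    # Minimal usage sketch. Assumed, not confirmed by the diff: the
    # constructor signature, the mask shape, and the (output, weights)
    # return order, all modeled on the ESPnet-style attention this class
    # appears to derive from.
    import torch

    from funasr.models.lcbnet.attention import MultiHeadedAttentionReturnWeight

    n_head, n_feat = 4, 256
    attn = MultiHeadedAttentionReturnWeight(n_head, n_feat, dropout_rate=0.1)

    x = torch.randn(2, 50, n_feat)                 # (batch, time, feature)
    mask = torch.ones(2, 1, 50, dtype=torch.bool)  # attend to all positions

    out, weights = attn(x, x, x, mask)             # self-attention
    # out:     (2, 50, 256)   -- attended features
    # weights: (2, 4, 50, 50) -- per-head weights (batch, head, time, time)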