From c553a8db1712c2a5deeef5bbb68bd1fdf8d61ab7 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 13 Jun 2024 17:38:01 +0800
Subject: [PATCH] decoding

---
 funasr/models/ct_transformer_streaming/attention.py |   20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

diff --git a/funasr/models/ct_transformer_streaming/attention.py b/funasr/models/ct_transformer_streaming/attention.py
index 382334e..97e770b 100644
--- a/funasr/models/ct_transformer_streaming/attention.py
+++ b/funasr/models/ct_transformer_streaming/attention.py
@@ -1,22 +1,10 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+#  MIT License  (https://opensource.org/licenses/MIT)
 
-# Copyright 2019 Shigeki Karita
-#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)
-
-"""Multi-Head Attention layer definition."""
-
-import math
-
-import numpy
 import torch
-from torch import nn
-import torch.nn.functional as F
-from typing import Optional, Tuple
-
 from funasr.models.sanm.attention import MultiHeadedAttentionSANM
-
-
 
 
 class MultiHeadedAttentionSANMwithMask(MultiHeadedAttentionSANM):
@@ -30,5 +18,3 @@
         scores = torch.matmul(q_h, k_h.transpose(-2, -1))
         att_outs = self.forward_attention(v_h, scores, mask[1], mask_att_chunk_encoder)
         return att_outs + fsmn_memory
-
-

--
Gitblit v1.9.1