Author: 游雁
Date: 2023-12-19
Commit: 0e622e694e6cb4459955f1e5942a7c53349ce640
File: funasr/models/neat_contextual_paraformer/decoder.py (renamed from funasr/models/paraformer/contextual_decoder.py)
@@ -6,15 +6,15 @@
 import numpy as np
 from funasr.models.scama import utils as myutils
-from funasr.models.decoder.transformer_decoder import BaseTransformerDecoder
-from funasr.models.transformer.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
+from funasr.models.sanm.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
 from funasr.models.transformer.embedding import PositionalEncoding
 from funasr.models.transformer.layer_norm import LayerNorm
-from funasr.models.transformer.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
-from funasr.models.transformer.repeat import repeat
-from funasr.models.decoder.sanm_decoder import DecoderLayerSANM, ParaformerSANMDecoder
+from funasr.models.sanm.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
+from funasr.models.transformer.utils.repeat import repeat
+from funasr.models.paraformer.decoder import DecoderLayerSANM, ParaformerSANMDecoder
+from funasr.utils.register import register_class, registry_tables


 class ContextualDecoderLayer(nn.Module):
     def __init__(
@@ -98,7 +98,7 @@
             x = self.dropout(self.src_attn(x, memory, memory_mask))
         return x, tgt_mask, memory, memory_mask, cache

-
+@register_class("decoder_classes", "ContextualParaformerDecoder")
 class ContextualParaformerDecoder(ParaformerSANMDecoder):
     """
     Author: Speech Lab of DAMO Academy, Alibaba Group
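
Beyond the import-path moves, the diff registers the decoder under a string key via `register_class`. Below is a minimal sketch of that registry pattern, assuming `register_class` and `registry_tables` behave like a conventional name-to-class lookup table; the function bodies here are illustrative assumptions, not FunASR's actual implementation.

# Minimal sketch of the class-registry pattern introduced in this diff.
# The names register_class and registry_tables mirror the import above,
# but the bodies are assumptions for illustration only.
from collections import defaultdict

registry_tables = defaultdict(dict)  # table name -> {class name -> class}

def register_class(table, name):
    """Return a decorator that files a class under registry_tables[table][name]."""
    def decorator(cls):
        registry_tables[table][name] = cls
        return cls
    return decorator

@register_class("decoder_classes", "ContextualParaformerDecoder")
class ContextualParaformerDecoder:  # stand-in for the real decoder class
    pass

# A model builder can then resolve the class from a config string:
decoder_cls = registry_tables["decoder_classes"]["ContextualParaformerDecoder"]
assert decoder_cls is ContextualParaformerDecoder

One practical consequence of this pattern: once callers resolve classes through the registry by name, modules can be renamed (as this commit does) without breaking lookups, since only the import lines inside the library need updating.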