File was renamed from funasr/models/paraformer/contextual_decoder.py

import numpy as np

from funasr.models.scama import utils as myutils
from funasr.models.decoder.transformer_decoder import BaseTransformerDecoder

-from funasr.models.transformer.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
+from funasr.models.sanm.attention import MultiHeadedAttentionSANMDecoder, MultiHeadedAttentionCrossAtt
from funasr.models.transformer.embedding import PositionalEncoding
from funasr.models.transformer.layer_norm import LayerNorm
-from funasr.models.transformer.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
-from funasr.models.transformer.repeat import repeat
-from funasr.models.decoder.sanm_decoder import DecoderLayerSANM, ParaformerSANMDecoder
+from funasr.models.sanm.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
+from funasr.models.transformer.utils.repeat import repeat
+from funasr.models.paraformer.decoder import DecoderLayerSANM, ParaformerSANMDecoder

from funasr.utils.register import register_class, registry_tables
| | | |
class ContextualDecoderLayer(nn.Module):
    def __init__(

        x = self.dropout(self.src_attn(x, memory, memory_mask))
        return x, tgt_mask, memory, memory_mask, cache
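# A minimal sketch of the pattern in the fragment above: cross-attention from the
# decoder states to an external memory (e.g. the bias/hotword embeddings), followed
# by dropout. Plain torch.nn modules stand in for FunASR's MultiHeadedAttentionCrossAtt;
# the class and argument names here are illustrative assumptions, not FunASR code.
from torch import nn


class CrossAttnSketch(nn.Module):
    def __init__(self, d_model, n_heads, dropout_rate=0.1):
        super().__init__()
        # query = decoder states, key/value = external memory
        self.src_attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x, memory, memory_key_padding_mask=None):
        # x: (batch, tgt_len, d_model); memory: (batch, mem_len, d_model)
        out, _ = self.src_attn(x, memory, memory,
                               key_padding_mask=memory_key_padding_mask)
        return self.dropout(out)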


@register_class("decoder_classes", "ContextualParaformerDecoder")
class ContextualParaformerDecoder(ParaformerSANMDecoder):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group