--- a/funasr/models/encoder/transformer_encoder.py
+++ b/funasr/models/encoder/transformer_encoder.py
@@ -13,6 +13,7 @@ import logging
 from funasr.models.ctc import CTC
+from funasr.models.encoder.abs_encoder import AbsEncoder
 from funasr.modules.attention import MultiHeadedAttention
 from funasr.modules.embedding import PositionalEncoding
 from funasr.modules.layer_norm import LayerNorm
@@ -143,7 +144,7 @@ return x, mask
 
 
-class TransformerEncoder(torch.nn.Module):
+class TransformerEncoder(AbsEncoder):
     """Transformer encoder module.
 
     Args: