嘉渊
2023-04-28 9611e07e39deda50d7616866bfebf46c9bb58170
funasr/models/encoder/conformer_encoder.py
@@ -14,12 +14,12 @@
from typeguard import check_argument_types
from funasr.models.ctc import CTC
+from funasr.models.encoder.abs_encoder import AbsEncoder
from funasr.modules.attention import (
    MultiHeadedAttention,  # noqa: H301
    RelPositionMultiHeadedAttention,  # noqa: H301
    LegacyRelPositionMultiHeadedAttention,  # noqa: H301
)
-from funasr.models.encoder.abs_encoder import AbsEncoder
from funasr.modules.embedding import (
    PositionalEncoding,  # noqa: H301
    ScaledPositionalEncoding,  # noqa: H301
@@ -41,8 +41,9 @@
from funasr.modules.subsampling import Conv2dSubsampling8
from funasr.modules.subsampling import TooShortUttError
from funasr.modules.subsampling import check_short_utt
+from funasr.modules.subsampling import Conv2dSubsamplingPad
-class ConvolutionModule(nn.Module):
+class ConvolutionModule(AbsEncoder):
    """ConvolutionModule in Conformer model.
    Args:
@@ -277,7 +278,7 @@
        return x, mask
-class ConformerEncoder(AbsEncoder):
+class ConformerEncoder(torch.nn.Module):
    """Conformer encoder module.
    Args:
@@ -381,6 +382,13 @@
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                input_size,
                output_size,
                dropout_rate,
                pos_enc_class(output_size, positional_dropout_rate),
            )
        elif input_layer == "conv2dpad":
            self.embed = Conv2dSubsamplingPad(
                input_size,
                output_size,
                dropout_rate,
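For orientation, a minimal construction sketch for the new branch. The keyword arguments mirror the ones visible in this hunk; the ESPnet-style forward(xs_pad, ilens) call returning (output, output_lengths, states) is an assumption, not something this diff confirms:

import torch
from funasr.models.encoder.conformer_encoder import ConformerEncoder

# "conv2dpad" selects the padded Conv2d front end added by this commit;
# the remaining arguments are assumed defaults.
encoder = ConformerEncoder(
    input_size=80,            # e.g. 80-dim fbank features
    output_size=256,
    input_layer="conv2dpad",
)

feats = torch.randn(2, 200, 80)        # (batch, frames, feat_dim)
feat_lens = torch.tensor([200, 160])   # valid frames per utterance

# Assumed ESPnet-style interface: encoded frames, their lengths, optional state.
out, out_lens, _ = encoder(feats, feat_lens)
print(out.shape, out_lens)             # time axis roughly subsampled by 4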
@@ -546,6 +554,7 @@
                or isinstance(self.embed, Conv2dSubsampling2)
                or isinstance(self.embed, Conv2dSubsampling6)
                or isinstance(self.embed, Conv2dSubsampling8)
+                or isinstance(self.embed, Conv2dSubsamplingPad)
        ):
            short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
            if short_status:
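The final hunk adds the padded subsampler to the chain that feeds check_short_utt, so inputs that are too short for the chosen front end surface as TooShortUttError. A hedged sketch of how a caller might handle that path; whether Conv2dSubsamplingPad actually enforces a minimum length is not visible in this diff and is assumed here:

import torch
from funasr.models.encoder.conformer_encoder import ConformerEncoder
from funasr.modules.subsampling import TooShortUttError

encoder = ConformerEncoder(input_size=80, output_size=256, input_layer="conv2dpad")

very_short = torch.randn(1, 3, 80)   # only 3 feature frames
lengths = torch.tensor([3])

try:
    out, out_lens, _ = encoder(very_short, lengths)
except TooShortUttError as err:
    # Raised when the input has fewer frames than the subsampler's minimum.
    print("too short for this input_layer:", err)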