From 4e37a5fda20f0878b593b8ba2b9ea46db63743b5 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Thu, 11 May 2023 14:16:28 +0800
Subject: [PATCH] update repo
---
funasr/models/encoder/conformer_encoder.py | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletions(-)
diff --git a/funasr/models/encoder/conformer_encoder.py b/funasr/models/encoder/conformer_encoder.py
index 2df2ba6..527646f 100644
--- a/funasr/models/encoder/conformer_encoder.py
+++ b/funasr/models/encoder/conformer_encoder.py
@@ -14,12 +14,12 @@
from typeguard import check_argument_types
from funasr.models.ctc import CTC
-from funasr.models.encoder.abs_encoder import AbsEncoder
from funasr.modules.attention import (
MultiHeadedAttention, # noqa: H301
RelPositionMultiHeadedAttention, # noqa: H301
LegacyRelPositionMultiHeadedAttention, # noqa: H301
)
+from funasr.models.encoder.abs_encoder import AbsEncoder
from funasr.modules.embedding import (
PositionalEncoding, # noqa: H301
ScaledPositionalEncoding, # noqa: H301
@@ -41,6 +41,7 @@
from funasr.modules.subsampling import Conv2dSubsampling8
from funasr.modules.subsampling import TooShortUttError
from funasr.modules.subsampling import check_short_utt
+from funasr.modules.subsampling import Conv2dSubsamplingPad
class ConvolutionModule(nn.Module):
"""ConvolutionModule in Conformer model.
@@ -386,6 +387,13 @@
dropout_rate,
pos_enc_class(output_size, positional_dropout_rate),
)
+ elif input_layer == "conv2dpad":
+ self.embed = Conv2dSubsamplingPad(
+ input_size,
+ output_size,
+ dropout_rate,
+ pos_enc_class(output_size, positional_dropout_rate),
+ )
elif input_layer == "conv2d2":
self.embed = Conv2dSubsampling2(
input_size,
@@ -546,6 +554,7 @@
or isinstance(self.embed, Conv2dSubsampling2)
or isinstance(self.embed, Conv2dSubsampling6)
or isinstance(self.embed, Conv2dSubsampling8)
+ or isinstance(self.embed, Conv2dSubsamplingPad)
):
short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
if short_status:
--
Gitblit v1.9.1