From 87f9f2ba8de57fd865c8627e39d6c8b9c8d6d6b7 Mon Sep 17 00:00:00 2001
From: 嘉渊 <wangjiaming.wjm@alibaba-inc.com>
Date: Wed, 17 May 2023 15:45:42 +0800
Subject: [PATCH] conformer_encoder: reorder imports, use LayerNorm default, return None from time reduction
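
Reorder the imports in conformer_encoder.py so AbsEncoder is imported after
the attention modules, use the module-level LayerNorm (instead of
torch.nn.LayerNorm) as the default norm_class for the encoder layer, and
return an additional None from the time-reduction forward path.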
---
 funasr/models/encoder/conformer_encoder.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/funasr/models/encoder/conformer_encoder.py b/funasr/models/encoder/conformer_encoder.py
index 9777cee..aa3b67e 100644
--- a/funasr/models/encoder/conformer_encoder.py
+++ b/funasr/models/encoder/conformer_encoder.py
@@ -15,13 +15,13 @@
 from typeguard import check_argument_types
 
 from funasr.models.ctc import CTC
-from funasr.models.encoder.abs_encoder import AbsEncoder
 from funasr.modules.attention import (
     MultiHeadedAttention,  # noqa: H301
     RelPositionMultiHeadedAttention,  # noqa: H301
     RelPositionMultiHeadedAttentionChunk,
     LegacyRelPositionMultiHeadedAttention,  # noqa: H301
 )
+from funasr.models.encoder.abs_encoder import AbsEncoder
 from funasr.modules.embedding import (
     PositionalEncoding,  # noqa: H301
     ScaledPositionalEncoding,  # noqa: H301
@@ -307,7 +307,7 @@
         feed_forward: torch.nn.Module,
         feed_forward_macaron: torch.nn.Module,
         conv_mod: torch.nn.Module,
-        norm_class: torch.nn.Module = torch.nn.LayerNorm,
+        norm_class: torch.nn.Module = LayerNorm,
         norm_args: Dict = {},
         dropout_rate: float = 0.0,
     ) -> None:
@@ -1145,7 +1145,7 @@
         x = x[:,::self.time_reduction_factor,:]
         olens = torch.floor_divide(olens-1, self.time_reduction_factor) + 1
 
-        return x, olens
+        return x, olens, None
 
     def simu_chunk_forward(
         self,
--
Gitblit v1.9.1