From 53fccccb24d15d788919d91c8c2b06a115ddacf3 Mon Sep 17 00:00:00 2001
From: 夜雨飘零 <yeyupiaoling@foxmail.com>
Date: 星期二, 19 十二月 2023 15:39:39 +0800
Subject: [PATCH] 修改cnn为合理的包名
---
funasr/export/models/encoder/sanm_encoder.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/funasr/export/models/encoder/sanm_encoder.py b/funasr/export/models/encoder/sanm_encoder.py
index f583f56..7ef863e 100644
--- a/funasr/export/models/encoder/sanm_encoder.py
+++ b/funasr/export/models/encoder/sanm_encoder.py
@@ -3,11 +3,12 @@
from funasr.export.utils.torch_function import MakePadMask
from funasr.export.utils.torch_function import sequence_mask
-from funasr.modules.attention import MultiHeadedAttentionSANM
+from funasr.models.transformer.attention import MultiHeadedAttentionSANM
from funasr.export.models.modules.multihead_att import MultiHeadedAttentionSANM as MultiHeadedAttentionSANM_export
from funasr.export.models.modules.encoder_layer import EncoderLayerSANM as EncoderLayerSANM_export
-from funasr.modules.positionwise_feed_forward import PositionwiseFeedForward
+from funasr.models.transformer.positionwise_feed_forward import PositionwiseFeedForward
from funasr.export.models.modules.feedforward import PositionwiseFeedForward as PositionwiseFeedForward_export
+from funasr.models.transformer.embedding import StreamSinusoidalPositionEncoder
class SANMEncoder(nn.Module):
@@ -21,6 +22,8 @@
):
super().__init__()
self.embed = model.embed
+ if isinstance(self.embed, StreamSinusoidalPositionEncoder):
+ self.embed = None
self.model = model
self.feats_dim = feats_dim
self._output_size = model._output_size
@@ -63,8 +66,10 @@
def forward(self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
+ online: bool = False
):
- speech = speech * self._output_size ** 0.5
+ if not online:
+ speech = speech * self._output_size ** 0.5
mask = self.make_pad_mask(speech_lengths)
mask = self.prepare_mask(mask)
if self.embed is None:
--
Gitblit v1.9.1