From 2a66366be4c2715870e4859fd5a5db6e8a9dc00a Mon Sep 17 00:00:00 2001
From: chenmengzheAAA <123789350+chenmengzheAAA@users.noreply.github.com>
Date: Thu, 14 Sep 2023 19:00:17 +0800
Subject: [PATCH] Merge pull request #956 from alibaba-damo-academy/chenmengzheAAA-patch-4
---
funasr/export/models/encoder/sanm_encoder.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/funasr/export/models/encoder/sanm_encoder.py b/funasr/export/models/encoder/sanm_encoder.py
index f583f56..d1b4b1e 100644
--- a/funasr/export/models/encoder/sanm_encoder.py
+++ b/funasr/export/models/encoder/sanm_encoder.py
@@ -8,6 +8,7 @@
from funasr.export.models.modules.encoder_layer import EncoderLayerSANM as EncoderLayerSANM_export
from funasr.modules.positionwise_feed_forward import PositionwiseFeedForward
from funasr.export.models.modules.feedforward import PositionwiseFeedForward as PositionwiseFeedForward_export
+from funasr.modules.embedding import StreamSinusoidalPositionEncoder
class SANMEncoder(nn.Module):
@@ -21,6 +22,8 @@
):
super().__init__()
self.embed = model.embed
+ if isinstance(self.embed, StreamSinusoidalPositionEncoder):
+ self.embed = None
self.model = model
self.feats_dim = feats_dim
self._output_size = model._output_size
@@ -63,8 +66,10 @@
def forward(self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
+ online: bool = False
):
- speech = speech * self._output_size ** 0.5
+ if not online:
+ speech = speech * self._output_size ** 0.5
mask = self.make_pad_mask(speech_lengths)
mask = self.prepare_mask(mask)
if self.embed is None:
--
Gitblit v1.9.1