From 1d4ab65c8bfebaecbcb0eec0064bae9a321cad75 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 14 Feb 2023 16:27:37 +0800
Subject: [PATCH] export model

---
 funasr/export/models/encoder/sanm_encoder.py |   11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/funasr/export/models/encoder/sanm_encoder.py b/funasr/export/models/encoder/sanm_encoder.py
index ee45732..8a50538 100644
--- a/funasr/export/models/encoder/sanm_encoder.py
+++ b/funasr/export/models/encoder/sanm_encoder.py
@@ -2,6 +2,7 @@
 import torch.nn as nn
 
 from funasr.export.utils.torch_function import MakePadMask
+from funasr.export.utils.torch_function import sequence_mask
 from funasr.modules.attention import MultiHeadedAttentionSANM
 from funasr.export.models.modules.multihead_att import MultiHeadedAttentionSANM as MultiHeadedAttentionSANM_export
 from funasr.export.models.modules.encoder_layer import EncoderLayerSANM as EncoderLayerSANM_export
@@ -15,12 +16,18 @@
         max_seq_len=512,
         feats_dim=560,
         model_name='encoder',
+        onnx: bool = True,
     ):
         super().__init__()
         self.embed = model.embed
         self.model = model
-        self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
         self.feats_dim = feats_dim
+        self._output_size = model._output_size
+
+        if onnx:
+            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+        else:
+            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
 
         if hasattr(model, 'encoders0'):
             for i, d in enumerate(self.model.encoders0):
@@ -56,7 +63,7 @@
                 speech: torch.Tensor,
                 speech_lengths: torch.Tensor,
                 ):
-            
+        speech = speech * self._output_size ** 0.5
         mask = self.make_pad_mask(speech_lengths)
         mask = self.prepare_mask(mask)
         if self.embed is None:

--
Gitblit v1.9.1