From 5b0047bf58804686ab9390d78e090cd39110bf8e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 7 Feb 2023 17:45:59 +0800
Subject: [PATCH] export model

---
 funasr/export/models/decoder/sanm_decoder.py |   12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/funasr/export/models/decoder/sanm_decoder.py b/funasr/export/models/decoder/sanm_decoder.py
index ca2563b..9084b7f 100644
--- a/funasr/export/models/decoder/sanm_decoder.py
+++ b/funasr/export/models/decoder/sanm_decoder.py
@@ -4,9 +4,8 @@
 import torch.nn as nn
 
 
-# from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
-
 from funasr.export.utils.torch_function import MakePadMask
+from funasr.export.utils.torch_function import sequence_mask
 
 from funasr.modules.attention import MultiHeadedAttentionSANMDecoder
 from funasr.export.models.modules.multihead_att import MultiHeadedAttentionSANMDecoder as MultiHeadedAttentionSANMDecoder_export
@@ -20,11 +19,15 @@
 class ParaformerSANMDecoder(nn.Module):
     def __init__(self, model,
                  max_seq_len=512,
-                 model_name='decoder'):
+                 model_name='decoder',
+                 onnx: bool = True,):
         super().__init__()
         # self.embed = model.embed #Embedding(model.embed, max_seq_len)
         self.model = model
-        self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+        if onnx:
+            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+        else:
+            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
 
         for i, d in enumerate(self.model.decoders):
             if isinstance(d.feed_forward, PositionwiseFeedForwardDecoderSANM):
@@ -51,6 +54,7 @@
         self.output_layer = model.output_layer
         self.after_norm = model.after_norm
         self.model_name = model_name
+        
 
     def prepare_mask(self, mask):
         mask_3d_btd = mask[:, :, None]

--
Gitblit v1.9.1