From 5b0047bf58804686ab9390d78e090cd39110bf8e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 7 Feb 2023 17:45:59 +0800
Subject: [PATCH] export model
---
funasr/export/models/encoder/sanm_encoder.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/funasr/export/models/encoder/sanm_encoder.py b/funasr/export/models/encoder/sanm_encoder.py
index ee45732..a3c9100 100644
--- a/funasr/export/models/encoder/sanm_encoder.py
+++ b/funasr/export/models/encoder/sanm_encoder.py
@@ -2,6 +2,7 @@
import torch.nn as nn
from funasr.export.utils.torch_function import MakePadMask
+from funasr.export.utils.torch_function import sequence_mask
from funasr.modules.attention import MultiHeadedAttentionSANM
from funasr.export.models.modules.multihead_att import MultiHeadedAttentionSANM as MultiHeadedAttentionSANM_export
from funasr.export.models.modules.encoder_layer import EncoderLayerSANM as EncoderLayerSANM_export
@@ -15,13 +16,18 @@
max_seq_len=512,
feats_dim=560,
model_name='encoder',
+ onnx: bool = True,
):
super().__init__()
self.embed = model.embed
self.model = model
- self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
self.feats_dim = feats_dim
+ if onnx:
+ self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+ else:
+ self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
if hasattr(model, 'encoders0'):
for i, d in enumerate(self.model.encoders0):
if isinstance(d.self_attn, MultiHeadedAttentionSANM):
--
Gitblit v1.9.1