From 15c4709beb4b588db2135fc1133cd6955b5ef819 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 11 Mar 2024 22:04:03 +0800
Subject: [PATCH] onnx: always build pad masks with sequence_mask in Paraformer decoder exports (#1473)

---
 funasr/models/paraformer/decoder.py |   24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/funasr/models/paraformer/decoder.py b/funasr/models/paraformer/decoder.py
index 59c6e1d..7c370ba 100644
--- a/funasr/models/paraformer/decoder.py
+++ b/funasr/models/paraformer/decoder.py
@@ -628,14 +628,12 @@
                  ):
         super().__init__()
         # self.embed = model.embed #Embedding(model.embed, max_seq_len)
-        from funasr.utils.torch_function import MakePadMask
+
         from funasr.utils.torch_function import sequence_mask
         
         self.model = model
-        if onnx:
-            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
-        else:
-            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
+        self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
         
         from funasr.models.sanm.attention import MultiHeadedAttentionSANMDecoderExport
         from funasr.models.sanm.attention import MultiHeadedAttentionCrossAttExport
@@ -763,14 +761,12 @@
         super().__init__()
         # self.embed = model.embed #Embedding(model.embed, max_seq_len)
         self.model = model
-        from funasr.utils.torch_function import MakePadMask
+
         from funasr.utils.torch_function import sequence_mask
 
         self.model = model
-        if onnx:
-            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
-        else:
-            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
+        self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
 
         from funasr.models.sanm.attention import MultiHeadedAttentionSANMDecoderExport
         from funasr.models.sanm.attention import MultiHeadedAttentionCrossAttExport
@@ -1036,14 +1032,12 @@
         # self.embed = model.embed #Embedding(model.embed, max_seq_len)
         self.model = model
 
-        from funasr.utils.torch_function import MakePadMask
+
         from funasr.utils.torch_function import sequence_mask
 
         self.model = model
-        if onnx:
-            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
-        else:
-            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
+        self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
 
 
         from funasr.models.transformer.decoder import DecoderLayerExport

--
Gitblit v1.9.1