From 0cf5dfec2c8313fc2ed2aab8d10bf3dc4b9c283f Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Thu, 14 Mar 2024 14:41:49 +0800
Subject: [PATCH] paraformer decoder: always use sequence_mask, drop ONNX MakePadMask branch
---
funasr/models/paraformer/decoder.py | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)
diff --git a/funasr/models/paraformer/decoder.py b/funasr/models/paraformer/decoder.py
index 59c6e1d..7c370ba 100644
--- a/funasr/models/paraformer/decoder.py
+++ b/funasr/models/paraformer/decoder.py
@@ -628,14 +628,12 @@
):
super().__init__()
# self.embed = model.embed #Embedding(model.embed, max_seq_len)
- from funasr.utils.torch_function import MakePadMask
+
from funasr.utils.torch_function import sequence_mask
self.model = model
- if onnx:
- self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
- else:
- self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
+ self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
from funasr.models.sanm.attention import MultiHeadedAttentionSANMDecoderExport
from funasr.models.sanm.attention import MultiHeadedAttentionCrossAttExport
@@ -763,14 +761,12 @@
super().__init__()
# self.embed = model.embed #Embedding(model.embed, max_seq_len)
self.model = model
- from funasr.utils.torch_function import MakePadMask
+
from funasr.utils.torch_function import sequence_mask
self.model = model
- if onnx:
- self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
- else:
- self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
+ self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
from funasr.models.sanm.attention import MultiHeadedAttentionSANMDecoderExport
from funasr.models.sanm.attention import MultiHeadedAttentionCrossAttExport
@@ -1036,14 +1032,12 @@
# self.embed = model.embed #Embedding(model.embed, max_seq_len)
self.model = model
- from funasr.utils.torch_function import MakePadMask
+
from funasr.utils.torch_function import sequence_mask
self.model = model
- if onnx:
- self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
- else:
- self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
+ self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
from funasr.models.transformer.decoder import DecoderLayerExport
--
Gitblit v1.9.1