From 59f184a622be316b6a75ce053ee8e19e6a7b50ec Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 07 Feb 2023 15:19:18 +0800
Subject: [PATCH] export model

---
 funasr/export/models/decoder/__init__.py      |    0 
 funasr/export/__init__.py                     |    0 
 funasr/export/models/e2e_asr_paraformer.py    |   91 ++++
 funasr/export/utils/__init__.py               |    0 
 funasr/export/models/modules/feedforward.py   |   31 +
 funasr/export/models/predictor/cif.py         |  168 ++++++++
 funasr/export/models/modules/decoder_layer.py |   43 ++
 funasr/export/models/modules/encoder_layer.py |   37 +
 funasr/export/models/encoder/sanm_encoder.py  |  102 ++++
 funasr/export/models/modules/multihead_att.py |  135 ++++++
 funasr/export/models/encoder/__init__.py      |    0 
 funasr/export/utils/torch_function.py         |   68 +++
 funasr/export/models/decoder/sanm_decoder.py  |  155 +++++++
 funasr/export/export_model.py                 |   91 ++++
 funasr/export/models/__init__.py              |   91 ++++
 funasr/export/models/modules/__init__.py      |    0 
 funasr/export/models/predictor/cif_test.py    |  212 ++++++++++
 funasr/export/models/predictor/__init__.py    |    0 
 18 files changed, 1224 insertions(+), 0 deletions(-)

diff --git a/funasr/export/__init__.py b/funasr/export/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/export/__init__.py
diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
new file mode 100644
index 0000000..17bc138
--- /dev/null
+++ b/funasr/export/export_model.py
@@ -0,0 +1,91 @@
+from typing import Union, Dict
+from pathlib import Path
+from typeguard import check_argument_types
+
+import os
+import logging
+import torch
+
+from funasr.export.models import get_model
+
+
+
+class ASRModelExportParaformer:
+    def __init__(self, cache_dir: Union[Path, str] = None, onnx: bool = True):
+        assert check_argument_types()
+        if cache_dir is None:
+            cache_dir = Path.home() / "cache" / "export"
+
+        self.cache_dir = Path(cache_dir)
+        self.export_config = dict(
+            feats_dim=560,
+            onnx=onnx,
+        )
+        logging.info("output dir: {}".format(self.cache_dir))
+        self.onnx = onnx
+
+    def export(
+        self,
+        model: torch.nn.Module,
+        tag_name: str = None,
+        verbose: bool = False,
+    ):
+        tag_name = tag_name if tag_name is not None else 'export'
+        export_dir = self.cache_dir / tag_name.replace(' ', '-')
+        os.makedirs(export_dir, exist_ok=True)
+
+        # wrap the torch model with its ONNX-exportable counterpart
+        self.export_config["model_name"] = "model"
+        model = get_model(
+            model,
+            self.export_config,
+        )
+        if self.onnx:
+            self._export_onnx(model, verbose, export_dir)
+
+        logging.info("output dir: {}".format(export_dir))
+
+
+    def export_from_modelscope(
+        self,
+        tag_name: str = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
+    ):
+        
+        from funasr.tasks.asr import ASRTaskParaformer as ASRTask
+        from modelscope.hub.snapshot_download import snapshot_download
+
+        model_dir = snapshot_download(tag_name, cache_dir=self.cache_dir)
+        asr_train_config = os.path.join(model_dir, 'config.yaml')
+        asr_model_file = os.path.join(model_dir, 'model.pb')
+        cmvn_file = os.path.join(model_dir, 'am.mvn')
+        model, asr_train_args = ASRTask.build_model_from_file(
+            asr_train_config, asr_model_file, cmvn_file, 'cpu'
+        )
+        self.export(model, tag_name)
+
+
+
+    def _export_onnx(self, model, verbose, path, enc_size=None):
+        if enc_size:
+            dummy_input = model.get_dummy_inputs(enc_size)
+        else:
+            dummy_input = model.get_dummy_inputs()
+
+        # torch.onnx.export runs its own tracing pass, so the eager module is
+        # passed directly; no explicit torch.jit.script/trace step is needed.
+        model_script = model
+
+        torch.onnx.export(
+            model_script,
+            dummy_input,
+            os.path.join(path, f'{model.model_name}.onnx'),
+            verbose=verbose,
+            opset_version=12,
+            input_names=model.get_input_names(),
+            output_names=model.get_output_names(),
+            dynamic_axes=model.get_dynamic_axes()
+        )
+
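+# Optional post-export sanity check -- a sketch only, assuming onnxruntime is
+# installed; it is not called anywhere in the export pipeline. `dummy_input`
+# is the tuple returned by model.get_dummy_inputs().
+def check_exported_model(onnx_path, input_names, dummy_input):
+    import numpy as np
+    import onnxruntime as ort
+
+    sess = ort.InferenceSession(onnx_path)
+    feeds = {name: x.numpy() for name, x in zip(input_names, dummy_input)}
+    outputs = sess.run(None, feeds)
+    for out in outputs:
+        assert np.all(np.isfinite(out)), 'exported graph produced non-finite values'
+    return outputs
+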
+if __name__ == '__main__':
+    export_model = ASRModelExportParaformer()
+    export_model.export_from_modelscope('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
diff --git a/funasr/export/models/__init__.py b/funasr/export/models/__init__.py
new file mode 100644
index 0000000..b21b080
--- /dev/null
+++ b/funasr/export/models/__init__.py
@@ -0,0 +1,91 @@
+# from .ctc import CTC
+# from .joint_network import JointNetwork
+#
+# # encoder
+# from espnet2.asr.encoder.rnn_encoder import RNNEncoder as espnetRNNEncoder
+# from espnet2.asr.encoder.vgg_rnn_encoder import VGGRNNEncoder as espnetVGGRNNEncoder
+# from espnet2.asr.encoder.contextual_block_transformer_encoder import ContextualBlockTransformerEncoder as espnetContextualTransformer
+# from espnet2.asr.encoder.contextual_block_conformer_encoder import ContextualBlockConformerEncoder as espnetContextualConformer
+# from espnet2.asr.encoder.transformer_encoder import TransformerEncoder as espnetTransformerEncoder
+# from espnet2.asr.encoder.conformer_encoder import ConformerEncoder as espnetConformerEncoder
+# from funasr.export.models.encoder.rnn import RNNEncoder
+# from funasr.export.models.encoders import TransformerEncoder
+# from funasr.export.models.encoders import ConformerEncoder
+# from funasr.export.models.encoder.contextual_block_xformer import ContextualBlockXformerEncoder
+#
+# # decoder
+# from espnet2.asr.decoder.rnn_decoder import RNNDecoder as espnetRNNDecoder
+# from espnet2.asr.transducer.transducer_decoder import TransducerDecoder as espnetTransducerDecoder
+# from funasr.export.models.decoder.rnn import (
+#     RNNDecoder
+# )
+# from funasr.export.models.decoders import XformerDecoder
+# from funasr.export.models.decoders import TransducerDecoder
+#
+# # lm
+# from espnet2.lm.seq_rnn_lm import SequentialRNNLM as espnetSequentialRNNLM
+# from espnet2.lm.transformer_lm import TransformerLM as espnetTransformerLM
+# from .language_models.seq_rnn import SequentialRNNLM
+# from .language_models.transformer import TransformerLM
+#
+# # frontend
+# from espnet2.asr.frontend.s3prl import S3prlFrontend as espnetS3PRLModel
+# from .frontends.s3prl import S3PRLModel
+#
+# from espnet2.asr.encoder.sanm_encoder import SANMEncoder_tf, SANMEncoderChunkOpt_tf
+# from espnet_onnx.export.asr.models.encoders.transformer_sanm import TransformerEncoderSANM_tf
+# from espnet2.asr.decoder.transformer_decoder import FsmnDecoderSCAMAOpt_tf
+# from funasr.export.models.decoders import XformerDecoderSANM
+
+from funasr.models.e2e_asr_paraformer import Paraformer
+from funasr.export.models.e2e_asr_paraformer import Paraformer as Paraformer_export
+
+def get_model(model, export_config=None):
+
+    if isinstance(model, Paraformer):
+        return Paraformer_export(model, **export_config)
+    else:
+        raise TypeError("Unsupported model type for export: {}".format(type(model)))
+
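+# Usage sketch (assuming `torch_model` is a loaded funasr Paraformer instance):
+#
+#   export_wrapper = get_model(torch_model, dict(feats_dim=560, onnx=True))
+#   speech, speech_lengths = export_wrapper.get_dummy_inputs()
+#   logits, token_num = export_wrapper(speech, speech_lengths)
+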
+
+# def get_encoder(model, frontend, preencoder, predictor=None, export_config=None):
+#     if isinstance(model, espnetRNNEncoder) or isinstance(model, espnetVGGRNNEncoder):
+#         return RNNEncoder(model, frontend, preencoder, **export_config)
+#     elif isinstance(model, espnetContextualTransformer) or isinstance(model, espnetContextualConformer):
+#         return ContextualBlockXformerEncoder(model, **export_config)
+#     elif isinstance(model, espnetTransformerEncoder):
+#         return TransformerEncoder(model, frontend, preencoder, **export_config)
+#     elif isinstance(model, espnetConformerEncoder):
+#         return ConformerEncoder(model, frontend, preencoder, **export_config)
+#     elif isinstance(model, SANMEncoder_tf) or isinstance(model, SANMEncoderChunkOpt_tf):
+#         return TransformerEncoderSANM_tf(model, frontend, preencoder, predictor, **export_config)
+#     else:
+#         raise "The model is not exist!"
+
+
+#
+# def get_decoder(model, export_config):
+#     if isinstance(model, espnetRNNDecoder):
+#         return RNNDecoder(model, **export_config)
+#     elif isinstance(model, espnetTransducerDecoder):
+#         return TransducerDecoder(model, **export_config)
+#     elif isinstance(model, FsmnDecoderSCAMAOpt_tf):
+#         return XformerDecoderSANM(model, **export_config)
+#     else:
+#         return XformerDecoder(model, **export_config)
+#
+#
+# def get_lm(model, export_config):
+#     if isinstance(model, espnetSequentialRNNLM):
+#         return SequentialRNNLM(model, **export_config)
+#     elif isinstance(model, espnetTransformerLM):
+#         return TransformerLM(model, **export_config)
+#
+#
+# def get_frontend_models(model, export_config):
+#     if isinstance(model, espnetS3PRLModel):
+#         return S3PRLModel(model, **export_config)
+#     else:
+#         return None
+#
diff --git a/funasr/export/models/decoder/__init__.py b/funasr/export/models/decoder/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/export/models/decoder/__init__.py
diff --git a/funasr/export/models/decoder/sanm_decoder.py b/funasr/export/models/decoder/sanm_decoder.py
new file mode 100644
index 0000000..ca2563b
--- /dev/null
+++ b/funasr/export/models/decoder/sanm_decoder.py
@@ -0,0 +1,155 @@
+import os
+
+import torch
+import torch.nn as nn
+
+
+# from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
+
+from funasr.export.utils.torch_function import MakePadMask
+
+from funasr.modules.attention import MultiHeadedAttentionSANMDecoder
+from funasr.export.models.modules.multihead_att import MultiHeadedAttentionSANMDecoder as MultiHeadedAttentionSANMDecoder_export
+from funasr.modules.attention import MultiHeadedAttentionCrossAtt
+from funasr.export.models.modules.multihead_att import MultiHeadedAttentionCrossAtt as MultiHeadedAttentionCrossAtt_export
+from funasr.modules.positionwise_feed_forward import PositionwiseFeedForwardDecoderSANM
+from funasr.export.models.modules.feedforward import PositionwiseFeedForwardDecoderSANM as PositionwiseFeedForwardDecoderSANM_export
+from funasr.export.models.modules.decoder_layer import DecoderLayerSANM as DecoderLayerSANM_export
+
+
+class ParaformerSANMDecoder(nn.Module):
+    def __init__(self, model,
+                 max_seq_len=512,
+                 model_name='decoder'):
+        super().__init__()
+        # self.embed = model.embed #Embedding(model.embed, max_seq_len)
+        self.model = model
+        self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+
+        for i, d in enumerate(self.model.decoders):
+            if isinstance(d.feed_forward, PositionwiseFeedForwardDecoderSANM):
+                d.feed_forward = PositionwiseFeedForwardDecoderSANM_export(d.feed_forward)
+            if isinstance(d.self_attn, MultiHeadedAttentionSANMDecoder):
+                d.self_attn = MultiHeadedAttentionSANMDecoder_export(d.self_attn)
+            if isinstance(d.src_attn, MultiHeadedAttentionCrossAtt):
+                d.src_attn = MultiHeadedAttentionCrossAtt_export(d.src_attn)
+            self.model.decoders[i] = DecoderLayerSANM_export(d)
+
+        if self.model.decoders2 is not None:
+            for i, d in enumerate(self.model.decoders2):
+                if isinstance(d.feed_forward, PositionwiseFeedForwardDecoderSANM):
+                    d.feed_forward = PositionwiseFeedForwardDecoderSANM_export(d.feed_forward)
+                if isinstance(d.self_attn, MultiHeadedAttentionSANMDecoder):
+                    d.self_attn = MultiHeadedAttentionSANMDecoder_export(d.self_attn)
+                self.model.decoders2[i] = DecoderLayerSANM_export(d)
+
+        for i, d in enumerate(self.model.decoders3):
+            if isinstance(d.feed_forward, PositionwiseFeedForwardDecoderSANM):
+                d.feed_forward = PositionwiseFeedForwardDecoderSANM_export(d.feed_forward)
+            self.model.decoders3[i] = DecoderLayerSANM_export(d)
+        
+        self.output_layer = model.output_layer
+        self.after_norm = model.after_norm
+        self.model_name = model_name
+        # decoders2 may be None; cache bookkeeping must tolerate that
+        self.num_cache_layers = len(self.model.decoders) + (
+            len(self.model.decoders2) if self.model.decoders2 is not None else 0)
+
+    def prepare_mask(self, mask):
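+        # Builds two views of the padding mask: a (B, T, 1) multiplicative
+        # mask (mask_3d_btd) for the FSMN branch, and an additive -1e4
+        # attention-score bias (mask_4d_bhlt).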
+        mask_3d_btd = mask[:, :, None]
+        if len(mask.shape) == 2:
+            mask_4d_bhlt = 1 - mask[:, None, None, :]
+        elif len(mask.shape) == 3:
+            mask_4d_bhlt = 1 - mask[:, None, :]
+        mask_4d_bhlt = mask_4d_bhlt * -10000.0
+    
+        return mask_3d_btd, mask_4d_bhlt
+
+    def forward(
+        self,
+        hs_pad: torch.Tensor,
+        hlens: torch.Tensor,
+        ys_in_pad: torch.Tensor,
+        ys_in_lens: torch.Tensor,
+    ):
+
+        tgt = ys_in_pad
+        tgt_mask = self.make_pad_mask(ys_in_lens)
+        tgt_mask, _ = self.prepare_mask(tgt_mask)
+        # tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
+
+        memory = hs_pad
+        memory_mask = self.make_pad_mask(hlens)
+        _, memory_mask = self.prepare_mask(memory_mask)
+        # memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+
+        x = tgt
+        x, tgt_mask, memory, memory_mask, _ = self.model.decoders(
+            x, tgt_mask, memory, memory_mask
+        )
+        if self.model.decoders2 is not None:
+            x, tgt_mask, memory, memory_mask, _ = self.model.decoders2(
+                x, tgt_mask, memory, memory_mask
+            )
+        x, tgt_mask, memory, memory_mask, _ = self.model.decoders3(
+            x, tgt_mask, memory, memory_mask
+        )
+        x = self.after_norm(x)
+        x = self.output_layer(x)
+
+        return x, ys_in_lens
+
+
+    def get_dummy_inputs(self, enc_size):
+        tgt = torch.LongTensor([0]).unsqueeze(0)
+        memory = torch.randn(1, 100, enc_size)
+        pre_acoustic_embeds = torch.randn(1, 1, enc_size)
+        cache_num = self.num_cache_layers
+        cache = [
+            torch.zeros((1, self.model.decoders[0].size, self.model.decoders[0].self_attn.kernel_size))
+            for _ in range(cache_num)
+        ]
+        return (tgt, memory, pre_acoustic_embeds, cache)
+
+    def is_optimizable(self):
+        return True
+
+    def get_input_names(self):
+        cache_num = self.num_cache_layers
+        return ['tgt', 'memory', 'pre_acoustic_embeds'] \
+               + ['cache_%d' % i for i in range(cache_num)]
+
+    def get_output_names(self):
+        cache_num = self.num_cache_layers
+        return ['y'] \
+               + ['out_cache_%d' % i for i in range(cache_num)]
+
+    def get_dynamic_axes(self):
+        ret = {
+            'tgt': {
+                0: 'tgt_batch',
+                1: 'tgt_length'
+            },
+            'memory': {
+                0: 'memory_batch',
+                1: 'memory_length'
+            },
+            'pre_acoustic_embeds': {
+                0: 'acoustic_embeds_batch',
+                1: 'acoustic_embeds_length',
+            }
+        }
+        cache_num = self.num_cache_layers
+        ret.update({
+            'cache_%d' % d: {
+                0: 'cache_%d_batch' % d,
+                2: 'cache_%d_length' % d
+            }
+            for d in range(cache_num)
+        })
+        return ret
+
+    def get_model_config(self, path):
+        return {
+            "dec_type": "XformerDecoder",
+            "model_path": os.path.join(path, f'{self.model_name}.onnx'),
+            "n_layers": len(self.model.decoders) + len(self.model.decoders2),
+            "odim": self.model.decoders[0].size
+        }
diff --git a/funasr/export/models/e2e_asr_paraformer.py b/funasr/export/models/e2e_asr_paraformer.py
new file mode 100644
index 0000000..162837a
--- /dev/null
+++ b/funasr/export/models/e2e_asr_paraformer.py
@@ -0,0 +1,91 @@
+import logging
+
+
+import torch
+import torch.nn as nn
+
+from funasr.export.utils.torch_function import MakePadMask
+from funasr.train.abs_espnet_model import AbsESPnetModel
+from funasr.models.encoder.sanm_encoder import SANMEncoder
+from funasr.export.models.encoder.sanm_encoder import SANMEncoder as SANMEncoder_export
+from funasr.models.predictor.cif import CifPredictorV2
+from funasr.export.models.predictor.cif import CifPredictorV2 as CifPredictorV2_export
+from funasr.models.decoder.sanm_decoder import ParaformerSANMDecoder
+from funasr.export.models.decoder.sanm_decoder import ParaformerSANMDecoder as ParaformerSANMDecoder_export
+
+class Paraformer(nn.Module):
+    """
+    Author: Speech Lab, Alibaba Group, China
+    Paraformer: Fast and Accurate Parallel Transformer for Non-autoregressive End-to-End Speech Recognition
+    https://arxiv.org/abs/2206.08317
+    """
+
+    def __init__(
+            self,
+            model,
+            max_seq_len=512,
+            feats_dim=560,
+            model_name='model',
+            **kwargs,
+    ):
+        super().__init__()
+        if isinstance(model.encoder, SANMEncoder):
+            self.encoder = SANMEncoder_export(model.encoder)
+        else:
+            raise TypeError("unsupported encoder type: {}".format(type(model.encoder)))
+        if isinstance(model.predictor, CifPredictorV2):
+            self.predictor = CifPredictorV2_export(model.predictor)
+        else:
+            raise TypeError("unsupported predictor type: {}".format(type(model.predictor)))
+        if isinstance(model.decoder, ParaformerSANMDecoder):
+            self.decoder = ParaformerSANMDecoder_export(model.decoder)
+        else:
+            raise TypeError("unsupported decoder type: {}".format(type(model.decoder)))
+        self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+        self.feats_dim = feats_dim
+        self.model_name = model_name
+        self.onnx = False
+        if "onnx" in kwargs:
+            self.onnx = kwargs["onnx"]
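+
+    # Usage sketch: export_model.py hands this wrapper, together with the
+    # get_dummy_inputs()/get_input_names()/get_output_names()/get_dynamic_axes()
+    # metadata below, straight to torch.onnx.export.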
+    
+    def forward(
+            self,
+            speech: torch.Tensor,
+            speech_lengths: torch.Tensor,
+    ):
+        batch = {"speech": speech, "speech_lengths": speech_lengths}
+    
+        enc, enc_len = self.encoder(**batch)
+        mask = self.make_pad_mask(enc_len)[:, None, :]
+        pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index = self.predictor(enc, mask)
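+        # CIF emits one acoustic embedding per predicted token; round the soft
+        # token count to an integer length before feeding the decoder.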
+        pre_token_length = pre_token_length.round().long()
+
+        decoder_out, _ = self.decoder(enc, enc_len, pre_acoustic_embeds, pre_token_length)
+        decoder_out = torch.log_softmax(decoder_out, dim=-1)
+
+        return decoder_out, pre_token_length
+    
+    # def get_output_size(self):
+    #     return self.model.encoders[0].size
+
+    def get_dummy_inputs(self):
+        speech = torch.randn(2, 30, self.feats_dim)
+        speech_lengths = torch.tensor([6, 30]).long()
+        return (speech, speech_lengths)
+
+    def get_input_names(self):
+        return ['speech', 'speech_lengths']
+
+    def get_output_names(self):
+        return ['logits', 'token_num']
+
+    def get_dynamic_axes(self):
+        return {
+            'speech': {
+                0: 'batch_size',
+                1: 'feats_length'
+            },
+            'speech_lengths': {
+                0: 'batch_size',
+            },
+            'logits': {
+                0: 'batch_size',
+                1: 'logits_length'
+            },
+        }
diff --git a/funasr/export/models/encoder/__init__.py b/funasr/export/models/encoder/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/export/models/encoder/__init__.py
diff --git a/funasr/export/models/encoder/sanm_encoder.py b/funasr/export/models/encoder/sanm_encoder.py
new file mode 100644
index 0000000..ee45732
--- /dev/null
+++ b/funasr/export/models/encoder/sanm_encoder.py
@@ -0,0 +1,102 @@
+import torch
+import torch.nn as nn
+
+from funasr.export.utils.torch_function import MakePadMask
+from funasr.modules.attention import MultiHeadedAttentionSANM
+from funasr.export.models.modules.multihead_att import MultiHeadedAttentionSANM as MultiHeadedAttentionSANM_export
+from funasr.export.models.modules.encoder_layer import EncoderLayerSANM as EncoderLayerSANM_export
+from funasr.modules.positionwise_feed_forward import PositionwiseFeedForward
+from funasr.export.models.modules.feedforward import PositionwiseFeedForward as PositionwiseFeedForward_export
+
+class SANMEncoder(nn.Module):
+    def __init__(
+        self,
+        model,
+        max_seq_len=512,
+        feats_dim=560,
+        model_name='encoder',
+    ):
+        super().__init__()
+        self.embed = model.embed
+        self.model = model
+        self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+        self.feats_dim = feats_dim
+
+        if hasattr(model, 'encoders0'):
+            for i, d in enumerate(self.model.encoders0):
+                if isinstance(d.self_attn, MultiHeadedAttentionSANM):
+                    d.self_attn = MultiHeadedAttentionSANM_export(d.self_attn)
+                if isinstance(d.feed_forward, PositionwiseFeedForward):
+                    d.feed_forward = PositionwiseFeedForward_export(d.feed_forward)
+                self.model.encoders0[i] = EncoderLayerSANM_export(d)
+
+        for i, d in enumerate(self.model.encoders):
+            if isinstance(d.self_attn, MultiHeadedAttentionSANM):
+                d.self_attn = MultiHeadedAttentionSANM_export(d.self_attn)
+            if isinstance(d.feed_forward, PositionwiseFeedForward):
+                d.feed_forward = PositionwiseFeedForward_export(d.feed_forward)
+            self.model.encoders[i] = EncoderLayerSANM_export(d)
+        
+        self.model_name = model_name
+        self.num_heads = model.encoders[0].self_attn.h
+        self.hidden_size = model.encoders[0].self_attn.linear_out.out_features
+
+    
+    def prepare_mask(self, mask):
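+        # Two mask views: a (B, T, 1) multiplicative mask for the FSMN branch,
+        # and a (B, 1, 1, T) additive -1e4 bias added to attention scores.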
+        mask_3d_btd = mask[:, :, None]
+        if len(mask.shape) == 2:
+            mask_4d_bhlt = 1 - mask[:, None, None, :]
+        elif len(mask.shape) == 3:
+            mask_4d_bhlt = 1 - mask[:, None, :]
+        mask_4d_bhlt = mask_4d_bhlt * -10000.0
+        
+        return mask_3d_btd, mask_4d_bhlt
+
+    def forward(self,
+                speech: torch.Tensor,
+                speech_lengths: torch.Tensor,
+                ):
+            
+        mask = self.make_pad_mask(speech_lengths)
+        mask = self.prepare_mask(mask)
+        if self.embed is None:
+            xs_pad = speech
+        else:
+            xs_pad = self.embed(speech)
+
+        if hasattr(self.model, 'encoders0'):
+            encoder_outs = self.model.encoders0(xs_pad, mask)
+            xs_pad, masks = encoder_outs[0], encoder_outs[1]
+
+        encoder_outs = self.model.encoders(xs_pad, mask)
+        xs_pad, masks = encoder_outs[0], encoder_outs[1]
+
+        xs_pad = self.model.after_norm(xs_pad)
+
+        return xs_pad, speech_lengths
+
+    def get_output_size(self):
+        return self.model.encoders[0].size
+
+    def get_dummy_inputs(self):
+        speech = torch.randn(1, 100, self.feats_dim)
+        speech_lengths = torch.tensor([100]).long()
+        return (speech, speech_lengths)
+
+    def get_input_names(self):
+        return ['speech', 'speech_lengths']
+
+    def get_output_names(self):
+        return ['encoder_out', 'encoder_out_lens']
+
+    def get_dynamic_axes(self):
+        return {
+            'speech': {
+                1: 'feats_length'
+            },
+            'encoder_out': {
+                1: 'enc_out_length'
+            }
+        }
diff --git a/funasr/export/models/modules/__init__.py b/funasr/export/models/modules/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/export/models/modules/__init__.py
diff --git a/funasr/export/models/modules/decoder_layer.py b/funasr/export/models/modules/decoder_layer.py
new file mode 100644
index 0000000..bc306b1
--- /dev/null
+++ b/funasr/export/models/modules/decoder_layer.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import torch
+from torch import nn
+
+
+class DecoderLayerSANM(nn.Module):
+
+    def __init__(
+        self,
+        model
+    ):
+        super().__init__()
+        self.self_attn = model.self_attn
+        self.src_attn = model.src_attn
+        self.feed_forward = model.feed_forward
+        self.norm1 = model.norm1
+        self.norm2 = model.norm2 if hasattr(model, 'norm2') else None
+        self.norm3 = model.norm3 if hasattr(model, 'norm3') else None
+        self.size = model.size
+
+
+    def forward(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
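+        # tgt: (B, L, D) decoder stream; memory: (B, T, D) encoder output.
+        # Sub-layers set to None (e.g. the pure-FFN decoders3 layers) are skipped.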
+
+        residual = tgt
+        tgt = self.norm1(tgt)
+        tgt = self.feed_forward(tgt)
+
+        x = tgt
+        if self.self_attn is not None:
+            tgt = self.norm2(tgt)
+            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
+            x = residual + x
+
+        if self.src_attn is not None:
+            residual = x
+            x = self.norm3(x)
+            x = residual + self.src_attn(x, memory, memory_mask)
+
+
+        return x, tgt_mask, memory, memory_mask, cache
+
diff --git a/funasr/export/models/modules/encoder_layer.py b/funasr/export/models/modules/encoder_layer.py
new file mode 100644
index 0000000..800a4f7
--- /dev/null
+++ b/funasr/export/models/modules/encoder_layer.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import torch
+from torch import nn
+
+
+class EncoderLayerSANM(nn.Module):
+    def __init__(
+        self,
+        model,
+    ):
+        """Construct an EncoderLayer object."""
+        super().__init__()
+        self.self_attn = model.self_attn
+        self.feed_forward = model.feed_forward
+        self.norm1 = model.norm1
+        self.norm2 = model.norm2
+        self.size = model.size
+
+    def forward(self, x, mask):
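+        # The first SANM layer can change the feature dimension (feats_dim ->
+        # d_model), so residuals are added only when input/output sizes match.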
+
+        residual = x
+        x = self.norm1(x)
+        x = self.self_attn(x, mask)
+        if x.size(2) == residual.size(2):
+            x = x + residual
+        residual = x
+        x = self.norm2(x)
+        x = self.feed_forward(x)
+        if x.size(2) == residual.size(2):
+            x = x + residual
+
+        return x, mask
+
+
+
diff --git a/funasr/export/models/modules/feedforward.py b/funasr/export/models/modules/feedforward.py
new file mode 100644
index 0000000..9388ae1
--- /dev/null
+++ b/funasr/export/models/modules/feedforward.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import torch
+import torch.nn as nn
+
+
+class PositionwiseFeedForward(nn.Module):
+	def __init__(self, model):
+		super().__init__()
+		self.w_1 = model.w_1
+		self.w_2 = model.w_2
+		self.activation = model.activation
+	
+	def forward(self, x):
+		x = self.activation(self.w_1(x))
+		x = self.w_2(x)
+		return x
+
+
+class PositionwiseFeedForwardDecoderSANM(nn.Module):
+	def __init__(self, model):
+		super().__init__()
+		self.w_1 = model.w_1
+		self.w_2 = model.w_2
+		self.activation = model.activation
+		self.norm = model.norm
+	
+	def forward(self, x):
+		x = self.activation(self.w_1(x))
+		x = self.w_2(self.norm(x))
+		return x
diff --git a/funasr/export/models/modules/multihead_att.py b/funasr/export/models/modules/multihead_att.py
new file mode 100644
index 0000000..377b979
--- /dev/null
+++ b/funasr/export/models/modules/multihead_att.py
@@ -0,0 +1,135 @@
+import os
+import math
+
+import torch
+import torch.nn as nn
+
+class MultiHeadedAttentionSANM(nn.Module):
+    def __init__(self, model):
+        super().__init__()
+        self.d_k = model.d_k
+        self.h = model.h
+        self.linear_out = model.linear_out
+        self.linear_q_k_v = model.linear_q_k_v
+        self.fsmn_block = model.fsmn_block
+        self.pad_fn = model.pad_fn
+
+        self.attn = None
+        self.all_head_size = self.h * self.d_k
+
+    def forward(self, x, mask):
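+        # x: (B, T, D); `mask` is the (mask_3d_btd, mask_4d_bhlt) pair produced
+        # by the wrapping encoder's prepare_mask().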
+        mask_3d_btd, mask_4d_bhlt = mask
+        q_h, k_h, v_h, v = self.forward_qkv(x)
+        fsmn_memory = self.forward_fsmn(v, mask_3d_btd)
+        q_h = q_h * self.d_k**(-0.5)
+        scores = torch.matmul(q_h, k_h.transpose(-2, -1))
+        att_outs = self.forward_attention(v_h, scores, mask_4d_bhlt)
+        return att_outs + fsmn_memory
+
+    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+        new_x_shape = x.size()[:-1] + (self.h, self.d_k)
+        x = x.view(new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def forward_qkv(self, x):
+
+        q_k_v = self.linear_q_k_v(x)
+        q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)
+        q_h = self.transpose_for_scores(q)
+        k_h = self.transpose_for_scores(k)
+        v_h = self.transpose_for_scores(v)
+        return q_h, k_h, v_h, v
+
+    def forward_fsmn(self, inputs, mask):
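+        # FSMN memory block: mask the values, run a fixed-context 1-D conv over
+        # time, and add the result back onto the masked input.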
+
+        # b, t, d = inputs.size()
+        # mask = torch.reshape(mask, (b, -1, 1))
+        inputs = inputs * mask
+        x = inputs.transpose(1, 2)
+        x = self.pad_fn(x)
+        x = self.fsmn_block(x)
+        x = x.transpose(1, 2)
+        x = x + inputs
+        x = x * mask
+        return x
+
+
+    def forward_attention(self, value, scores, mask):
+        scores = scores + mask
+
+        self.attn = torch.softmax(scores, dim=-1)
+        context_layer = torch.matmul(self.attn, value)  # (batch, head, time1, d_k)
+
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+        context_layer = context_layer.view(new_context_layer_shape)
+        return self.linear_out(context_layer)  # (batch, time1, d_model)
+
+class MultiHeadedAttentionSANMDecoder(nn.Module):
+    def __init__(self, model):
+        super().__init__()
+        self.fsmn_block = model.fsmn_block
+        self.pad_fn = model.pad_fn
+        self.kernel_size = model.kernel_size
+        self.attn = None
+
+    def forward(self, inputs, mask, cache=None):
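+        # When decoding incrementally, `cache` carries the previous conv window:
+        # the oldest frame is dropped and the new frame appended, so no zero
+        # padding is applied in that branch.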
+
+        # b, t, d = inputs.size()
+        # mask = torch.reshape(mask, (b, -1, 1))
+        inputs = inputs * mask
+
+        x = inputs.transpose(1, 2)
+        if cache is None:
+            x = self.pad_fn(x)
+        else:
+            x = torch.cat((cache[:, :, 1:], x), dim=2)
+            cache = x
+        x = self.fsmn_block(x)
+        x = x.transpose(1, 2)
+
+        x = x + inputs
+        x = x * mask
+        return x, cache
+
+class MultiHeadedAttentionCrossAtt(nn.Module):
+    def __init__(self, model):
+        super().__init__()
+        self.d_k = model.d_k
+        self.h = model.h
+        self.linear_q = model.linear_q
+        self.linear_k_v = model.linear_k_v
+        self.linear_out = model.linear_out
+        self.attn = None
+        self.all_head_size = self.h * self.d_k
+
+    def forward(self, x, memory, memory_mask):
+        q, k, v = self.forward_qkv(x, memory)
+        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
+        return self.forward_attention(v, scores, memory_mask)
+
+    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+        new_x_shape = x.size()[:-1] + (self.h, self.d_k)
+        x = x.view(new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def forward_qkv(self, x, memory):
+        q = self.linear_q(x)
+
+        k_v = self.linear_k_v(memory)
+        k, v = torch.split(k_v, int(self.h * self.d_k), dim=-1)
+        q = self.transpose_for_scores(q)
+        k = self.transpose_for_scores(k)
+        v = self.transpose_for_scores(v)
+        return q, k, v
+
+    def forward_attention(self, value, scores, mask):
+        scores = scores + mask
+
+        self.attn = torch.softmax(scores, dim=-1)
+        context_layer = torch.matmul(self.attn, value)  # (batch, head, time1, d_k)
+
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+        context_layer = context_layer.view(new_context_layer_shape)
+        return self.linear_out(context_layer)  # (batch, time1, d_model)
diff --git a/funasr/export/models/predictor/__init__.py b/funasr/export/models/predictor/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/export/models/predictor/__init__.py
diff --git a/funasr/export/models/predictor/cif.py b/funasr/export/models/predictor/cif.py
new file mode 100644
index 0000000..32a3c13
--- /dev/null
+++ b/funasr/export/models/predictor/cif.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import torch
+from torch import nn
+import logging
+import numpy as np
+
+
+def sequence_mask(lengths, maxlen=None, dtype=torch.float32, device=None):
+	if maxlen is None:
+		maxlen = lengths.max()
+	row_vector = torch.arange(0, maxlen, 1).to(lengths.device)
+	matrix = torch.unsqueeze(lengths, dim=-1)
+	mask = row_vector < matrix
+	mask = mask.detach()
+	
+	return mask.type(dtype).to(device) if device is not None else mask.type(dtype)
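+
+# e.g. sequence_mask(torch.tensor([1, 3]), maxlen=3) ->
+#   tensor([[1., 0., 0.],
+#           [1., 1., 1.]])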
+
+
+class CifPredictorV2(nn.Module):
+	def __init__(self, model):
+		super().__init__()
+		
+		self.pad = model.pad
+		self.cif_conv1d = model.cif_conv1d
+		self.cif_output = model.cif_output
+		self.threshold = model.threshold
+		self.smooth_factor = model.smooth_factor
+		self.noise_threshold = model.noise_threshold
+		self.tail_threshold = model.tail_threshold
+	
+	def forward(self, hidden: torch.Tensor,
+	            mask: torch.Tensor,
+	            ):
+		h = hidden
+		context = h.transpose(1, 2)
+		queries = self.pad(context)
+		output = torch.relu(self.cif_conv1d(queries))
+		output = output.transpose(1, 2)
+		
+		output = self.cif_output(output)
+		alphas = torch.sigmoid(output)
+		alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
+		mask = mask.transpose(-1, -2).float()
+		alphas = alphas * mask
+		
+		alphas = alphas.squeeze(-1)
+		
+		token_num = alphas.sum(-1)
+		
+		acoustic_embeds, cif_peak = cif(hidden, alphas, self.threshold)
+		
+		return acoustic_embeds, token_num, alphas, cif_peak
+	
+	def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
+		b, t, d = hidden.size()
+		tail_threshold = self.tail_threshold
+		
+		zeros_t = torch.zeros((b, 1), dtype=torch.float32, device=alphas.device)
+		ones_t = torch.ones_like(zeros_t)
+		mask_1 = torch.cat([mask, zeros_t], dim=1)
+		mask_2 = torch.cat([ones_t, mask], dim=1)
+		mask = mask_2 - mask_1
+		tail_threshold = mask * tail_threshold
+		alphas = torch.cat([alphas, tail_threshold], dim=1)
+		
+		zeros = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
+		hidden = torch.cat([hidden, zeros], dim=1)
+		token_num = alphas.sum(dim=-1)
+		token_num_floor = torch.floor(token_num)
+		
+		return hidden, alphas, token_num_floor
+
+@torch.jit.script
+def cif(hidden, alphas, threshold: float):
+	batch_size, len_time, hidden_size = hidden.size()
+	threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)
+	
+	# loop vars
+	integrate = torch.zeros([batch_size], device=hidden.device)
+	frame = torch.zeros([batch_size, hidden_size], device=hidden.device)
+	# intermediate vars along time
+	list_fires = []
+	list_frames = []
+	
+	for t in range(len_time):
+		alpha = alphas[:, t]
+		distribution_completion = torch.ones([batch_size], device=hidden.device) - integrate
+		
+		integrate += alpha
+		list_fires.append(integrate)
+		
+		fire_place = integrate >= threshold
+		integrate = torch.where(fire_place,
+		                        integrate - torch.ones([batch_size], device=hidden.device),
+		                        integrate)
+		cur = torch.where(fire_place,
+		                  distribution_completion,
+		                  alpha)
+		remainder = alpha - cur
+		
+		frame += cur[:, None] * hidden[:, t, :]
+		list_frames.append(frame)
+		frame = torch.where(fire_place[:, None].repeat(1, hidden_size),
+		                    remainder[:, None] * hidden[:, t, :],
+		                    frame)
+	
+	fires = torch.stack(list_fires, 1)
+	frames = torch.stack(list_frames, 1)
+	list_ls = []
+	len_labels = torch.round(alphas.sum(-1)).int()
+	max_label_len = len_labels.max()
+	for b in range(batch_size):
+		fire = fires[b, :]
+		l = torch.index_select(frames[b, :, :], 0, torch.nonzero(fire >= threshold).squeeze())
+		pad_l = torch.zeros([int(max_label_len - l.size(0)), int(hidden_size)], device=hidden.device)
+		list_ls.append(torch.cat([l, pad_l], 0))
+	return torch.stack(list_ls, 0), fires
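+
+
+# Minimal usage sketch for the scripted integrate-and-fire function above
+# (shapes are illustrative). With a constant alpha of 0.5 over 8 frames, each
+# utterance fires 4 times, so the embeddings have shape (batch, 4, hidden) and
+# `fires` is (batch, time).
+def cif_demo():
+	hidden = torch.randn(2, 8, 4)
+	alphas = torch.full((2, 8), 0.5)
+	acoustic_embeds, fires = cif(hidden, alphas, 1.0)
+	print(acoustic_embeds.shape, fires.shape)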
+
+
+def CifPredictorV2_test():
+	x = torch.rand([2, 21, 2])
+	x_len = torch.IntTensor([6, 21])
+	
+	mask = sequence_mask(x_len, maxlen=x.size(1), dtype=x.dtype)
+	x = x * mask[:, :, None]
+	
+	# The export wrapper takes a trained predictor, so build a small base
+	# predictor first (assumes the base signature CifPredictorV2(idim, l_order,
+	# r_order) from funasr.models.predictor.cif).
+	from funasr.models.predictor.cif import CifPredictorV2 as CifPredictorV2_base
+	predictor_scripts = torch.jit.script(CifPredictorV2(CifPredictorV2_base(2, 1, 1)))
+	predictor_scripts.save('test.pt')
+	loaded = torch.jit.load('test.pt')
+	cif_output, cif_length, alphas, cif_peak = loaded(x, mask=mask[:, None, :])
+	print(predictor_scripts.code)
+	print(cif_output)
+
+
+def CifPredictorV2_export_test():
+	x = torch.rand([2, 21, 2])
+	x_len = torch.IntTensor([6, 21])
+	
+	mask = sequence_mask(x_len, maxlen=x.size(1), dtype=x.dtype)
+	x = x * mask[:, :, None]
+	
+	# Same assumption as above: wrap a freshly built base predictor.
+	from funasr.models.predictor.cif import CifPredictorV2 as CifPredictorV2_base
+	predictor = CifPredictorV2(CifPredictorV2_base(2, 1, 1))
+	predictor_trace = torch.jit.trace(predictor, (x, mask[:, None, :]))
+	predictor_trace.save('test_trace.pt')
+	loaded = torch.jit.load('test_trace.pt')
+	
+	# exercise the traced module on a different batch size and sequence length
+	x = torch.rand([3, 30, 2])
+	x_len = torch.IntTensor([6, 20, 30])
+	mask = sequence_mask(x_len, maxlen=x.size(1), dtype=x.dtype)
+	x = x * mask[:, :, None]
+	cif_output, cif_length, alphas, cif_peak = loaded(x, mask=mask[:, None, :])
+	print(cif_output)
+
+
+if __name__ == '__main__':
+	# CifPredictorV2_test()
+	CifPredictorV2_export_test()
diff --git a/funasr/export/models/predictor/cif_test.py b/funasr/export/models/predictor/cif_test.py
new file mode 100644
index 0000000..954c434
--- /dev/null
+++ b/funasr/export/models/predictor/cif_test.py
@@ -0,0 +1,212 @@
+import torch
+from torch import nn
+import logging
+import numpy as np
+
+
+def sequence_mask(lengths, maxlen=None, dtype=torch.float32, device=None):
+	if maxlen is None:
+		maxlen = lengths.max()
+	row_vector = torch.arange(0, maxlen, 1).to(lengths.device)
+	matrix = torch.unsqueeze(lengths, dim=-1)
+	mask = row_vector < matrix
+	mask = mask.detach()
+	
+	return mask.type(dtype).to(device) if device is not None else mask.type(dtype)
+
+
+def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):
+
+	if length_dim == 0:
+		raise ValueError("length_dim cannot be 0: {}".format(length_dim))
+	
+	if not isinstance(lengths, list):
+		lengths = lengths.tolist()
+	bs = int(len(lengths))
+	if maxlen is None:
+		if xs is None:
+			maxlen = int(max(lengths))
+		else:
+			maxlen = xs.size(length_dim)
+	else:
+		assert xs is None
+		assert maxlen >= int(max(lengths))
+	
+	seq_range = torch.arange(0, maxlen, dtype=torch.int64)
+	seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
+	seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
+	mask = seq_range_expand >= seq_length_expand
+	
+	if xs is not None:
+		assert xs.size(0) == bs, (xs.size(0), bs)
+		
+		if length_dim < 0:
+			length_dim = xs.dim() + length_dim
+		# ind = (:, None, ..., None, :, , None, ..., None)
+		ind = tuple(
+			slice(None) if i in (0, length_dim) else None for i in range(xs.dim())
+		)
+		mask = mask[ind].expand_as(xs).to(xs.device)
+	return mask
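+
+# e.g. make_pad_mask(torch.tensor([2, 3])) ->  (True marks padded positions)
+#   tensor([[False, False,  True],
+#           [False, False, False]])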
+
+
+
+class CifPredictorV2(nn.Module):
+	def __init__(self,
+	             idim: int,
+	             l_order: int,
+	             r_order: int,
+	             threshold: float = 1.0,
+	             dropout: float = 0.1,
+	             smooth_factor: float = 1.0,
+	             noise_threshold: float = 0,
+	             tail_threshold: float = 0.0,
+	             ):
+		super(CifPredictorV2, self).__init__()
+		
+		self.pad = nn.ConstantPad1d((l_order, r_order), 0.0)
+		self.cif_conv1d = nn.Conv1d(idim, idim, l_order + r_order + 1)
+		self.cif_output = nn.Linear(idim, 1)
+		self.dropout = torch.nn.Dropout(p=dropout)
+		self.threshold = threshold
+		self.smooth_factor = smooth_factor
+		self.noise_threshold = noise_threshold
+		self.tail_threshold = tail_threshold
+	
+	def forward(self, hidden: torch.Tensor,
+	            mask: torch.Tensor,
+	            ):
+		h = hidden
+		context = h.transpose(1, 2)
+		queries = self.pad(context)
+		output = torch.relu(self.cif_conv1d(queries))
+		output = output.transpose(1, 2)
+		
+		output = self.cif_output(output)
+		alphas = torch.sigmoid(output)
+		alphas = torch.nn.functional.relu(alphas * self.smooth_factor - self.noise_threshold)
+		mask = mask.transpose(-1, -2).float()
+		alphas = alphas * mask
+		
+		alphas = alphas.squeeze(-1)
+		
+		token_num = alphas.sum(-1)
+		
+		acoustic_embeds, cif_peak = cif(hidden, alphas, self.threshold)
+		
+		return acoustic_embeds, token_num, alphas, cif_peak
+	
+	def tail_process_fn(self, hidden, alphas, token_num=None, mask=None):
+		b, t, d = hidden.size()
+		tail_threshold = self.tail_threshold
+		
+		zeros_t = torch.zeros((b, 1), dtype=torch.float32, device=alphas.device)
+		ones_t = torch.ones_like(zeros_t)
+		mask_1 = torch.cat([mask, zeros_t], dim=1)
+		mask_2 = torch.cat([ones_t, mask], dim=1)
+		mask = mask_2 - mask_1
+		tail_threshold = mask * tail_threshold
+		alphas = torch.cat([alphas, tail_threshold], dim=1)
+		
+		zeros = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
+		hidden = torch.cat([hidden, zeros], dim=1)
+		token_num = alphas.sum(dim=-1)
+		token_num_floor = torch.floor(token_num)
+		
+		return hidden, alphas, token_num_floor
+
+@torch.jit.script
+def cif(hidden, alphas, threshold: float):
+	batch_size, len_time, hidden_size = hidden.size()
+	threshold = torch.tensor([threshold], dtype=alphas.dtype).to(alphas.device)
+	
+	# loop vars
+	integrate = torch.zeros([batch_size], device=hidden.device)
+	frame = torch.zeros([batch_size, hidden_size], device=hidden.device)
+	# intermediate vars along time
+	list_fires = []
+	list_frames = []
+	
+	for t in range(len_time):
+		alpha = alphas[:, t]
+		distribution_completion = torch.ones([batch_size], device=hidden.device) - integrate
+		
+		integrate += alpha
+		list_fires.append(integrate)
+		
+		fire_place = integrate >= threshold
+		integrate = torch.where(fire_place,
+		                        integrate - torch.ones([batch_size], device=hidden.device),
+		                        integrate)
+		cur = torch.where(fire_place,
+		                  distribution_completion,
+		                  alpha)
+		remainder = alpha - cur
+		
+		frame += cur[:, None] * hidden[:, t, :]
+		list_frames.append(frame)
+		frame = torch.where(fire_place[:, None].repeat(1, hidden_size),
+		                    remainder[:, None] * hidden[:, t, :],
+		                    frame)
+	
+	fires = torch.stack(list_fires, 1)
+	frames = torch.stack(list_frames, 1)
+	list_ls = []
+	len_labels = torch.round(alphas.sum(-1)).int()
+	max_label_len = len_labels.max()
+	for b in range(batch_size):
+		fire = fires[b, :]
+		l = torch.index_select(frames[b, :, :], 0, torch.nonzero(fire >= threshold).squeeze())
+		pad_l = torch.zeros([int(max_label_len - l.size(0)), int(hidden_size)], device=hidden.device)
+		list_ls.append(torch.cat([l, pad_l], 0))
+	return torch.stack(list_ls, 0), fires
+
+
+def CifPredictorV2_test():
+	x = torch.rand([2, 21, 2])
+	x_len = torch.IntTensor([6, 21])
+	
+	mask = sequence_mask(x_len, maxlen=x.size(1), dtype=x.dtype)
+	x = x * mask[:, :, None]
+	
+	predictor_scripts = torch.jit.script(CifPredictorV2(2, 1, 1))
+	# cif_output, cif_length, alphas, cif_peak = predictor_scripts(x, mask=mask[:, None, :])
+	predictor_scripts.save('test.pt')
+	loaded = torch.jit.load('test.pt')
+	cif_output, cif_length, alphas, cif_peak = loaded(x, mask=mask[:, None, :])
+	# print(cif_output)
+	print(predictor_scripts.code)
+	# predictor = CifPredictorV2(2, 1, 1)
+	# cif_output, cif_length, alphas, cif_peak = predictor(x, mask=mask[:, None, :])
+	print(cif_output)
+
+
+def CifPredictorV2_export_test():
+	x = torch.rand([2, 21, 2])
+	x_len = torch.IntTensor([6, 21])
+	
+	mask = sequence_mask(x_len, maxlen=x.size(1), dtype=x.dtype)
+	x = x * mask[:, :, None]
+	
+	# predictor_scripts = torch.jit.script(CifPredictorV2(2, 1, 1))
+	# cif_output, cif_length, alphas, cif_peak = predictor_scripts(x, mask=mask[:, None, :])
+	predictor = CifPredictorV2(2, 1, 1)
+	predictor_trace = torch.jit.trace(predictor, (x, mask[:, None, :]))
+	predictor_trace.save('test_trace.pt')
+	loaded = torch.jit.load('test_trace.pt')
+	
+	x = torch.rand([3, 30, 2])
+	x_len = torch.IntTensor([6, 20, 30])
+	mask = sequence_mask(x_len, maxlen=x.size(1), dtype=x.dtype)
+	x = x * mask[:, :, None]
+	cif_output, cif_length, alphas, cif_peak = loaded(x, mask=mask[:, None, :])
+	print(cif_output)
+	# print(predictor_trace.code)
+	# predictor = CifPredictorV2(2, 1, 1)
+	# cif_output, cif_length, alphas, cif_peak = predictor(x, mask=mask[:, None, :])
+	# print(cif_output)
+
+
+if __name__ == '__main__':
+	# CifPredictorV2_test()
+	CifPredictorV2_export_test()
diff --git a/funasr/export/utils/__init__.py b/funasr/export/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/funasr/export/utils/__init__.py
diff --git a/funasr/export/utils/torch_function.py b/funasr/export/utils/torch_function.py
new file mode 100644
index 0000000..e8e5e1a
--- /dev/null
+++ b/funasr/export/utils/torch_function.py
@@ -0,0 +1,68 @@
+from typing import Optional
+
+import torch
+import torch.nn as nn
+
+import numpy as np
+
+
+class MakePadMask(nn.Module):
+    def __init__(self, max_seq_len=512, flip=True):
+        super().__init__()
+        if flip:
+            mask_pad = torch.Tensor(1 - np.tri(max_seq_len)).type(torch.bool)
+        else:
+            mask_pad = torch.Tensor(np.tri(max_seq_len)).type(torch.bool)
+        # register as a buffer so the lookup table moves with the module's device
+        self.register_buffer('mask_pad', mask_pad)
+    
+    def forward(self, lengths, xs=None, length_dim=-1, maxlen=None):
+        """Make mask tensor containing indices of padded part.
+        This implementation creates the same mask tensor with original make_pad_mask,
+        which can be converted into onnx format.
+        Dimension length of xs should be 2 or 3.
+        """
+        if length_dim == 0:
+            raise ValueError("length_dim cannot be 0: {}".format(length_dim))
+
+        if xs is not None and len(xs.shape) == 3:
+            if length_dim == 1:
+                lengths = lengths.unsqueeze(1).expand(
+                    *xs.transpose(1, 2).shape[:2])
+            else:
+                lengths = lengths.unsqueeze(1).expand(*xs.shape[:2])
+
+        if maxlen is not None:
+            m = maxlen
+        elif xs is not None:
+            m = xs.shape[-1]
+        else:
+            m = torch.max(lengths)
+
+        mask = self.mask_pad[lengths - 1][..., :m].type(torch.float32)
+
+        if length_dim == 1:
+            return mask.transpose(1, 2)
+        else:
+            return mask
+
+
+def normalize(input: torch.Tensor, p: float = 2.0, dim: int = 1, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        denom = input.norm(p, dim, keepdim=True).expand_as(input)
+        return input / denom
+    else:
+        denom = input.norm(p, dim, keepdim=True).expand_as(input)
+        return torch.div(input, denom, out=out)
+
+def subsequent_mask(size: int):
+    return torch.ones(size, size).tril()
+
+
+def MakePadMask_test():
+    feats_length = torch.tensor([10]).type(torch.long)
+    mask_fn = MakePadMask()
+    mask = mask_fn(feats_length)
+    print(mask)
+
+
+if __name__ == '__main__':
+    MakePadMask_test()

--
Gitblit v1.9.1