From 0a4e3b7e64e9e095cfdcd4b3c28bde7aa58839e7 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Sat, 11 Feb 2023 17:40:00 +0800
Subject: [PATCH] export: pass onnx flag to sub-module export wrappers; use int32 dummy input lengths
---
funasr/export/models/e2e_asr_paraformer.py | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)
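
Notes: this change threads an "onnx" flag from **kwargs into the SANM
encoder/decoder export wrappers and uses the same flag to pick the
padding-mask builder: MakePadMask for the ONNX path, sequence_mask
otherwise. A minimal sketch of that selection logic, assuming only the
two helpers imported in the first hunk below; pick_pad_mask itself is
illustrative and not part of FunASR:

    from funasr.export.utils.torch_function import MakePadMask, sequence_mask

    def pick_pad_mask(max_seq_len: int, **kwargs):
        # Mirrors the patch: default to False unless the caller passes onnx=True.
        onnx = kwargs.get("onnx", False)
        if onnx:
            # Mask builder used when tracing for ONNX (flip=False, as in the patch).
            return MakePadMask(max_seq_len, flip=False)
        # Mask builder used on the non-ONNX export path.
        return sequence_mask(max_seq_len, flip=False)
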
diff --git a/funasr/export/models/e2e_asr_paraformer.py b/funasr/export/models/e2e_asr_paraformer.py
index 162837a..84dd9d2 100644
--- a/funasr/export/models/e2e_asr_paraformer.py
+++ b/funasr/export/models/e2e_asr_paraformer.py
@@ -5,7 +5,7 @@
 import torch.nn as nn
 from funasr.export.utils.torch_function import MakePadMask
-from funasr.train.abs_espnet_model import AbsESPnetModel
+from funasr.export.utils.torch_function import sequence_mask
 from funasr.models.encoder.sanm_encoder import SANMEncoder
 from funasr.export.models.encoder.sanm_encoder import SANMEncoder as SANMEncoder_export
 from funasr.models.predictor.cif import CifPredictorV2
@@ -29,19 +29,24 @@
         **kwargs,
     ):
         super().__init__()
+        onnx = False
+        if "onnx" in kwargs:
+            onnx = kwargs["onnx"]
         if isinstance(model.encoder, SANMEncoder):
-            self.encoder = SANMEncoder_export(model.encoder)
+            self.encoder = SANMEncoder_export(model.encoder, onnx=onnx)
         if isinstance(model.predictor, CifPredictorV2):
             self.predictor = CifPredictorV2_export(model.predictor)
         if isinstance(model.decoder, ParaformerSANMDecoder):
-            self.decoder = ParaformerSANMDecoder_export(model.decoder)
-        self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+            self.decoder = ParaformerSANMDecoder_export(model.decoder, onnx=onnx)
+
         self.feats_dim = feats_dim
         self.model_name = model_name
-        self.onnx = False
-        if "onnx" in kwargs:
-            self.onnx = kwargs["onnx"]
-
+
+        if onnx:
+            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
+        else:
+            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
+
     def forward(
         self,
         speech: torch.Tensor,
@@ -58,15 +63,21 @@
         decoder_out, _ = self.decoder(enc, enc_len, pre_acoustic_embeds, pre_token_length)
         decoder_out = torch.log_softmax(decoder_out, dim=-1)
+        # sample_ids = decoder_out.argmax(dim=-1)
         return decoder_out, pre_token_length
-
-    # def get_output_size(self):
-    #     return self.model.encoders[0].size
 
     def get_dummy_inputs(self):
         speech = torch.randn(2, 30, self.feats_dim)
-        speech_lengths = torch.tensor([6, 30]).long()
+        speech_lengths = torch.tensor([6, 30], dtype=torch.int32)
+        return (speech, speech_lengths)
+
+    def get_dummy_inputs_txt(self, txt_file: str = "/mnt/workspace/data_fbank/0207/12345.wav.fea.txt"):
+        import numpy as np
+        fbank = np.loadtxt(txt_file)
+        fbank_lengths = np.array([fbank.shape[0], ], dtype=np.int32)
+        speech = torch.from_numpy(fbank[None, :, :].astype(np.float32))
+        speech_lengths = torch.from_numpy(fbank_lengths.astype(np.int32))
         return (speech, speech_lengths)
 
     def get_input_names(self):
--
Gitblit v1.9.1
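
Notes: get_dummy_inputs_txt above expects a plain-text fbank matrix with
one frame per row and one feature dimension per column. A self-contained
sketch of the same loading logic outside the class (load_fbank_txt is an
illustrative name; the shapes and dtypes match the hunk above):

    import numpy as np
    import torch

    def load_fbank_txt(txt_file: str):
        # (frames, feats_dim) text matrix -> (1, frames, feats_dim) float32 batch.
        fbank = np.loadtxt(txt_file)
        speech = torch.from_numpy(fbank[None, :, :].astype(np.float32))
        # Lengths as int32, matching the get_dummy_inputs change above.
        speech_lengths = torch.from_numpy(np.array([fbank.shape[0]], dtype=np.int32))
        return speech, speech_lengths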
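
A hedged sketch of how the dummy inputs feed torch.onnx.export. The
Paraformer class name and its constructor arguments are assumptions for
illustration; only get_dummy_inputs(), get_input_names(), and the onnx
kwarg come from this patch:

    import torch
    from funasr.export.models.e2e_asr_paraformer import Paraformer  # class name assumed

    def export_to_onnx(model, out_path: str = "paraformer.onnx"):
        # model: a trained FunASR Paraformer module, obtained elsewhere.
        export_model = Paraformer(model, onnx=True)  # extra constructor args assumed optional
        speech, speech_lengths = export_model.get_dummy_inputs()
        torch.onnx.export(
            export_model,
            (speech, speech_lengths),
            out_path,
            input_names=export_model.get_input_names(),
            opset_version=13,
        )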