From 4a7a984a5f3e3f894f86ce82e76ddd13d8a42a20 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 11 Mar 2024 17:56:30 +0800
Subject: [PATCH] Dev gzf (#1465)
---
funasr/models/ct_transformer_streaming/model.py | 103 +++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 87 insertions(+), 16 deletions(-)
diff --git a/funasr/models/ct_transformer_streaming/model.py b/funasr/models/ct_transformer_streaming/model.py
index 5254d15..a9b2efb 100644
--- a/funasr/models/ct_transformer_streaming/model.py
+++ b/funasr/models/ct_transformer_streaming/model.py
@@ -1,20 +1,28 @@
-from typing import Any
-from typing import List
-from typing import Tuple
-from typing import Optional
-import numpy as np
-import torch.nn.functional as F
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
-from funasr.models.transformer.utils.nets_utils import make_pad_mask
-from funasr.train_utils.device_funcs import force_gatherable
-from funasr.train_utils.device_funcs import to_device
import torch
-import torch.nn as nn
-from funasr.models.ct_transformer.utils import split_to_mini_sentence, split_words
-from funasr.utils.load_utils import load_audio_text_image_video
-from funasr.models.ct_transformer.model import CTTransformer
+import numpy as np
+from contextlib import contextmanager
+from distutils.version import LooseVersion
from funasr.register import tables
+from funasr.train_utils.device_funcs import to_device
+from funasr.models.ct_transformer.model import CTTransformer
+from funasr.utils.load_utils import load_audio_text_image_video
+from funasr.models.ct_transformer.utils import split_to_mini_sentence, split_words
+
+
+if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
+ from torch.cuda.amp import autocast
+else:
+ # torch<1.6.0 has no native AMP; fall back to a no-op autocast
+ @contextmanager
+ def autocast(enabled=True):
+ yield
+
@tables.register("model_classes", "CTTransformerStreaming")
class CTTransformerStreaming(CTTransformer):
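
The version gate above lets the rest of the file call autocast unconditionally:
on torch >= 1.6.0 it is the real torch.cuda.amp.autocast, on older builds it is
a no-op context manager. A minimal usage sketch (the model and feats names are
illustrative, not part of this patch):

    # Mixed precision on torch >= 1.6.0; silently a no-op on older builds.
    with autocast(enabled=True):
        logits = model(feats)
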
@@ -47,10 +55,8 @@
def with_vad(self):
return True
-
-
- def generate(self,
+
+ def inference(self,
data_in,
data_lengths=None,
key: list = None,
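
This hunk renames the model's public entry point from generate to inference.
A hedged usage sketch through FunASR's AutoModel wrapper, which dispatches to
the model's inference method (the model name and input text are illustrative):

    from funasr import AutoModel

    # AutoModel resolves the registered model class and calls its
    # inference() under the hood; "ct-punc" is an assumed model id.
    model = AutoModel(model="ct-punc")
    result = model.generate(input="hello world how are you")
    print(result)
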
@@ -167,3 +173,68 @@
return results, meta_data
+ def export(
+ self,
+ **kwargs,
+ ):
+
+ is_onnx = kwargs.get("type", "onnx") == "onnx"
+ encoder_class = tables.encoder_classes.get(kwargs["encoder"] + "Export")
+ self.encoder = encoder_class(self.encoder, onnx=is_onnx)
+
+ self.forward = self._export_forward
+
+ return self
+
+ def _export_forward(self, inputs: torch.Tensor,
+ text_lengths: torch.Tensor,
+ vad_indexes: torch.Tensor,
+ sub_masks: torch.Tensor,
+ ):
+ """Compute loss value from buffer sequences.
+
+ Args:
+ input (torch.Tensor): Input ids. (batch, len)
+ hidden (torch.Tensor): Target ids. (batch, len)
+
+ """
+ x = self.embed(inputs)
+ # mask = self._target_mask(input)
+ h, _ = self.encoder(x, text_lengths, vad_indexes, sub_masks)
+ y = self.decoder(h)
+ return y
+
+ def export_dummy_inputs(self):
+ length = 120
+ text_indexes = torch.randint(0, self.embed.num_embeddings, (1, length)).type(torch.int32)
+ text_lengths = torch.tensor([length], dtype=torch.int32)
+ vad_mask = torch.ones(length, length, dtype=torch.float32)[None, None, :, :]
+ sub_masks = torch.ones(length, length, dtype=torch.float32)
+ sub_masks = torch.tril(sub_masks).type(torch.float32)
+ return (text_indexes, text_lengths, vad_mask, sub_masks[None, None, :, :])
+
+ def export_input_names(self):
+ return ['inputs', 'text_lengths', 'vad_masks', 'sub_masks']
+
+ def export_output_names(self):
+ return ['logits']
+
+ def export_dynamic_axes(self):
+ return {
+ 'inputs': {
+ 1: 'feats_length'
+ },
+ 'vad_masks': {
+ 2: 'feats_length1',
+ 3: 'feats_length2'
+ },
+ 'sub_masks': {
+ 2: 'feats_length1',
+ 3: 'feats_length2'
+ },
+ 'logits': {
+ 1: 'logits_length'
+ },
+ }
+
+ def export_name(self):
+ return "model.onnx"
--
Gitblit v1.9.1