From 0c3c9be2c4c1c4e4da4628c3987708c9a0763391 Mon Sep 17 00:00:00 2001
From: will_wang <53147925+willnufe@users.noreply.github.com>
Date: Wed, 04 Dec 2024 17:47:31 +0800
Subject: [PATCH] paraformer onnx fp16 export scheme (#2264)
---
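Note: this patch adds an `onnx_fp16` export type: the model is first exported to
an FP32 ONNX graph (with encoder activations rescaled beforehand to avoid FP16
overflow), and the graph is then converted with onnxconverter_common.

A minimal usage sketch, assuming the FunASR AutoModel export API (the model id
below is illustrative, not taken from this patch):

    from funasr import AutoModel

    model = AutoModel(model="paraformer-zh")  # hypothetical model id
    # requires a CUDA device; writes <export_name>_hook.onnx and
    # <export_name>_hook_fp16.onnx into the export directory
    export_dir = model.export(type="onnx_fp16")
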
funasr/utils/export_utils.py | 79 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 77 insertions(+), 2 deletions(-)
diff --git a/funasr/utils/export_utils.py b/funasr/utils/export_utils.py
index af9f37b..667418c 100644
--- a/funasr/utils/export_utils.py
+++ b/funasr/utils/export_utils.py
@@ -1,6 +1,12 @@
import os
import torch
import functools
+import onnx
+from onnxconverter_common import float16
+
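+# NOTE: suppresses all Python warnings globally; ONNX export and FP16 conversion are noisy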
+import warnings
+warnings.filterwarnings("ignore")
+
def export(
@@ -35,8 +41,17 @@
if hasattr(m, "encoder") and hasattr(m, "decoder"):
_bladedisc_opt_for_encdec(m, path=export_dir, enable_fp16=True)
else:
+ print(f"export_dir: {export_dir}")
_torchscripts(m, path=export_dir, device="cuda")
- print("output dir: {}".format(export_dir))
+
+        elif type == "onnx_fp16":
+ assert (
+ torch.cuda.is_available()
+ ), "Currently onnx_fp16 optimization for FunASR only supports GPU"
+
+ if hasattr(m, "encoder") and hasattr(m, "decoder"):
+ _onnx_opt_for_encdec(m, path=export_dir, enable_fp16=True)
return export_dir
@@ -51,6 +66,8 @@
):
dummy_input = model.export_dummy_inputs()
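+    # assumes exactly two dummy inputs; move them onto the GPU before export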
+ dummy_input = (dummy_input[0].to("cuda"), dummy_input[1].to("cuda"))
+
verbose = kwargs.get("verbose", False)
@@ -64,6 +81,7 @@
dummy_input,
model_path,
verbose=verbose,
+ do_constant_folding=True,
opset_version=opset_version,
input_names=model.export_input_names(),
output_names=model.export_output_names(),
@@ -159,7 +177,7 @@
# Rescale encoder modules
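+    # 65536 ~ FP16 max (65504); factor 2*absmax/65536 targets about half the FP16 range after rescaling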
fp16_scale = int(2 * absmax // 65536)
- print(f"rescale encoder modules with factor={fp16_scale}")
+ print(f"rescale encoder modules with factor={fp16_scale}\n\n")
model.encoder.model.encoders0.register_forward_pre_hook(
functools.partial(_rescale_input_hook, scale=fp16_scale),
)
@@ -200,3 +218,60 @@
model.decoder = _bladedisc_opt(model.decoder, tuple(decoder_inputs))
model_script = torch.jit.trace(model, input_data)
model_script.save(os.path.join(path, f"{model.export_name}_blade.torchscript"))
+
+
+def _onnx_opt_for_encdec(model, path, enable_fp16):
+
+ # Get input data
+ # TODO: better to use real data
+ input_data = model.export_dummy_inputs()
+
+ if isinstance(input_data, torch.Tensor):
+ input_data = input_data.cuda()
+ else:
+        input_data = tuple(i.cuda() for i in input_data)
+
+ # Get input data for decoder module
+ decoder_inputs = list()
+
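+    # forward pre-hook to capture the decoder's runtime inputs (same trick as _bladedisc_opt_for_encdec)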
+ def get_input_hook(m, x):
+ decoder_inputs.extend(list(x))
+
+ hook = model.decoder.register_forward_pre_hook(get_input_hook)
+ model = model.cuda()
+ model(*input_data)
+ hook.remove()
+
+ # Prevent FP16 overflow
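+    # (_rescale_encoder_model registers forward hooks that rescale encoder activations into FP16 range)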
+ if enable_fp16:
+ _rescale_encoder_model(model, input_data)
+
+    fp32_model_path = os.path.join(path, f"{model.export_name}_hook.onnx")
+ print("*" * 50)
+ print(f"[_onnx_opt_for_encdec(fp32)]: {fp32_model_path}\n\n")
+ if not os.path.exists(fp32_model_path):
+ torch.onnx.export(
+ model,
+ input_data,
+ fp32_model_path,
+ verbose=False,
+ do_constant_folding=True,
+ opset_version=13,
+ input_names=model.export_input_names(),
+ output_names=model.export_output_names(),
+ dynamic_axes=model.export_dynamic_axes(),
+ )
+
+ # fp32 to fp16
+    fp16_model_path = os.path.join(path, f"{model.export_name}_hook_fp16.onnx")
+ print("*" * 50)
+ print(f"[_onnx_opt_for_encdec(fp16)]: {fp16_model_path}\n\n")
+ if os.path.exists(fp32_model_path) and not os.path.exists(fp16_model_path):
+ fp32_onnx_model = onnx.load(fp32_model_path)
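+        # keep_io_types=True keeps graph inputs/outputs in FP32; only internal tensors and weights become FP16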
+ fp16_onnx_model = float16.convert_float_to_float16(fp32_onnx_model, keep_io_types=True)
+        onnx.save(fp16_onnx_model, fp16_model_path)
--
Gitblit v1.9.1
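
A quick sanity check of the exported FP16 graph (the onnxruntime usage below is
an assumption and not part of this patch; because the conversion uses
keep_io_types=True, the graph still accepts FP32 inputs):

    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession(
        "model_hook_fp16.onnx",  # illustrative path
        providers=["CUDAExecutionProvider"],
    )
    # input names and shapes are model-specific; inspect sess.get_inputs()
    speech = np.random.randn(1, 100, 560).astype(np.float32)
    speech_lengths = np.array([100], dtype=np.int32)
    feed = dict(zip([i.name for i in sess.get_inputs()], [speech, speech_lengths]))
    outputs = sess.run(None, feed)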