From 525f5d77564f016acdd03ff71197f7a4a9177840 Mon Sep 17 00:00:00 2001
From: wanchen.swc <wanchen.swc@alibaba-inc.com>
Date: Fri, 10 Mar 2023 17:08:04 +0800
Subject: [PATCH] [Quantization] onnx quantization
---
funasr/export/README.md | 16 ++++++++++------
funasr/export/export_model.py | 8 ++++++++
2 files changed, 18 insertions(+), 6 deletions(-)
diff --git a/funasr/export/README.md b/funasr/export/README.md
index c44ad33..a1ed892 100644
--- a/funasr/export/README.md
+++ b/funasr/export/README.md
@@ -11,31 +11,35 @@
`Tips`: torch>=1.11.0
```shell
- python -m funasr.export.export_model [model_name] [export_dir] [onnx]
+ python -m funasr.export.export_model [model_name] [export_dir] [onnx] [quant]
```
`model_name`: the model is to export. It could be the models from modelscope, or local finetuned model(named: model.pb).
+
`export_dir`: the dir where the onnx is export.
- `onnx`: `true`, export onnx format model; `false`, export torchscripts format model.
+
+ `onnx`: `true`, export onnx format model; `false`, export torchscripts format model.
+
+ `quant`: `true`, export the quantized model at the same time; `false`, export the fp32 model only.
## For example
### Export onnx format model
Export model from modelscope
```shell
-python -m funasr.export.export_model 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" true
+python -m funasr.export.export_model 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" true false
```
Export model from local path, the model'name must be `model.pb`.
```shell
-python -m funasr.export.export_model '/mnt/workspace/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" true
+python -m funasr.export.export_model '/mnt/workspace/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" true false
```
### Export torchscripts format model
Export model from modelscope
```shell
-python -m funasr.export.export_model 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" false
+python -m funasr.export.export_model 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" false false
```
Export model from local path, the model'name must be `model.pb`.
```shell
-python -m funasr.export.export_model '/mnt/workspace/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" false
+python -m funasr.export.export_model '/mnt/workspace/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" false false
```
diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
index 1c677c9..7370c3c 100644
--- a/funasr/export/export_model.py
+++ b/funasr/export/export_model.py
@@ -153,11 +153,19 @@
if self.quant:
from onnxruntime.quantization import QuantType, quantize_dynamic
+ import onnx
quant_model_path = os.path.join(path, f'{model.model_name}_quant.onnx')
+ onnx_model = onnx.load(model_path)
+ nodes = [n.name for n in onnx_model.graph.node]
+ nodes_to_exclude = [m for m in nodes if 'output' in m]
quantize_dynamic(
model_input=model_path,
model_output=quant_model_path,
+ op_types_to_quantize=['MatMul'],
+ per_channel=True,
+ reduce_range=False,
weight_type=QuantType.QUInt8,
+ nodes_to_exclude=nodes_to_exclude,
)
--
Gitblit v1.9.1