| | |
| | | --model-name [model_name] \ |
| | | --export-dir [export_dir] \ |
| | | --type [onnx, torch] \ |
| | | --quantize [true, false] \ |
| | | --fallback-num [fallback_num] |
| | | ``` |
| | | `model-name`: the model to export. It can be a model from ModelScope, or a local finetuned model (named: model.pb). |
| | |
| | | from funasr.export.models import get_model |
| | | import numpy as np |
| | | import random |
| | | |
| | | from funasr.utils.types import str2bool |
| | | # torch_version = float(".".join(torch.__version__.split(".")[:2])) |
| | | # assert torch_version > 1.9 |
| | | |
| | |
# Command-line options for the model-export entry point.
# NOTE(review): `parser` is created elsewhere in this file — not visible in this chunk.
parser.add_argument('--model-name', type=str, required=True,
                    help='ModelScope model id, or path to a local finetuned model')
parser.add_argument('--export-dir', type=str, required=True,
                    help='directory the exported model is written to')
parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
# BUG FIX: '--quantize' was registered twice (once as a store_true flag, once as a
# str2bool value); argparse raises ArgumentError on the duplicate option string.
# Keep the value-taking str2bool form, matching the documented usage
# `--quantize [true, false]`.
parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
parser.add_argument('--calib_num', type=int, default=200, help='calib max num')
| | |
| | | import time |
| | | import sys |
| | | import librosa |
| | | |
| | | from funasr.utils.types import str2bool |
| | | |
| | | import argparse |
# Command-line options for the runtime/demo entry point.
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', type=str, required=True,
                    help='directory containing the exported model')
parser.add_argument('--backend', type=str, default='onnx', help='["onnx", "torch"]')
# BUG FIX: help text was a copy-paste of '--fallback-num' ("amp fallback number").
parser.add_argument('--wav_file', type=str, default=None, help='["wav", "wav.scp"]')
# BUG FIX: '--quantize' was registered twice; the dropped duplicate used
# type=bool, which is broken for CLI parsing (bool("False") is True).
# Keep the str2bool form.
parser.add_argument('--quantize', type=str2bool, default=False, help='quantized model')
parser.add_argument('--intra_op_num_threads', type=int, default=1, help='intra_op_num_threads for onnx')
args = parser.parse_args()
| | | |
| | |
# Configuration for the export run.
# `#:<<!` / `!` is a disabled heredoc block-comment trick: uncomment `:<<!`
# to skip this whole section.
#:<<!
model_name="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
backend="onnx" # "torch"
# BUG FIX: quantize was assigned twice ('True' then 'true'); the stage-0 test
# below compares against 'True', so keep the capitalized form only.
quantize='True' # 'False'
tag=${model_name}/${backend}_${quantize}
!
| | | |
| | |
| | | |
# Stage 0: export the model to the chosen backend, with or without quantization.
if [ "$stage" = "0" ]; then

    if [ "$quantize" = "True" ]; then
        # BUG FIX: '--quantize' takes a str2bool value in the exporter
        # (documented usage: `--quantize [true, false]`), so pass an explicit
        # value rather than a bare flag.
        python -m funasr.export.export_model --model-name ${model_name} --export-dir ${export_root} --type ${backend} --quantize true --audio_in ${scp}
    else
        python -m funasr.export.export_model --model-name ${model_name} --export-dir ${export_root} --type ${backend}
    fi

fi
| | | |