# NOTE: requires torch > 1.9
import os
import random
from pathlib import Path
from typing import Union

import numpy as np
import torch
from typeguard import check_argument_types

from funasr.export.models import get_model
from funasr.tasks.asr import ASRTask


class ASRModelExportParaformer:
    def __init__(
        self,
        cache_dir: Union[Path, str] = None,
        onnx: bool = True,
        quant: bool = True,
        fallback_num: int = 0,
        audio_in: str = None,
        calib_num: int = 200,
    ):
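        """Export a Paraformer ASR model to ONNX or TorchScript.

        When `quant` is set, a quantized variant is exported alongside the
        float model; `audio_in` and `calib_num` select the calibration data
        used for torch quantization.
        """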
        assert check_argument_types()
        self.set_all_random_seed(0)
        if cache_dir is None:
            # default to a cache directory under the user's home
            cache_dir = Path.home() / ".cache" / "export"
        self.cache_dir = Path(cache_dir)
        print("output dir: {}".format(self.cache_dir))
        self.onnx = onnx
        self.quant = quant
        self.fallback_num = fallback_num
        self.frontend = None
        self.audio_in = audio_in
        self.calib_num = calib_num

    def _export(self, model, tag_name: str = None, verbose: bool = False):
        export_dir = self.cache_dir / tag_name.replace(' ', '-')
        os.makedirs(export_dir, exist_ok=True)
        # wrap the training-time model with its export-friendly counterpart,
        # which provides get_dummy_inputs()/get_input_names()/get_dynamic_axes()
        # (the export config keys here are assumed and may differ across
        # funasr versions), then dispatch to the requested backend
        model = get_model(model, dict(model_name="model", onnx=self.onnx))
        model.eval()
        if self.onnx:
            self._export_onnx(model, verbose, export_dir)
        else:
            self._export_torchscripts(model, verbose, export_dir)
        print("output dir: {}".format(export_dir))

    def _torch_quantize(self, model):
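        """Post-training quantization with torch_quant: calibrate on real or
        dummy inputs, optionally fall back the most quantization-sensitive
        layers to float (AMP), then produce the quantized model."""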
        def _run_calibration_data(m):
            if self.audio_in is not None:
                # feed real features extracted from the calibration audio
                feats, feats_len = self.load_feats(self.audio_in)
                for feat, feat_len in zip(feats, feats_len):
                    with torch.no_grad():
                        m(feat, feat_len)
            else:
                # no calibration audio given: use dummy inputs as an example
                dummy_input = model.get_dummy_inputs()
                with torch.no_grad():
                    m(*dummy_input)

        from torch_quant.module import ModuleFilter
        from torch_quant.quantizer import Backend, Quantizer
        from funasr.export.models.modules.decoder_layer import DecoderLayerSANM
        from funasr.export.models.modules.encoder_layer import EncoderLayerSANM

        # quantize only the SANM encoder/decoder layers and keep Conv1d in float
        module_filter = ModuleFilter(include_classes=[EncoderLayerSANM, DecoderLayerSANM])
        module_filter.exclude_op_types = [torch.nn.Conv1d]
        quantizer = Quantizer(
            module_filter=module_filter,
            backend=Backend.FBGEMM,
        )
        model.eval()
        calib_model = quantizer.calib(model)
        _run_calibration_data(calib_model)
        if self.fallback_num > 0:
            # perform automatic mixed precision quantization: keep the
            # `fallback_num` selected layers un-quantized
            amp_model = quantizer.amp(model)
            _run_calibration_data(amp_model)
            quantizer.fallback(amp_model, num=self.fallback_num)
            print('Fallback layers:')
            print('\n'.join(quantizer.module_filter.exclude_names))
        quant_model = quantizer.quantize(model)
        return quant_model

    def _export_torchscripts(self, model, verbose, path, enc_size=None):
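        """Trace the model (and, optionally, its quantized variant) and save
        the TorchScript artifacts under `path`."""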
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        model_script = torch.jit.trace(model, dummy_input)
        model_script.save(os.path.join(path, f'{model.model_name}.torchscripts'))

        if self.quant:
            quant_model = self._torch_quantize(model)
            model_script = torch.jit.trace(quant_model, dummy_input)
            model_script.save(os.path.join(path, f'{model.model_name}_quant.torchscripts'))

    def set_all_random_seed(self, seed: int):
        random.seed(seed)
        np.random.seed(seed)
        torch.random.manual_seed(seed)

    def parse_audio_in(self, audio_in):
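        """Resolve `audio_in` into parallel lists of wav paths and utterance
        names; a .scp file is read as Kaldi-style "<name> <path>" lines."""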
        wav_list, name_list = [], []
        if audio_in.endswith(".scp"):
            # read at most `calib_num` entries from the wav.scp list
            with open(audio_in, 'r') as f:
                lines = f.readlines()[:self.calib_num]
            for line in lines:
                name, path = line.strip().split()
                name_list.append(name)
                wav_list.append(path)
        else:
            wav_list = [audio_in]
            name_list = ["test"]
        return wav_list, name_list

    def load_feats(self, audio_in: str = None):
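        """Extract fbank features via the model's frontend from the
        calibration audio, resampling to the frontend's rate when needed."""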
        import torchaudio

        wav_list, _ = self.parse_audio_in(audio_in)
        feats = []
        feats_len = []
        for wav in wav_list:
            waveform, sampling_rate = torchaudio.load(wav.strip())
            if sampling_rate != self.frontend.fs:
                waveform = torchaudio.transforms.Resample(
                    orig_freq=sampling_rate, new_freq=self.frontend.fs
                )(waveform)
            fbank, fbank_len = self.frontend(waveform, [waveform.size(1)])
            feats.append(fbank)
            feats_len.append(fbank_len)
        return feats, feats_len

    def export(self,
               tag_name: str = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
               mode: str = 'paraformer',
               ):
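        """Build the ASR model identified by `tag_name` and export it."""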
        # locate the checkpoint files; the file names below assume the
        # standard ModelScope Paraformer layout, and the model is downloaded
        # when `tag_name` is a hub id rather than a local directory
        model_dir = tag_name
        if not os.path.exists(model_dir):
            from modelscope.hub.snapshot_download import snapshot_download
            model_dir = snapshot_download(model_dir, cache_dir=self.cache_dir)
        asr_train_config = os.path.join(model_dir, 'config.yaml')
        asr_model_file = os.path.join(model_dir, 'model.pb')
        cmvn_file = os.path.join(model_dir, 'am.mvn')
        model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, cmvn_file, 'cpu'
        )
        self.frontend = model.frontend
        self._export(model, tag_name)

    def _export_onnx(self, model, verbose, path, enc_size=None):
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs()

        # export the eager module directly; torch.onnx.export traces it with
        # the dummy inputs, so no prior torch.jit.script/trace is needed
        model_script = model
        model_path = os.path.join(path, f'{model.model_name}.onnx')

        torch.onnx.export(
            model_script,
            dummy_input,
            model_path,
            verbose=verbose,
            opset_version=14,
            input_names=model.get_input_names(),
            output_names=model.get_output_names(),
            dynamic_axes=model.get_dynamic_axes()
        )

        if self.quant:
            from onnxruntime.quantization import QuantType, quantize_dynamic
            import onnx

            quant_model_path = os.path.join(path, f'{model.model_name}_quant.onnx')
            onnx_model = onnx.load(model_path)
            nodes = [n.name for n in onnx_model.graph.node]
            # skip nodes whose names contain 'output' to limit accuracy loss
            nodes_to_exclude = [m for m in nodes if 'output' in m]
            # dynamic (weight-only) int8 quantization of MatMul nodes
            quantize_dynamic(
                model_input=model_path,
                model_output=quant_model_path,
                op_types_to_quantize=['MatMul'],
                per_channel=True,
                reduce_range=False,
                weight_type=QuantType.QUInt8,
                nodes_to_exclude=nodes_to_exclude,
            )


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--model-name', type=str, required=True,
                        help='local model dir or ModelScope model id')
    parser.add_argument('--export-dir', type=str, required=True)
    parser.add_argument('--type', type=str, default='onnx', help='"onnx" or "torch"')
    parser.add_argument('--quantize', action='store_true', help='export quantized model')
    parser.add_argument('--fallback-num', type=int, default=0,
                        help='number of layers kept in float by AMP quantization')
    parser.add_argument('--audio_in', type=str, default=None,
                        help='calibration audio: a wav file or a wav.scp list')
    parser.add_argument('--calib_num', type=int, default=200,
                        help='maximum number of calibration utterances')
    args = parser.parse_args()

    export_model = ASRModelExportParaformer(
        cache_dir=args.export_dir,
        onnx=args.type == 'onnx',
        quant=args.quantize,
        fallback_num=args.fallback_num,
        audio_in=args.audio_in,
        calib_num=args.calib_num,
    )
    export_model.export(args.model_name)
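
    # Example invocation (script name and paths are illustrative):
    #   python export_paraformer.py \
    #       --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
    #       --export-dir ./export --type onnx --quantize --audio_in ./wav.scp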