From 111450d5e67ed01e3e7de088fe5bb3115dc7eeff Mon Sep 17 00:00:00 2001
From: mengzhe.cmz <mengzhe.cmz@alibaba-inc.com>
Date: Thu, 23 Mar 2023 19:55:15 +0800
Subject: [PATCH] update export_model.py: calibrate quantization on real audio, add argparse CLI

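The Paraformer exporter previously calibrated PyTorch quantization only on
dummy inputs and read its options from bare positional sys.argv values.
This patch:

- lets PyTorch quantization calibrate on real audio: --audio-in accepts a
  wav file or a Kaldi-style wav.scp list, capped by --calib-num
- adds optional automatic mixed-precision fallback: --fallback-num keeps
  that many layers in fp32
- narrows ONNX dynamic quantization to per-channel MatMul, excluding nodes
  whose names contain 'output'
- replaces the positional CLI with argparse

Example invocation (flags as defined in this patch; the fallback value is
illustrative):

    python funasr/export/export_model.py \
        --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
        --export-dir ../export \
        --type onnx --quantize true \
        --fallback-num 4 --audio-in wav.scp --calib-num 200
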
---
 funasr/export/export_model.py |  117 +++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 95 insertions(+), 22 deletions(-)

diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
index 1c677c9..f6ba616 100644
--- a/funasr/export/export_model.py
+++ b/funasr/export/export_model.py
@@ -10,13 +10,19 @@
 from funasr.export.models import get_model
 import numpy as np
 import random
-
+from funasr.utils.types import str2bool
 # torch_version = float(".".join(torch.__version__.split(".")[:2]))
 # assert torch_version > 1.9
 
 class ASRModelExportParaformer:
     def __init__(
-        self, cache_dir: Union[Path, str] = None, onnx: bool = True, quant: bool = True
+        self,
+        cache_dir: Union[Path, str] = None,
+        onnx: bool = True,
+        quant: bool = True,
+        fallback_num: int = 0,
+        audio_in: str = None,
+        calib_num: int = 200,
     ):
         assert check_argument_types()
         self.set_all_random_seed(0)
@@ -31,6 +37,10 @@
         print("output dir: {}".format(self.cache_dir))
         self.onnx = onnx
         self.quant = quant
+        self.fallback_num = fallback_num
+        self.frontend = None
+        self.audio_in = audio_in
+        self.calib_num = calib_num
         
 
     def _export(
@@ -60,8 +70,19 @@
 
 
     def _torch_quantize(self, model):
+        def _run_calibration_data(m):
+            # calibrate on real audio when provided; otherwise fall back to
+            # the model's dummy inputs as a single calibration example
+            if self.audio_in is not None:
+                feats, feats_len = self.load_feats(self.audio_in)
+                for feat, feat_len in zip(feats, feats_len):
+                    with torch.no_grad():
+                        m(feat, feat_len)
+            else:
+                dummy_input = model.get_dummy_inputs()
+                m(*dummy_input)
+
         from torch_quant.module import ModuleFilter
-        from torch_quant.observer import HistogramObserver
         from torch_quant.quantizer import Backend, Quantizer
         from funasr.export.models.modules.decoder_layer import DecoderLayerSANM
         from funasr.export.models.modules.encoder_layer import EncoderLayerSANM
@@ -70,16 +91,20 @@
         quantizer = Quantizer(
             module_filter=module_filter,
             backend=Backend.FBGEMM,
-            act_ob_ctr=HistogramObserver,
         )
         model.eval()
         calib_model = quantizer.calib(model)
-        # run calibration data
-        # using dummy inputs for a example
-        dummy_input = model.get_dummy_inputs()
-        _ = calib_model(*dummy_input)
+        _run_calibration_data(calib_model)
+        if self.fallback_num > 0:
+            # perform automatic mixed precision quantization
+            amp_model = quantizer.amp(model)
+            _run_calibration_data(amp_model)
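+            # fallback() picks self.fallback_num layers to keep in fp32; the
+            # chosen layer names land in module_filter.exclude_names (printed
+            # below)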
+            quantizer.fallback(amp_model, num=self.fallback_num)
+            print('Fallback layers:')
+            print('\n'.join(quantizer.module_filter.exclude_names))
         quant_model = quantizer.quantize(model)
         return quant_model
+
 
     def _export_torchscripts(self, model, verbose, path, enc_size=None):
         if enc_size:
@@ -101,6 +126,39 @@
         random.seed(seed)
         np.random.seed(seed)
         torch.random.manual_seed(seed)
+
+    def parse_audio_in(self, audio_in):
+        wav_list, name_list = [], []
+        if audio_in.endswith(".scp"):
+            # Kaldi-style wav.scp: "<utt-name> <wav-path>" per line,
+            # capped at calib_num utterances
+            with open(audio_in, 'r') as f:
+                lines = f.readlines()[:self.calib_num]
+            for line in lines:
+                name, path = line.strip().split()
+                name_list.append(name)
+                wav_list.append(path)
+        else:
+            wav_list = [audio_in]
+            name_list = ["test"]
+        return wav_list, name_list
+
+    def load_feats(self, audio_in: str = None):
+        import torchaudio
+
+        wav_list, name_list = self.parse_audio_in(audio_in)
+        feats = []
+        feats_len = []
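+        # compute fbank features utterance by utterance, resampling to the
+        # frontend's sample rate when the source wav differs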
+        for path in wav_list:
+            waveform, sampling_rate = torchaudio.load(path)
+            if sampling_rate != self.frontend.fs:
+                waveform = torchaudio.transforms.Resample(orig_freq=sampling_rate,
+                                                          new_freq=self.frontend.fs)(waveform)
+            fbank, fbank_len = self.frontend(waveform, [waveform.size(1)])
+            feats.append(fbank)
+            feats_len.append(fbank_len)
+        return feats, feats_len
+
     def export(self,
                tag_name: str = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
                mode: str = 'paraformer',
@@ -127,6 +185,7 @@
         model, asr_train_args = ASRTask.build_model_from_file(
             asr_train_config, asr_model_file, cmvn_file, 'cpu'
         )
+        self.frontend = model.frontend
         self._export(model, tag_name)
             
 
@@ -153,26 +212,40 @@
 
         if self.quant:
             from onnxruntime.quantization import QuantType, quantize_dynamic
+            import onnx
             quant_model_path = os.path.join(path, f'{model.model_name}_quant.onnx')
+            onnx_model = onnx.load(model_path)
+            nodes = [n.name for n in onnx_model.graph.node]
+            nodes_to_exclude = [m for m in nodes if 'output' in m]
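+            # dynamic quantization: MatMul weights are stored per-channel as
+            # uint8 and activations are quantized on the fly at runtime;
+            # nodes whose names contain 'output' are left unquantized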
             quantize_dynamic(
                 model_input=model_path,
                 model_output=quant_model_path,
+                op_types_to_quantize=['MatMul'],
+                per_channel=True,
+                reduce_range=False,
                 weight_type=QuantType.QUInt8,
+                nodes_to_exclude=nodes_to_exclude,
             )
 
 
 if __name__ == '__main__':
-    import sys
-    
-    model_path = sys.argv[1]
-    output_dir = sys.argv[2]
-    onnx = sys.argv[3]
-    quant = sys.argv[4]
-    onnx = onnx.lower()
-    onnx = onnx == 'true'
-    quant = quant == 'true'
-    # model_path = 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
-    # output_dir = "../export"
-    export_model = ASRModelExportParaformer(cache_dir=output_dir, onnx=onnx, quant=quant)
-    export_model.export(model_path)
-    # export_model.export('/root/cache/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model-name', type=str, required=True)
+    parser.add_argument('--export-dir', type=str, required=True)
+    parser.add_argument('--type', type=str, default='onnx', help='export type: "onnx" or "torch"')
+    parser.add_argument('--quantize', type=str2bool, default=False, help='export a quantized model')
+    parser.add_argument('--fallback-num', type=int, default=0, help='number of layers to keep in fp32 during AMP quantization')
+    parser.add_argument('--audio-in', type=str, default=None, help='calibration audio: a wav file or a wav.scp list')
+    parser.add_argument('--calib-num', type=int, default=200, help='max number of calibration utterances')
+    args = parser.parse_args()
+
+    export_model = ASRModelExportParaformer(
+        cache_dir=args.export_dir,
+        onnx=args.type == 'onnx',
+        quant=args.quantize,
+        fallback_num=args.fallback_num,
+        audio_in=args.audio_in,
+        calib_num=args.calib_num,
+    )
+    export_model.export(args.model_name)

--
Gitblit v1.9.1