仁迷
2023-02-09 fdf74bb85cfe3dd0ce6cbaf51ec8d5b3ca3d2039
funasr/export/export_model.py
@@ -20,7 +20,7 @@
        self.cache_dir = Path(cache_dir)
        self.export_config = dict(
            feats_dim=560,
            onnx=onnx,
            onnx=False,
        )
        logging.info("output dir: {}".format(self.cache_dir))
        self.onnx = onnx
@@ -41,11 +41,24 @@
            model,
            self.export_config,
        )
        self._export_onnx(model, verbose, export_dir)
        if self.onnx:
            self._export_onnx(model, verbose, export_dir)
        else:
            self._export_torchscripts(model, verbose, export_dir)
        logging.info("output dir: {}".format(export_dir))
    def _export_torchscripts(self, model, verbose, path, enc_size=None):
        if enc_size:
            dummy_input = model.get_dummy_inputs(enc_size)
        else:
            dummy_input = model.get_dummy_inputs_txt()
        # model_script = torch.jit.script(model)
        model_script = torch.jit.trace(model, dummy_input)
        model_script.save(os.path.join(path, f'{model.model_name}.torchscripts'))
    def export_from_modelscope(
        self,
@@ -102,6 +115,6 @@
if __name__ == '__main__':
    output_dir = "../export"
    # onnx=False -> TorchScript export; set onnx=True for an ONNX export.
    # (The diff artifact that instantiated the exporter twice — first with
    # onnx=True, immediately overwritten by onnx=False — is collapsed to the
    # single effective instantiation.)
    export_model = ASRModelExportParaformer(cache_dir=output_dir, onnx=False)
    export_model.export_from_modelscope('damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
    # export_model.export_from_local('/root/cache/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')