From fd22b6e7f36e963ef29dbd3eafb0e0d6f2e12fa7 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: 星期三, 09 八月 2023 14:27:20 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR into main
---
funasr/export/export_model.py | 39 ++++++++++++++++++++++++---------------
 1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
index 9e13260..8c3108b 100644
--- a/funasr/export/export_model.py
+++ b/funasr/export/export_model.py
@@ -1,7 +1,6 @@
import json
from typing import Union, Dict
from pathlib import Path
-from typeguard import check_argument_types
import os
import logging
@@ -10,7 +9,7 @@
from funasr.export.models import get_model
import numpy as np
import random
-from funasr.utils.types import str2bool
+from funasr.utils.types import str2bool, str2triple_str
# torch_version = float(".".join(torch.__version__.split(".")[:2]))
# assert torch_version > 1.9
@@ -24,8 +23,8 @@
fallback_num: int = 0,
audio_in: str = None,
calib_num: int = 200,
+ model_revision: str = None,
):
- assert check_argument_types()
self.set_all_random_seed(0)
self.cache_dir = cache_dir
@@ -41,6 +40,7 @@
self.frontend = None
self.audio_in = audio_in
self.calib_num = calib_num
+ self.model_revision = model_revision
def _export(
@@ -55,18 +55,21 @@
# export encoder1
self.export_config["model_name"] = "model"
- model = get_model(
+ models = get_model(
model,
self.export_config,
)
- model.eval()
- # self._export_onnx(model, verbose, export_dir)
- if self.onnx:
- self._export_onnx(model, verbose, export_dir)
- else:
- self._export_torchscripts(model, verbose, export_dir)
-
- print("output dir: {}".format(export_dir))
+ if not isinstance(models, tuple):
+ models = (models,)
+
+ for i, model in enumerate(models):
+ model.eval()
+ if self.onnx:
+ self._export_onnx(model, verbose, export_dir)
+ else:
+ self._export_torchscripts(model, verbose, export_dir)
+
+ print("output dir: {}".format(export_dir))
def _torch_quantize(self, model):
@@ -171,7 +174,7 @@
model_dir = tag_name
if model_dir.startswith('damo'):
from modelscope.hub.snapshot_download import snapshot_download
- model_dir = snapshot_download(model_dir, cache_dir=self.cache_dir)
+ model_dir = snapshot_download(model_dir, cache_dir=self.cache_dir, revision=self.model_revision)
self.cache_dir = model_dir
if mode is None:
@@ -192,6 +195,7 @@
config, model_file, cmvn_file, 'cpu'
)
self.frontend = model.frontend
+ self.export_config["feats_dim"] = 560
elif mode.startswith('offline'):
from funasr.tasks.vad import VADTask
config = os.path.join(model_dir, 'vad.yaml')
@@ -263,7 +267,8 @@
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
- parser.add_argument('--model-name', type=str, required=True)
+ # parser.add_argument('--model-name', type=str, required=True)
+ parser.add_argument('--model-name', type=str, action="append", required=True, default=[])
parser.add_argument('--export-dir', type=str, required=True)
parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
parser.add_argument('--device', type=str, default='cpu', help='["cpu", "cuda"]')
@@ -271,6 +276,7 @@
parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
parser.add_argument('--calib_num', type=int, default=200, help='calib max num')
+ parser.add_argument('--model_revision', type=str, default=None, help='model_revision')
args = parser.parse_args()
export_model = ModelExport(
@@ -281,5 +287,8 @@
fallback_num=args.fallback_num,
audio_in=args.audio_in,
calib_num=args.calib_num,
+ model_revision=args.model_revision,
)
- export_model.export(args.model_name)
+ for model_name in args.model_name:
+ print("export model: {}".format(model_name))
+ export_model.export(model_name)
--
Gitblit v1.9.1