From 602fe75a1f0a8d64ccb6fc4d69ad510872fdfd13 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 17 Mar 2023 20:30:40 +0800
Subject: [PATCH] rtf benchmark
---
funasr/export/export_model.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/funasr/export/export_model.py b/funasr/export/export_model.py
index b827f16..f6ba616 100644
--- a/funasr/export/export_model.py
+++ b/funasr/export/export_model.py
@@ -10,7 +10,7 @@
from funasr.export.models import get_model
import numpy as np
import random
-
+from funasr.utils.types import str2bool
# torch_version = float(".".join(torch.__version__.split(".")[:2]))
# assert torch_version > 1.9
@@ -74,8 +74,9 @@
# using dummy inputs for a example
if self.audio_in is not None:
feats, feats_len = self.load_feats(self.audio_in)
- for feat, len in zip(feats, feats_len):
- m(feat, len)
+ for i, (feat, len) in enumerate(zip(feats, feats_len)):
+ with torch.no_grad():
+ m(feat, len)
else:
dummy_input = model.get_dummy_inputs()
m(*dummy_input)
@@ -233,7 +234,7 @@
parser.add_argument('--model-name', type=str, required=True)
parser.add_argument('--export-dir', type=str, required=True)
parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
- parser.add_argument('--quantize', action='store_true', help='export quantized model')
+ parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
parser.add_argument('--calib_num', type=int, default=200, help='calib max num')
--
Gitblit v1.9.1