From e847f85a14b85a94cfa6e109ecab69974eeeb443 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Mon, 11 Mar 2024 19:32:07 +0800
Subject: [PATCH] Dev gzf (#1468)
---
funasr/auto/auto_model.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index d7b6cb9..c8cd30c 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -100,9 +100,7 @@
def __init__(self, **kwargs):
if not kwargs.get("disable_log", True):
tables.print()
- if kwargs.get("export_model", False):
- os.environ['EXPORTING_MODEL'] = 'TRUE'
-
+
model, kwargs = self.build_model(**kwargs)
# if vad_model is not None, build vad model else None
@@ -157,9 +155,9 @@
device = "cpu"
kwargs["batch_size"] = 1
kwargs["device"] = device
-
- if kwargs.get("ncpu", None):
- torch.set_num_threads(kwargs.get("ncpu"))
+
+ torch.set_num_threads(kwargs.get("ncpu", 4))
+
# build tokenizer
tokenizer = kwargs.get("tokenizer", None)
@@ -478,11 +476,13 @@
calib_num: int = 100,
opset_version: int = 14,
**cfg):
- os.environ['EXPORTING_MODEL'] = 'TRUE'
+
+ device = cfg.get("device", "cpu")
+ model = self.model.to(device=device)
kwargs = self.kwargs
deep_update(kwargs, cfg)
+ kwargs["device"] = device
del kwargs["model"]
- model = self.model
model.eval()
batch_size = 1
--
Gitblit v1.9.1