From d3d2fe73c08ee51d3a44d7ffb7b31eff32b60404 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 18 Mar 2024 20:46:23 +0800
Subject: [PATCH] wav frontend
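
Build the wav frontend and tokenizer before stashing them in kwargs: kwargs["tokenizer"] is now set unconditionally after the tokenizer branch, kwargs["frontend"] is set after input_size is derived from the frontend, the batch size computed from batch_size_s is clamped to at least 1, the speaker-diarization import hint is prefixed with "Notice:", and export() reads its options (type, quantize, fallback_num, calib_num, opset_version) from **cfg instead of named parameters.

A sketch of the resulting export call (the model name and wav path below are placeholders, not part of this patch):

    from funasr import AutoModel

    model = AutoModel(model="<asr_model_name_or_dir>")
    # options that used to be named parameters are now plain keyword arguments
    export_dir = model.export(
        input="<example.wav>",
        type="onnx",      # default; any other value falls back to TorchScript export
        quantize=False,
        opset_version=14,
    )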
---
funasr/auto/auto_model.py | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 47456a3..69aef28 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -29,7 +29,7 @@
from funasr.models.campplus.utils import sv_chunk, postprocess, distribute_spk
from funasr.models.campplus.cluster_backend import ClusterBackend
except:
- print("If you want to use the speaker diarization, please `pip install hdbscan`")
+ print("Notice: If you want to use the speaker diarization, please `pip install hdbscan`")
def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
@@ -164,22 +164,23 @@
tokenizer_class = tables.tokenizer_classes.get(tokenizer)
tokenizer_conf = kwargs.get("tokenizer_conf", {})
tokenizer = tokenizer_class(**tokenizer_conf)
- kwargs["tokenizer"] = tokenizer
+
kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None
kwargs["token_list"] = tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"]
vocab_size = len(kwargs["token_list"]) if kwargs["token_list"] is not None else -1
else:
vocab_size = -1
+ kwargs["tokenizer"] = tokenizer
+
# build frontend
frontend = kwargs.get("frontend", None)
kwargs["input_size"] = None
if frontend is not None:
frontend_class = tables.frontend_classes.get(frontend)
frontend = frontend_class(**kwargs["frontend_conf"])
- kwargs["frontend"] = frontend
kwargs["input_size"] = frontend.output_size() if hasattr(frontend, "output_size") else None
-
+ kwargs["frontend"] = frontend
# build model
model_class = tables.model_classes.get(kwargs["model"])
model = model_class(**kwargs, **kwargs.get("model_conf", {}), vocab_size=vocab_size)
@@ -290,7 +291,7 @@
# step.2 compute asr model
model = self.model
deep_update(kwargs, cfg)
- batch_size = int(kwargs.get("batch_size_s", 300))*1000
+ batch_size = max(int(kwargs.get("batch_size_s", 300))*1000, 1)
batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60))*1000
kwargs["batch_size"] = batch_size
@@ -469,13 +470,19 @@
# f"time_escape_all: {time_escape_total_all_samples:0.3f}")
return results_ret_list
- def export(self, input=None,
- type : str = "onnx",
- quantize: bool = False,
- fallback_num: int = 5,
- calib_num: int = 100,
- opset_version: int = 14,
- **cfg):
+ def export(self, input=None, **cfg):
+
+ """
+
+ :param input:
+ :param type:
+ :param quantize:
+ :param fallback_num:
+ :param calib_num:
+ :param opset_version:
+ :param cfg:
+ :return:
+ """
device = cfg.get("device", "cpu")
model = self.model.to(device=device)
@@ -485,7 +492,7 @@
del kwargs["model"]
model.eval()
- batch_size = 1
+ type = kwargs.get("type", "onnx")
key_list, data_list = prepare_data_iterator(input, input_len=None, data_type=kwargs.get("data_type", None), key=None)
@@ -495,19 +502,11 @@
export_dir = export_utils.export_onnx(
model=model,
data_in=data_list,
- quantize=quantize,
- fallback_num=fallback_num,
- calib_num=calib_num,
- opset_version=opset_version,
**kwargs)
else:
export_dir = export_utils.export_torchscripts(
model=model,
data_in=data_list,
- quantize=quantize,
- fallback_num=fallback_num,
- calib_num=calib_num,
- opset_version=opset_version,
**kwargs)
return export_dir
\ No newline at end of file
--
Gitblit v1.9.1