From 7bb2dfba0cb98c0eaaa18b2dfbb47a647eac9d58 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Thu, 23 Feb 2023 11:49:39 +0800
Subject: [PATCH] bugfix: select UniASR decoding mode via param_dict["decoding_model"] in asr_inference_uniasr_vad
---
funasr/bin/asr_inference_uniasr_vad.py | 13 +++++++++++++
 1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/funasr/bin/asr_inference_uniasr_vad.py b/funasr/bin/asr_inference_uniasr_vad.py
index 0a5824c..ac3b4b6 100644
--- a/funasr/bin/asr_inference_uniasr_vad.py
+++ b/funasr/bin/asr_inference_uniasr_vad.py
@@ -433,12 +433,25 @@
output_dir_v2: Optional[str] = None,
fs: dict = None,
param_dict: dict = None,
+ **kwargs,
):
# 3. Build data-iterator
if data_path_and_name_and_type is None and raw_inputs is not None:
if isinstance(raw_inputs, torch.Tensor):
raw_inputs = raw_inputs.numpy()
data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
+ if param_dict is not None and "decoding_model" in param_dict:
+ if param_dict["decoding_model"] == "fast":
+ speech2text.decoding_ind = 0
+ speech2text.decoding_mode = "model1"
+ elif param_dict["decoding_model"] == "normal":
+ speech2text.decoding_ind = 0
+ speech2text.decoding_mode = "model2"
+ elif param_dict["decoding_model"] == "offline":
+ speech2text.decoding_ind = 1
+ speech2text.decoding_mode = "model2"
+ else:
+ raise NotImplementedError("unsupported decoding model {}".format(param_dict["decoding_model"]))
loader = ASRTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
--
Gitblit v1.9.1
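
Note on the change above: when param_dict carries a "decoding_model" key, the
patch sets decoding_ind/decoding_mode on the speech2text object before the
data iterator is built; any other value raises NotImplementedError. Below is a
minimal standalone sketch of that mapping. The three value pairs are taken
from the diff; the table/function structure is an illustrative reorganization,
not code from the repository.

    # Standalone sketch (not from the repository) of the dispatch the new
    # block implements: map the user-facing "decoding_model" string to the
    # (decoding_ind, decoding_mode) pair the patch assigns to speech2text.
    DECODING_MODELS = {
        "fast": (0, "model1"),
        "normal": (0, "model2"),
        "offline": (1, "model2"),
    }

    def select_decoding(param_dict):
        """Return (decoding_ind, decoding_mode), or None when the patch
        would leave speech2text untouched (no param_dict / no key)."""
        if param_dict is None or "decoding_model" not in param_dict:
            return None
        try:
            return DECODING_MODELS[param_dict["decoding_model"]]
        except KeyError:
            # Mirrors the patch: unknown values are rejected explicitly.
            raise NotImplementedError(
                "unsupported decoding model {}".format(param_dict["decoding_model"])
            )

    assert select_decoding({"decoding_model": "offline"}) == (1, "model2")
    assert select_decoding(None) is None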