From 1ce704d8c09bd4d4c7e5ab087f951f31fad9fca6 Mon Sep 17 00:00:00 2001
From: nichongjia-2007 <nichongjia@gmail.com>
Date: Fri, 7 Jul 2023 15:47:19 +0800
Subject: [PATCH] Merge branch 'main' of https://github.com/alibaba-damo-academy/FunASR
---
funasr/bin/asr_inference_launch.py | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/funasr/bin/asr_inference_launch.py b/funasr/bin/asr_inference_launch.py
index 81513ae..de18894 100644
--- a/funasr/bin/asr_inference_launch.py
+++ b/funasr/bin/asr_inference_launch.py
@@ -255,8 +255,10 @@
if param_dict is not None:
hotword_list_or_file = param_dict.get('hotword')
export_mode = param_dict.get("export_mode", False)
+ clas_scale = param_dict.get('clas_scale', 1.0)
else:
hotword_list_or_file = None
+ clas_scale = 1.0
if kwargs.get("device", None) == "cpu":
ngpu = 0
@@ -289,6 +291,7 @@
penalty=penalty,
nbest=nbest,
hotword_list_or_file=hotword_list_or_file,
+ clas_scale=clas_scale,
)
speech2text = Speech2TextParaformer(**speech2text_kwargs)
@@ -617,10 +620,27 @@
sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
results_sorted = []
+ if not len(sorted_data):
+ key = keys[0]
+ # no active segments after VAD
+ if writer is not None:
+ # Write empty results
+ ibest_writer["token"][key] = ""
+ ibest_writer["token_int"][key] = ""
+ ibest_writer["vad"][key] = ""
+ ibest_writer["text"][key] = ""
+ ibest_writer["text_with_punc"][key] = ""
+ if use_timestamp:
+ ibest_writer["time_stamp"][key] = ""
+
+ logging.info("decoding, utt: {}, empty speech".format(key))
+ continue
+
batch_size_token_ms = batch_size_token*60
if speech2text.device == "cpu":
batch_size_token_ms = 0
- batch_size_token_ms = max(batch_size_token_ms, sorted_data[0][0][1] - sorted_data[0][0][0])
+ if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
+ batch_size_token_ms = max(batch_size_token_ms, sorted_data[0][0][1] - sorted_data[0][0][0])
batch_size_token_ms_cum = 0
beg_idx = 0
@@ -1349,10 +1369,7 @@
left_context=left_context,
right_context=right_context,
)
- speech2text = Speech2TextTransducer.from_pretrained(
- model_tag=model_tag,
- **speech2text_kwargs,
- )
+ speech2text = Speech2TextTransducer(**speech2text_kwargs)
def _forward(data_path_and_name_and_type,
raw_inputs: Union[np.ndarray, torch.Tensor] = None,
--
Gitblit v1.9.1