From bf4b3ef9cb95acaa2b92b98f236c4f3228cdbc2d Mon Sep 17 00:00:00 2001
From: hnluo <haoneng.lhn@alibaba-inc.com>
Date: Thu, 21 Sep 2023 16:30:43 +0800
Subject: [PATCH] Merge pull request #976 from alibaba-damo-academy/dev_lhn
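
This change reworks the ASR inference launcher: it drops the runtime
typeguard checks, adds a soundfile fallback when torchaudio cannot load
an input, wires hotword CLAS scaling ("clas_scale") and a "decoding_ind"
option through param_dict, extends the streaming cache with
encoder/decoder chunk look-back, handles empty VAD output, keeps overly
long VAD segments out of merged batches, adds a tqdm progress bar,
renames "simu_streaming" to "fake_streaming", adds a "full_utt" decoding
path and a "bat" inference mode, gives the transducer entry point
keyword defaults and a returned result list, and frees the CUDA cache
after decoding.

A minimal usage sketch of the new param_dict knobs. This assumes
inference_launch is this module's mode dispatcher (as in FunASR) and
uses placeholder paths; it is an illustration, not the canonical API:

    from funasr.bin.asr_inference_launch import inference_launch

    forward = inference_launch(
        mode="paraformer",
        asr_train_config="exp/config.yaml",   # placeholder
        asr_model_file="exp/model.pb",        # placeholder
        ngpu=1,
        param_dict={
            "hotword": "hotwords.txt",  # hotword list or file
            "clas_scale": 1.2,          # new: hotword CLAS scaling factor
            "decoding_ind": 0,          # new: forwarded to the model per batch
        },
    )
    results = forward(data_path_and_name_and_type=[("wav.scp", "speech", "sound")])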
---
funasr/bin/asr_inference_launch.py | 271 +++++++++++++++++++++++++++++++++++------------------
 1 file changed, 179 insertions(+), 92 deletions(-)
diff --git a/funasr/bin/asr_inference_launch.py b/funasr/bin/asr_inference_launch.py
index 656a965..50b9886 100644
--- a/funasr/bin/asr_inference_launch.py
+++ b/funasr/bin/asr_inference_launch.py
@@ -19,8 +19,8 @@
import numpy as np
import torch
import torchaudio
+import soundfile
import yaml
-from typeguard import check_argument_types
from funasr.bin.asr_infer import Speech2Text
from funasr.bin.asr_infer import Speech2TextMFCCA
@@ -45,7 +45,7 @@
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_none
from funasr.utils.vad_utils import slice_padding_fbank
-
+from tqdm import tqdm
def inference_asr(
maxlenratio: float,
@@ -79,7 +79,6 @@
param_dict: dict = None,
**kwargs,
):
- assert check_argument_types()
ncpu = kwargs.get("ncpu", 1)
torch.set_num_threads(ncpu)
if batch_size > 1:
@@ -237,9 +236,9 @@
timestamp_infer_config: Union[Path, str] = None,
timestamp_model_file: Union[Path, str] = None,
param_dict: dict = None,
+ decoding_ind: int = 0,
**kwargs,
):
- assert check_argument_types()
ncpu = kwargs.get("ncpu", 1)
torch.set_num_threads(ncpu)
@@ -257,11 +256,11 @@
if param_dict is not None:
hotword_list_or_file = param_dict.get('hotword')
export_mode = param_dict.get("export_mode", False)
+ clas_scale = param_dict.get('clas_scale', 1.0)
else:
hotword_list_or_file = None
+ clas_scale = 1.0
- if kwargs.get("device", None) == "cpu":
- ngpu = 0
if ngpu >= 1 and torch.cuda.is_available():
device = "cuda"
else:
@@ -291,6 +290,8 @@
penalty=penalty,
nbest=nbest,
hotword_list_or_file=hotword_list_or_file,
+ clas_scale=clas_scale,
+ decoding_ind=decoding_ind,
)
speech2text = Speech2TextParaformer(**speech2text_kwargs)
@@ -313,6 +314,7 @@
**kwargs,
):
+ decoding_ind = None
hotword_list_or_file = None
if param_dict is not None:
hotword_list_or_file = param_dict.get('hotword')
@@ -320,6 +322,8 @@
hotword_list_or_file = kwargs['hotword']
if hotword_list_or_file is not None or 'hotword' in kwargs:
speech2text.hotword_list = speech2text.generate_hotwords_list(hotword_list_or_file)
+ if param_dict is not None and "decoding_ind" in param_dict:
+ decoding_ind = param_dict["decoding_ind"]
# 3. Build data-iterator
if data_path_and_name_and_type is None and raw_inputs is not None:
@@ -366,10 +370,11 @@
# N-best list of (text, token, token_int, hyp_object)
time_beg = time.time()
+ batch["decoding_ind"] = decoding_ind
results = speech2text(**batch)
if len(results) < 1:
hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
- results = [[" ", ["sil"], [2], hyp, 10, 6]] * nbest
+ results = [[" ", ["sil"], [2], hyp, 10, 6, []]] * nbest
time_end = time.time()
forward_time = time_end - time_beg
lfr_factor = results[0][-1]
@@ -410,7 +415,7 @@
ibest_writer["rtf"][key] = rtf_cur
if text is not None:
- if use_timestamp and timestamp is not None:
+ if use_timestamp and timestamp is not None and len(timestamp):
postprocessed_result = postprocess_utils.sentence_postprocess(token, timestamp)
else:
postprocessed_result = postprocess_utils.sentence_postprocess(token)
@@ -438,6 +443,7 @@
logging.info(rtf_avg)
if writer is not None:
ibest_writer["rtf"]["rtf_avf"] = rtf_avg
+ torch.cuda.empty_cache()
return asr_result_list
return _forward
@@ -480,7 +486,6 @@
param_dict: dict = None,
**kwargs,
):
- assert check_argument_types()
ncpu = kwargs.get("ncpu", 1)
torch.set_num_threads(ncpu)
@@ -564,6 +569,8 @@
if 'hotword' in kwargs:
hotword_list_or_file = kwargs['hotword']
+    speech2vadsegment.vad_model.vad_opts.max_single_segment_time = kwargs.get("max_single_segment_time", 60000)
+    # Threshold (seconds, converted to ms): a next segment at or above this length is not merged into the current batch.
+    batch_size_token_threshold_s = kwargs.get(
+        "batch_size_token_threshold_s",
+        int(speech2vadsegment.vad_model.vad_opts.max_single_segment_time * 0.67 / 1000)) * 1000
batch_size_token = kwargs.get("batch_size_token", 6000)
print("batch_size_token: ", batch_size_token)
@@ -620,17 +627,34 @@
sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0])
results_sorted = []
+ if not len(sorted_data):
+ key = keys[0]
+ # no active segments after VAD
+ if writer is not None:
+ # Write empty results
+ ibest_writer["token"][key] = ""
+ ibest_writer["token_int"][key] = ""
+ ibest_writer["vad"][key] = ""
+ ibest_writer["text"][key] = ""
+ ibest_writer["text_with_punc"][key] = ""
+ if use_timestamp:
+ ibest_writer["time_stamp"][key] = ""
+
+ logging.info("decoding, utt: {}, empty speech".format(key))
+ continue
+
batch_size_token_ms = batch_size_token*60
if speech2text.device == "cpu":
batch_size_token_ms = 0
- batch_size_token_ms = max(batch_size_token_ms, sorted_data[0][0][1] - sorted_data[0][0][0])
+ if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
+ batch_size_token_ms = max(batch_size_token_ms, sorted_data[0][0][1] - sorted_data[0][0][0])
batch_size_token_ms_cum = 0
beg_idx = 0
- for j, _ in enumerate(range(0, n)):
+ beg_asr_total = time.time()
+    for j in tqdm(range(n)):
batch_size_token_ms_cum += (sorted_data[j][0][1] - sorted_data[j][0][0])
- if j < n - 1 and (batch_size_token_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][
- 0]) < batch_size_token_ms:
+            if j < n - 1 and (batch_size_token_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size_token_ms \
+                    and (sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size_token_threshold_s:
continue
batch_size_token_ms_cum = 0
end_idx = j + 1
@@ -638,16 +662,17 @@
beg_idx = end_idx
batch = {"speech": speech_j, "speech_lengths": speech_lengths_j}
batch = to_device(batch, device=device)
- print("batch: ", speech_j.shape[0])
+ # print("batch: ", speech_j.shape[0])
beg_asr = time.time()
results = speech2text(**batch)
end_asr = time.time()
- print("time cost asr: ", end_asr - beg_asr)
+ # print("time cost asr: ", end_asr - beg_asr)
if len(results) < 1:
results = [["", [], [], [], [], [], []]]
results_sorted.extend(results)
-
+ end_asr_total = time.time()
+ print("total time cost asr: ", end_asr_total-beg_asr_total)
restored_data = [0] * n
for j in range(n):
index = sorted_data[j][1]
@@ -669,7 +694,7 @@
text, token, token_int = result[0], result[1], result[2]
time_stamp = result[4] if len(result[4]) > 0 else None
- if use_timestamp and time_stamp is not None:
+ if use_timestamp and time_stamp is not None and len(time_stamp):
postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
else:
postprocessed_result = postprocess_utils.sentence_postprocess(token)
@@ -713,6 +738,7 @@
ibest_writer["time_stamp"][key] = "{}".format(time_stamp_postprocessed)
logging.info("decoding, utt: {}, predictions: {}".format(key, text_postprocessed_punc))
+ torch.cuda.empty_cache()
return asr_result_list
return _forward
@@ -748,7 +774,6 @@
param_dict: dict = None,
**kwargs,
):
- assert check_argument_types()
if word_lm_train_config is not None:
raise NotImplementedError("Word LM is not implemented")
@@ -817,37 +842,72 @@
data = yaml.load(f, Loader=yaml.Loader)
return data
- def _prepare_cache(cache: dict = {}, chunk_size=[5, 10, 5], batch_size=1):
+ def _prepare_cache(cache: dict = {}, chunk_size=[5, 10, 5], encoder_chunk_look_back=0,
+ decoder_chunk_look_back=0, batch_size=1):
if len(cache) > 0:
return cache
config = _read_yaml(asr_train_config)
enc_output_size = config["encoder_conf"]["output_size"]
feats_dims = config["frontend_conf"]["n_mels"] * config["frontend_conf"]["lfr_m"]
cache_en = {"start_idx": 0, "cif_hidden": torch.zeros((batch_size, 1, enc_output_size)),
- "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size, "last_chunk": False,
+ "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size,
+ "encoder_chunk_look_back": encoder_chunk_look_back, "last_chunk": False, "opt": None,
"feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
cache["encoder"] = cache_en
- cache_de = {"decode_fsmn": None}
+ cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None, "chunk_size": chunk_size}
cache["decoder"] = cache_de
return cache
- def _cache_reset(cache: dict = {}, chunk_size=[5, 10, 5], batch_size=1):
+ def _cache_reset(cache: dict = {}, chunk_size=[5, 10, 5], encoder_chunk_look_back=0,
+ decoder_chunk_look_back=0, batch_size=1):
if len(cache) > 0:
config = _read_yaml(asr_train_config)
enc_output_size = config["encoder_conf"]["output_size"]
feats_dims = config["frontend_conf"]["n_mels"] * config["frontend_conf"]["lfr_m"]
cache_en = {"start_idx": 0, "cif_hidden": torch.zeros((batch_size, 1, enc_output_size)),
- "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size, "last_chunk": False,
- "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)),
- "tail_chunk": False}
+ "cif_alphas": torch.zeros((batch_size, 1)), "chunk_size": chunk_size,
+ "encoder_chunk_look_back": encoder_chunk_look_back, "last_chunk": False, "opt": None,
+ "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
cache["encoder"] = cache_en
- cache_de = {"decode_fsmn": None}
+ cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None, "chunk_size": chunk_size}
cache["decoder"] = cache_de
return cache
+
def _forward(
data_path_and_name_and_type,
@@ -863,31 +923,47 @@
raw_inputs = _load_bytes(data_path_and_name_and_type[0])
raw_inputs = torch.tensor(raw_inputs)
if data_path_and_name_and_type is not None and data_path_and_name_and_type[2] == "sound":
- raw_inputs = torchaudio.load(data_path_and_name_and_type[0])[0][0]
+            try:
+                raw_inputs = torchaudio.load(data_path_and_name_and_type[0])[0][0]
+            except Exception:
+                # Fall back to soundfile when torchaudio cannot decode the file.
+                raw_inputs = soundfile.read(data_path_and_name_and_type[0], dtype='float32')[0]
+                if raw_inputs.ndim == 2:
+                    raw_inputs = raw_inputs[:, 0]  # keep the first channel only
+                raw_inputs = torch.tensor(raw_inputs)
if data_path_and_name_and_type is None and raw_inputs is not None:
if isinstance(raw_inputs, np.ndarray):
raw_inputs = torch.tensor(raw_inputs)
is_final = False
cache = {}
chunk_size = [5, 10, 5]
+ encoder_chunk_look_back = 0
+ decoder_chunk_look_back = 0
if param_dict is not None and "cache" in param_dict:
cache = param_dict["cache"]
if param_dict is not None and "is_final" in param_dict:
is_final = param_dict["is_final"]
if param_dict is not None and "chunk_size" in param_dict:
chunk_size = param_dict["chunk_size"]
+ if param_dict is not None and "encoder_chunk_look_back" in param_dict:
+ encoder_chunk_look_back = param_dict["encoder_chunk_look_back"]
+ if encoder_chunk_look_back > 0:
+ chunk_size[0] = 0
+ if param_dict is not None and "decoder_chunk_look_back" in param_dict:
+ decoder_chunk_look_back = param_dict["decoder_chunk_look_back"]
# 7 .Start for-loop
# FIXME(kamo): The output format should be discussed about
raw_inputs = torch.unsqueeze(raw_inputs, axis=0)
asr_result_list = []
- cache = _prepare_cache(cache, chunk_size=chunk_size, batch_size=1)
+ cache = _prepare_cache(cache, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back,
+ decoder_chunk_look_back=decoder_chunk_look_back, batch_size=1)
item = {}
if data_path_and_name_and_type is not None and data_path_and_name_and_type[2] == "sound":
sample_offset = 0
speech_length = raw_inputs.shape[1]
stride_size = chunk_size[1] * 960
- cache = _prepare_cache(cache, chunk_size=chunk_size, batch_size=1)
+ cache = _prepare_cache(cache, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back,
+ decoder_chunk_look_back=decoder_chunk_look_back, batch_size=1)
final_result = ""
for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):
if sample_offset + stride_size >= speech_length - 1:
@@ -908,7 +984,8 @@
asr_result_list.append(item)
if is_final:
- cache = _cache_reset(cache, chunk_size=chunk_size, batch_size=1)
+ cache = _cache_reset(cache, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back,
+ decoder_chunk_look_back=decoder_chunk_look_back, batch_size=1)
return asr_result_list
return _forward
@@ -950,7 +1027,6 @@
param_dict: dict = None,
**kwargs,
):
- assert check_argument_types()
ncpu = kwargs.get("ncpu", 1)
torch.set_num_threads(ncpu)
if batch_size > 1:
@@ -1119,7 +1195,6 @@
param_dict: dict = None,
**kwargs,
):
- assert check_argument_types()
ncpu = kwargs.get("ncpu", 1)
torch.set_num_threads(ncpu)
if batch_size > 1:
@@ -1252,27 +1327,28 @@
nbest: int,
num_workers: int,
log_level: Union[int, str],
- data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
+ # data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
asr_train_config: Optional[str],
asr_model_file: Optional[str],
- cmvn_file: Optional[str],
- beam_search_config: Optional[dict],
- lm_train_config: Optional[str],
- lm_file: Optional[str],
- model_tag: Optional[str],
- token_type: Optional[str],
- bpemodel: Optional[str],
- key_file: Optional[str],
- allow_variable_data_keys: bool,
- quantize_asr_model: Optional[bool],
- quantize_modules: Optional[List[str]],
- quantize_dtype: Optional[str],
- streaming: Optional[bool],
- simu_streaming: Optional[bool],
- chunk_size: Optional[int],
- left_context: Optional[int],
- right_context: Optional[int],
- display_partial_hypotheses: bool,
+ cmvn_file: Optional[str] = None,
+ beam_search_config: Optional[dict] = None,
+ lm_train_config: Optional[str] = None,
+ lm_file: Optional[str] = None,
+ model_tag: Optional[str] = None,
+ token_type: Optional[str] = None,
+ bpemodel: Optional[str] = None,
+ key_file: Optional[str] = None,
+ allow_variable_data_keys: bool = False,
+ quantize_asr_model: Optional[bool] = False,
+ quantize_modules: Optional[List[str]] = None,
+ quantize_dtype: Optional[str] = "float16",
+ streaming: Optional[bool] = False,
+ fake_streaming: Optional[bool] = False,
+ full_utt: Optional[bool] = False,
+ chunk_size: Optional[int] = 16,
+ left_context: Optional[int] = 16,
+ right_context: Optional[int] = 0,
+ display_partial_hypotheses: bool = False,
**kwargs,
) -> None:
"""Transducer model inference.
@@ -1307,7 +1383,6 @@
right_context: Number of frames in right context AFTER subsampling.
display_partial_hypotheses: Whether to display partial hypotheses.
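+        fake_streaming: Whether to use fake-streaming (chunk-simulated) decoding.
+        full_utt: Whether to decode the full utterance in a single pass.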
"""
- assert check_argument_types()
if batch_size > 1:
raise NotImplementedError("batch decoding is not implemented")
@@ -1319,7 +1394,7 @@
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
- if ngpu >= 1:
+ if ngpu >= 1 and torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
@@ -1345,15 +1420,13 @@
quantize_modules=quantize_modules,
quantize_dtype=quantize_dtype,
streaming=streaming,
- simu_streaming=simu_streaming,
+ fake_streaming=fake_streaming,
+ full_utt=full_utt,
chunk_size=chunk_size,
left_context=left_context,
right_context=right_context,
)
- speech2text = Speech2TextTransducer.from_pretrained(
- model_tag=model_tag,
- **speech2text_kwargs,
- )
+ speech2text = Speech2TextTransducer(**speech2text_kwargs)
def _forward(data_path_and_name_and_type,
raw_inputs: Union[np.ndarray, torch.Tensor] = None,
@@ -1372,47 +1445,57 @@
key_file=key_file,
num_workers=num_workers,
)
+ asr_result_list = []
+
+ if output_dir is not None:
+ writer = DatadirWriter(output_dir)
+ else:
+ writer = None
# 4 .Start for-loop
- with DatadirWriter(output_dir) as writer:
- for keys, batch in loader:
- assert isinstance(batch, dict), type(batch)
- assert all(isinstance(s, str) for s in keys), keys
+ for keys, batch in loader:
+ assert isinstance(batch, dict), type(batch)
+ assert all(isinstance(s, str) for s in keys), keys
- _bs = len(next(iter(batch.values())))
- assert len(keys) == _bs, f"{len(keys)} != {_bs}"
- batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
- assert len(batch.keys()) == 1
+ _bs = len(next(iter(batch.values())))
+ assert len(keys) == _bs, f"{len(keys)} != {_bs}"
+ batch = {k: v[0] for k, v in batch.items() if not k.endswith("_lengths")}
+ assert len(batch.keys()) == 1
- try:
- if speech2text.streaming:
- speech = batch["speech"]
+ try:
+ if speech2text.streaming:
+ speech = batch["speech"]
- _steps = len(speech) // speech2text._ctx
- _end = 0
- for i in range(_steps):
- _end = (i + 1) * speech2text._ctx
+ _steps = len(speech) // speech2text._ctx
+ _end = 0
+ for i in range(_steps):
+ _end = (i + 1) * speech2text._ctx
- speech2text.streaming_decode(
- speech[i * speech2text._ctx: _end], is_final=False
- )
-
- final_hyps = speech2text.streaming_decode(
- speech[_end: len(speech)], is_final=True
+ speech2text.streaming_decode(
+ speech[i * speech2text._ctx: _end + speech2text._right_ctx], is_final=False
)
- elif speech2text.simu_streaming:
- final_hyps = speech2text.simu_streaming_decode(**batch)
- else:
- final_hyps = speech2text(**batch)
- results = speech2text.hypotheses_to_results(final_hyps)
- except TooShortUttError as e:
- logging.warning(f"Utterance {keys} {e}")
- hyp = Hypothesis(score=0.0, yseq=[], dec_state=None)
- results = [[" ", ["<space>"], [2], hyp]] * nbest
+ final_hyps = speech2text.streaming_decode(
+ speech[_end: len(speech)], is_final=True
+ )
+ elif speech2text.fake_streaming:
+ final_hyps = speech2text.fake_streaming_decode(**batch)
+ elif speech2text.full_utt:
+ final_hyps = speech2text.full_utt_decode(**batch)
+ else:
+ final_hyps = speech2text(**batch)
- key = keys[0]
- for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
+ results = speech2text.hypotheses_to_results(final_hyps)
+ except TooShortUttError as e:
+ logging.warning(f"Utterance {keys} {e}")
+ hyp = Hypothesis(score=0.0, yseq=[], dec_state=None)
+ results = [[" ", ["<space>"], [2], hyp]] * nbest
+
+ key = keys[0]
+ for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), results):
+ item = {'key': key, 'value': text}
+ asr_result_list.append(item)
+ if writer is not None:
ibest_writer = writer[f"{n}best_recog"]
ibest_writer["token"][key] = " ".join(token)
@@ -1422,6 +1505,8 @@
if text is not None:
ibest_writer["text"][key] = text
+ logging.info("decoding, utt: {}, predictions: {}".format(key, text))
+ return asr_result_list
return _forward
@@ -1457,7 +1542,6 @@
param_dict: dict = None,
**kwargs,
):
- assert check_argument_types()
if batch_size > 1:
raise NotImplementedError("batch decoding is not implemented")
if word_lm_train_config is not None:
@@ -1605,6 +1689,8 @@
elif mode == "mfcca":
return inference_mfcca(**kwargs)
elif mode == "rnnt":
+ return inference_transducer(**kwargs)
+ elif mode == "bat":
return inference_transducer(**kwargs)
elif mode == "sa_asr":
return inference_sa_asr(**kwargs)
@@ -1783,7 +1869,8 @@
group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
group.add_argument("--streaming", type=str2bool, default=False)
- group.add_argument("--simu_streaming", type=str2bool, default=False)
+ group.add_argument("--fake_streaming", type=str2bool, default=False)
+ group.add_argument("--full_utt", type=str2bool, default=False)
group.add_argument("--chunk_size", type=int, default=16)
group.add_argument("--left_context", type=int, default=16)
group.add_argument("--right_context", type=int, default=0)
--
Gitblit v1.9.1