From 9b4e0969f29dccdfa78a55219f8107e549c96e45 Mon Sep 17 00:00:00 2001
From: lzr265946 <lzr265946@alibaba-inc.com>
Date: Thu, 07 Sep 2023 17:19:02 +0800
Subject: [PATCH] fix transformerLM inference recipe
---
 funasr/bin/asr_infer.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 51 insertions(+), 4 deletions(-)
diff --git a/funasr/bin/asr_infer.py b/funasr/bin/asr_infer.py
index 259a286..2e002b7 100644
--- a/funasr/bin/asr_infer.py
+++ b/funasr/bin/asr_infer.py
@@ -280,6 +280,7 @@
         nbest: int = 1,
         frontend_conf: dict = None,
         hotword_list_or_file: str = None,
+        clas_scale: float = 1.0,
         decoding_ind: int = 0,
         **kwargs,
     ):
@@ -376,6 +377,7 @@
         # 6. [Optional] Build hotword list from str, local file or url
         self.hotword_list = None
         self.hotword_list = self.generate_hotwords_list(hotword_list_or_file)
+        self.clas_scale = clas_scale
 
         is_use_lm = lm_weight != 0.0 and lm_file is not None
         if (ctc_weight == 0.0 or asr_model.ctc == None) and not is_use_lm:
@@ -439,16 +441,21 @@
         pre_token_length = pre_token_length.round().long()
         if torch.max(pre_token_length) < 1:
             return []
-        if not isinstance(self.asr_model, ContextualParaformer) and not isinstance(self.asr_model,
-                                                                                   NeatContextualParaformer):
+        if not isinstance(self.asr_model, ContextualParaformer) and \
+                not isinstance(self.asr_model, NeatContextualParaformer):
             if self.hotword_list:
                 logging.warning("Hotword is given but asr model is not a ContextualParaformer.")
             decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds,
                                                                      pre_token_length)
             decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
         else:
-            decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds,
-                                                                     pre_token_length, hw_list=self.hotword_list)
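+            # clas_scale controls how strongly the hotword (CLAS) bias influences contextual decoding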
+            decoder_outs = self.asr_model.cal_decoder_with_predictor(enc,
+                                                                     enc_len,
+                                                                     pre_acoustic_embeds,
+                                                                     pre_token_length,
+                                                                     hw_list=self.hotword_list,
+                                                                     clas_scale=self.clas_scale)
             decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
 
         if isinstance(self.asr_model, BiCifParaformer):
@@ -1330,6 +1337,7 @@
         nbest: int = 1,
         streaming: bool = False,
         simu_streaming: bool = False,
+        full_utt: bool = False,
         chunk_size: int = 16,
         left_context: int = 32,
         right_context: int = 0,
@@ -1424,6 +1432,8 @@
         self.beam_search = beam_search
         self.streaming = streaming
         self.simu_streaming = simu_streaming
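+        # one-shot full-utterance (offline) decoding toggle; see full_utt_decode below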
+        self.full_utt = full_utt
         self.chunk_size = max(chunk_size, 0)
         self.left_context = left_context
         self.right_context = max(right_context, 0)
@@ -1443,6 +1453,8 @@
             self._ctx = self.asr_model.encoder.get_encoder_input_size(
                 self.window_size
             )
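+            # also keep the unclamped right_context (self.right_context above is clamped to >= 0)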
+            self._right_ctx = right_context
 
             self.last_chunk_length = (
                 self.asr_model.encoder.embed.min_frame_length + self.right_context + 1
@@ -1540,6 +1552,41 @@
         return nbest_hyps
 
     @torch.no_grad()
+    def full_utt_decode(self, speech: Union[torch.Tensor, np.ndarray]) -> List[HypothesisTransducer]:
+        """Decode a full utterance in a single pass (non-streaming).
+        Args:
+            speech: Speech data. (S)
+        Returns:
+            nbest_hypothesis: N-best hypothesis.
+        """
+        assert check_argument_types()
+
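+        # accept either a numpy waveform or a torch tensor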
+        if isinstance(speech, np.ndarray):
+            speech = torch.tensor(speech)
+
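+        # (Nsamples,) -> (1, Nsamples): add a batch dim, then extract features with the frontend if present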
+        if self.frontend is not None:
+            speech = torch.unsqueeze(speech, axis=0)
+            speech_lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
+            feats, feats_lengths = self.frontend(speech, speech_lengths)
+        else:
+            feats = speech.unsqueeze(0).to(getattr(torch, self.dtype))
+            feats_lengths = feats.new_full([1], dtype=torch.long, fill_value=feats.size(1))
+
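+        # apply the model's feature normalization (e.g. global CMVN) when configured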
+        if self.asr_model.normalize is not None:
+            feats, feats_lengths = self.asr_model.normalize(feats, feats_lengths)
+
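+        # encode the whole utterance at once (no chunking) and beam-search the encoder output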
+        feats = to_device(feats, device=self.device)
+        feats_lengths = to_device(feats_lengths, device=self.device)
+        enc_out = self.asr_model.encoder.full_utt_forward(feats, feats_lengths)
+        nbest_hyps = self.beam_search(enc_out[0])
+
+        return nbest_hyps
+
+    @torch.no_grad()
     def __call__(self, speech: Union[torch.Tensor, np.ndarray]) -> List[HypothesisTransducer]:
         """Speech2Text call.
         Args:
--
Gitblit v1.9.1