From 6e6475cd2afebd5db41beef633645f154bb4cf05 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 16 Oct 2024 14:35:56 +0800
Subject: [PATCH] funasr tables
---
runtime/python/onnxruntime/funasr_onnx/paraformer_bin.py | 50 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/runtime/python/onnxruntime/funasr_onnx/paraformer_bin.py b/runtime/python/onnxruntime/funasr_onnx/paraformer_bin.py
index 871674e..4f35fcc 100644
--- a/runtime/python/onnxruntime/funasr_onnx/paraformer_bin.py
+++ b/runtime/python/onnxruntime/funasr_onnx/paraformer_bin.py
@@ -322,13 +322,19 @@
self.pred_bias = config["model_conf"]["predictor_bias"]
else:
self.pred_bias = 0
+ if "lang" in config:
+ self.language = config["lang"]
+ else:
+ self.language = None
def __call__(
self, wav_content: Union[str, np.ndarray, List[str]], hotwords: str, **kwargs
) -> List:
+ # def __call__(
+ # self, waveform_list:list, hotwords: str, **kwargs
+ # ) -> List:
# make hotword list
hotwords, hotwords_length = self.proc_hotword(hotwords)
- # import pdb; pdb.set_trace()
[bias_embed] = self.eb_infer(hotwords, hotwords_length)
# index from bias_embed
bias_embed = bias_embed.transpose(1, 0, 2)
@@ -345,15 +351,47 @@
try:
outputs = self.bb_infer(feats, feats_len, bias_embed)
am_scores, valid_token_lens = outputs[0], outputs[1]
+
+ if len(outputs) == 4:
+ # for BiCifParaformer Inference
+ us_alphas, us_peaks = outputs[2], outputs[3]
+ else:
+ us_alphas, us_peaks = None, None
+
except ONNXRuntimeError:
# logging.warning(traceback.format_exc())
logging.warning("input wav is silence or noise")
preds = [""]
else:
preds = self.decode(am_scores, valid_token_lens)
- for pred in preds:
- pred = sentence_postprocess(pred)
- asr_res.append({"preds": pred})
+ if us_peaks is None:
+ for pred in preds:
+ if self.language == "en-bpe":
+ pred = sentence_postprocess_sentencepiece(pred)
+ else:
+ pred = sentence_postprocess(pred)
+ asr_res.append({"preds": pred})
+ else:
+ for pred, us_peaks_ in zip(preds, us_peaks):
+ raw_tokens = pred
+ timestamp, timestamp_raw = time_stamp_lfr6_onnx(
+ us_peaks_, copy.copy(raw_tokens)
+ )
+ text_proc, timestamp_proc, _ = sentence_postprocess(
+ raw_tokens, timestamp_raw
+ )
+ # logging.warning(timestamp)
+ if len(self.plot_timestamp_to):
+ self.plot_wave_timestamp(
+ waveform_list[0], timestamp, self.plot_timestamp_to
+ )
+ asr_res.append(
+ {
+ "preds": text_proc,
+ "timestamp": timestamp_proc,
+ "raw_tokens": raw_tokens,
+ }
+ )
return asr_res
def proc_hotword(self, hotwords):
@@ -376,10 +414,10 @@
return np.array(hotwords)
hotword_int = [word_map(i) for i in hotwords]
- # import pdb; pdb.set_trace()
+
hotword_int.append(np.array([1]))
hotwords = pad_list(hotword_int, pad_value=0, max_len=10)
- # import pdb; pdb.set_trace()
+
return hotwords, hotwords_length
def bb_infer(
--
Gitblit v1.9.1