from pathlib import Path
from typing import List, Union, Tuple

import copy
import librosa
import numpy as np

from .utils.utils import (ONNXRuntimeError, get_logger,
                          read_yaml)
from .utils.postprocess_utils import sentence_postprocess
from .utils.frontend import WavFrontend
from funasr.utils.timestamp_tools import time_stamp_lfr6_pl

logging = get_logger()

    def __call__(self, wav_content: Union[str, np.ndarray, List[str]], **kwargs) -> List:
        waveform_list = self.load_data(wav_content, self.frontend.opts.frame_opts.samp_freq)
        waveform_nums = len(waveform_list)
        asr_res = []
        for beg_idx in range(0, waveform_nums, self.batch_size):
            res = {}
            end_idx = min(waveform_nums, beg_idx + self.batch_size)
            feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
            try:
                outputs = self.infer(feats, feats_len)
                am_scores, valid_token_lens = outputs[0], outputs[1]
                if len(outputs) == 4:
                    # for BiCifParaformer inference: extra CIF outputs used for timestamps
                    us_alphas, us_cif_peak = outputs[2], outputs[3]
                else:
                    us_alphas, us_cif_peak = None, None
            except ONNXRuntimeError:
                # logging.warning(traceback.format_exc())
                logging.warning("input wav is silence or noise")
                res['preds'] = ''
            else:
                # NOTE: decode() handles the whole batch, but only the first
                # utterance is kept here, so this path assumes batch_size == 1.
                preds, raw_token = self.decode(am_scores, valid_token_lens)[0]
                res['preds'] = preds
                if us_cif_peak is not None:
                    timestamp = time_stamp_lfr6_pl(us_alphas, us_cif_peak, copy.copy(raw_token), log=False)
                    res['timestamp'] = timestamp
            asr_res.append(res)
        return asr_res
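
    # Usage sketch (illustrative): how the result dicts built by __call__ might
    # be consumed. The class name, model directory, and wav path below are
    # assumptions for illustration; only the 'preds' and 'timestamp' keys come
    # from the code above, and 'timestamp' is only present for 4-output
    # (BiCifParaformer) models.
    #
    #     model = Paraformer(model_dir, batch_size=1)   # hypothetical setup
    #     for res in model("speech.wav"):
    #         print(res['preds'])
    #         if 'timestamp' in res:
    #             print(res['timestamp'])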

    def load_data(self,

    def infer(self, feats: np.ndarray,
              feats_len: np.ndarray) -> List[np.ndarray]:
        # The ONNX session returns either 2 outputs (am_scores, token_nums) or,
        # for BiCifParaformer models, 4 outputs (plus us_alphas, us_cif_peak),
        # so the raw output list is returned unchanged.
        outputs = self.ort_infer([feats, feats_len])
        return outputs

    def decode(self, am_scores: np.ndarray, token_nums: np.ndarray) -> List[Tuple[str, List[str]]]:
        return [self.decode_one(am_score, token_num)
                for am_score, token_num in zip(am_scores, token_nums)]

        # Change integer ids to tokens
        token = self.converter.ids2tokens(token_int)
        # token = token[:valid_token_num-1]
        texts = sentence_postprocess(token)
        text = texts[0]
        # text = self.tokenizer.tokens2text(token)
        return text, token
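
# ---------------------------------------------------------------------------
# Illustrative sketch: the upper body of decode_one is elided in this excerpt.
# As a rough, self-contained illustration of greedy selection over am_scores
# (an assumption for illustration, not necessarily the FunASR implementation),
# each predicted token position takes the highest-scoring vocabulary id and a
# padding/blank id (assumed to be 0 here) is dropped.

def greedy_token_ids(am_score: np.ndarray, valid_token_num: int) -> List[int]:
    """Pick the best vocabulary id per predicted token, then drop padding ids."""
    token_int = am_score.argmax(axis=-1)[:valid_token_num].tolist()
    return [t for t in token_int if t != 0]

# toy check: 3 predicted tokens over a 5-entry vocabulary -> [1, 2]
_scores = np.array([[0.1, 0.7, 0.1, 0.05, 0.05],
                    [0.2, 0.1, 0.6, 0.05, 0.05],
                    [0.8, 0.05, 0.05, 0.05, 0.05]])
print(greedy_token_ids(_scores, valid_token_num=3))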