游雁
2023-03-21 263c7feefbd04b25dbe1584f84b15a840d98d3e7
Merge branch 'main' of github.com:alibaba-damo-academy/FunASR
add
4 files changed, 68 lines changed
funasr/runtime/python/libtorch/torch_paraformer/paraformer_bin.py           | 24 ●●●●
funasr/runtime/python/libtorch/torch_paraformer/utils/timestamp_utils.py    | 15 ●●●●
funasr/runtime/python/onnxruntime/rapid_paraformer/paraformer_onnx.py       | 23 ●●●●●
funasr/runtime/python/onnxruntime/rapid_paraformer/utils/timestamp_utils.py |  6 ●●●●
funasr/runtime/python/libtorch/torch_paraformer/paraformer_bin.py
@@ -62,26 +62,28 @@
                am_scores, valid_token_lens = outputs[0], outputs[1]
                if len(outputs) == 4:
                    # for BiCifParaformer Inference
-                    us_alphas, us_cif_peak = outputs[2], outputs[3]
+                    us_alphas, us_peaks = outputs[2], outputs[3]
                else:
-                    us_alphas, us_cif_peak = None, None
+                    us_alphas, us_peaks = None, None
            except:
                #logging.warning(traceback.format_exc())
                logging.warning("input wav is silence or noise")
                preds = ['']
            else:
                am_scores, valid_token_lens = am_scores.detach().cpu().numpy(), valid_token_lens.detach().cpu().numpy()
                preds = self.decode(am_scores, valid_token_lens)
-                if us_cif_peak is None:
+                if us_peaks is None:
                    for pred in preds:
                        pred = sentence_postprocess(pred)
                        asr_res.append({'preds': pred})
                else:
-                    for pred, us_cif_peak_ in zip(preds, us_cif_peak):
-                        text, tokens = pred
-                        timestamp, timestamp_total = time_stamp_lfr6_onnx(us_cif_peak_, copy.copy(tokens))
+                    for pred, us_peaks_ in zip(preds, us_peaks):
+                        raw_tokens = pred
+                        timestamp, timestamp_raw = time_stamp_lfr6_onnx(us_peaks_, copy.copy(raw_tokens))
+                        text_proc, timestamp_proc, _ = sentence_postprocess(raw_tokens, timestamp_raw)
                        # logging.warning(timestamp)
                        if len(self.plot_timestamp_to):
-                            self.plot_wave_timestamp(waveform_list[0], timestamp_total, self.plot_timestamp_to)
-                        asr_res.append({'preds': text, 'timestamp': timestamp})
+                            self.plot_wave_timestamp(waveform_list[0], timestamp, self.plot_timestamp_to)
+                        asr_res.append({'preds': text_proc, 'timestamp': timestamp_proc, "raw_tokens": raw_tokens})
        return asr_res
    def plot_wave_timestamp(self, wav, text_timestamp, dest):
@@ -182,6 +184,6 @@
        # Change integer-ids to tokens
        token = self.converter.ids2tokens(token_int)
        token = token[:valid_token_num-self.pred_bias]
-        texts = sentence_postprocess(token)
-        return texts
+        # texts = sentence_postprocess(token)
+        return token
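
With this change, decode() returns the raw token list and sentence_postprocess moves into the caller, so each BiCif result entry now carries the post-processed text, per-token timestamps in milliseconds, and the raw tokens. A minimal consumption sketch under assumed names (the Paraformer constructor and call signature are not shown in this diff; only the asr_res field names are):

    # Sketch only: constructor and invocation are assumptions; the dict
    # fields ('preds', 'timestamp', 'raw_tokens') come from the diff above.
    model = Paraformer(model_dir="/path/to/model")   # hypothetical setup
    asr_res = model(["sample.wav"])                  # hypothetical invocation
    for item in asr_res:
        print(item['preds'])                         # post-processed text
        print(item['timestamp'])                     # [[start_ms, end_ms], ...]
        print(item['raw_tokens'])                    # tokens before postprocessing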
funasr/runtime/python/libtorch/torch_paraformer/utils/timestamp_utils.py
@@ -1,11 +1,11 @@
import numpy as np
-def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0):
+def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0, total_offset=-1.5):
    if not len(char_list):
        return []
    START_END_THRESHOLD = 5
-    MAX_TOKEN_DURATION = 14
+    MAX_TOKEN_DURATION = 30
    TIME_RATE = 10.0 * 6 / 1000 / 3  #  3 times upsampled
    cif_peak = us_cif_peak.reshape(-1)
    num_frames = cif_peak.shape[-1]
@@ -16,7 +16,7 @@
    new_char_list = []
    # for bicif model trained with large data, cif2 actually fires when a character starts
    # so treat the frames between two peaks as the duration of the former token
-    fire_place = np.where(cif_peak>1.0-1e-4)[0] - 1.5  # np format
+    fire_place = np.where(cif_peak>1.0-1e-4)[0] + total_offset  # np format
    num_peak = len(fire_place)
    assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1
    # begin silence
@@ -27,7 +27,7 @@
    # tokens timestamp
    for i in range(len(fire_place)-1):
        new_char_list.append(char_list[i])
-        if MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION:
+        if i == len(fire_place)-2 or MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION:
            timestamp_list.append([fire_place[i]*TIME_RATE, fire_place[i+1]*TIME_RATE])
        else:
            # cut the duration to token and sil of the 0-weight frames last long
@@ -48,11 +48,12 @@
            timestamp_list[i][0] = timestamp_list[i][0] + begin_time / 1000.0
            timestamp_list[i][1] = timestamp_list[i][1] + begin_time / 1000.0
    assert len(new_char_list) == len(timestamp_list)
-    res_total = []
+    res_str = ""
    for char, timestamp in zip(new_char_list, timestamp_list):
-        res_total.append([char, timestamp[0], timestamp[1]])  # += "{} {} {};".format(char, timestamp[0], timestamp[1])
+        res_str += "{} {} {};".format(char, timestamp[0], timestamp[1])
    res = []
    for char, timestamp in zip(new_char_list, timestamp_list):
        if char != '<sil>':
            res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
-    return res, res_total
+    return res_str, res
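
The constant TIME_RATE encodes the frame-to-seconds conversion: 10 ms per encoder frame × 6 (LFR stacking) ÷ 3 (upsampling) = 0.02 s per upsampled frame, so a firing-frame index times TIME_RATE yields seconds. A self-contained toy example of the core peak-to-span logic (peak positions invented; the MAX_TOKEN_DURATION clipping and silence handling are omitted for brevity):

    import numpy as np

    TIME_RATE = 10.0 * 6 / 1000 / 3    # 0.02 s per upsampled frame
    total_offset = -1.5                # new default shift applied to each peak

    # Invented cif2 track: fires at frames 10, 40, 90, 120 (4 peaks -> 3 tokens)
    cif_peak = np.zeros(150)
    cif_peak[[10, 40, 90, 120]] = 1.0

    fire_place = np.where(cif_peak > 1.0 - 1e-4)[0] + total_offset
    # frames between two consecutive fires belong to the earlier token
    for i in range(len(fire_place) - 1):
        start, end = fire_place[i] * TIME_RATE, fire_place[i + 1] * TIME_RATE
        print(f"token {i}: {start:.2f}s - {end:.2f}s")
    # token 0: 0.17s - 0.77s; token 1: 0.77s - 1.77s; token 2: 1.77s - 2.37s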
funasr/runtime/python/onnxruntime/rapid_paraformer/paraformer_onnx.py
@@ -64,25 +64,28 @@
                am_scores, valid_token_lens = outputs[0], outputs[1]
                if len(outputs) == 4:
                    # for BiCifParaformer Inference
-                    us_alphas, us_cif_peak = outputs[2], outputs[3]
+                    us_alphas, us_peaks = outputs[2], outputs[3]
                else:
-                    us_alphas, us_cif_peak = None, None
+                    us_alphas, us_peaks = None, None
            except ONNXRuntimeError:
                #logging.warning(traceback.format_exc())
                logging.warning("input wav is silence or noise")
                preds = ['']
            else:
                preds = self.decode(am_scores, valid_token_lens)
-                if us_cif_peak is None:
+                if us_peaks is None:
                    for pred in preds:
                        pred = sentence_postprocess(pred)
                        asr_res.append({'preds': pred})
                else:
-                    for pred, us_cif_peak_ in zip(preds, us_cif_peak):
-                        text, tokens = pred
-                        timestamp, timestamp_total = time_stamp_lfr6_onnx(us_cif_peak_, copy.copy(tokens))
+                    for pred, us_peaks_ in zip(preds, us_peaks):
+                        raw_tokens = pred
+                        timestamp, timestamp_raw = time_stamp_lfr6_onnx(us_peaks_, copy.copy(raw_tokens))
+                        text_proc, timestamp_proc, _ = sentence_postprocess(raw_tokens, timestamp_raw)
                        # logging.warning(timestamp)
                        if len(self.plot_timestamp_to):
-                            self.plot_wave_timestamp(waveform_list[0], timestamp_total, self.plot_timestamp_to)
-                        asr_res.append({'preds': text, 'timestamp': timestamp})
+                            self.plot_wave_timestamp(waveform_list[0], timestamp, self.plot_timestamp_to)
+                        asr_res.append({'preds': text_proc, 'timestamp': timestamp_proc, "raw_tokens": raw_tokens})
        return asr_res
    def plot_wave_timestamp(self, wav, text_timestamp, dest):
@@ -181,6 +184,6 @@
        # Change integer-ids to tokens
        token = self.converter.ids2tokens(token_int)
        token = token[:valid_token_num-self.pred_bias]
-        texts = sentence_postprocess(token)
-        return texts
+        # texts = sentence_postprocess(token)
+        return token
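
Note the two result shapes that can now coexist in asr_res: the silence/noise and non-BiCif paths still append bare {'preds': ...} entries, while the BiCif path adds 'timestamp' and 'raw_tokens'. Downstream code should therefore not assume every entry has timestamps; a defensive access pattern (field names from the diff, the rest illustrative):

    # Entries may lack timing info if the model exported only 2 outputs or
    # the input was silence/noise, so use .get() instead of direct indexing.
    for item in asr_res:
        ts = item.get('timestamp')
        if ts is None:
            print(item['preds'])                        # text only
        else:
            print(item['preds'], ts, item['raw_tokens'])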
funasr/runtime/python/onnxruntime/rapid_paraformer/utils/timestamp_utils.py
@@ -48,12 +48,12 @@
            timestamp_list[i][0] = timestamp_list[i][0] + begin_time / 1000.0
            timestamp_list[i][1] = timestamp_list[i][1] + begin_time / 1000.0
    assert len(new_char_list) == len(timestamp_list)
-    res_total = []
+    res_str = ""
    for char, timestamp in zip(new_char_list, timestamp_list):
-        res_total.append([char, timestamp[0], timestamp[1]])  # += "{} {} {};".format(char, timestamp[0], timestamp[1])
+        res_str += "{} {} {};".format(char, timestamp[0], timestamp[1])
    res = []
    for char, timestamp in zip(new_char_list, timestamp_list):
        if char != '<sil>':
            res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
-    return res, res_total
+    return res_str, res
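
After this change the helper returns a readable string first and the millisecond list second: '<sil>' tokens stay in the string but are filtered from the list. A toy call with invented inputs (exact output values depend on the begin/end-silence handling not shown in this hunk):

    import numpy as np
    # Assumed import path; adjust to where timestamp_utils lives in your checkout.
    from rapid_paraformer.utils.timestamp_utils import time_stamp_lfr6_onnx

    us_cif_peak = np.zeros(75)
    us_cif_peak[[5, 25, 50, 70]] = 1.0   # 4 peaks -> 3 tokens (peaks = tokens + 1)
    res_str, res = time_stamp_lfr6_onnx(us_cif_peak, ['你', '好', '吗'])
    # res_str is roughly "你 0.07 0.47;好 0.47 0.97;吗 0.97 1.37;" (seconds,
    # may include <sil> entries); res holds the same spans in ms with <sil>
    # dropped, e.g. [[70, 470], [470, 970], [970, 1370]].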