Update timestamp computation and add a waveform/timestamp plotter
| | |
| | | |
# Smoke test: run the exported ONNX Paraformer model on a bundled example wav.
# NOTE(review): `model_dir` is defined earlier in the script (not visible here).
model = Paraformer(model_dir, batch_size=1)

# The tiny-model path that used to be assigned first was a dead store
# (immediately overwritten), so only the large-model example is kept.
wav_path = ['/Users/shixian/code/funasr2/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav']

result = model(wav_path)
print(result)
| | |
| | | # -*- encoding: utf-8 -*- |
| | | # @Author: SWHL |
| | | # @Contact: liekkaskono@163.com |
| | | from cgitb import text |
| | | import os.path |
| | | from pathlib import Path |
| | | from typing import List, Union, Tuple |
| | |
| | | def __init__(self, model_dir: Union[str, Path] = None, |
| | | batch_size: int = 1, |
| | | device_id: Union[str, int] = "-1", |
| | | plot_timestamp: bool = True, |
| | | ): |
| | | |
| | | if not Path(model_dir).exists(): |
| | |
| | | ) |
| | | self.ort_infer = OrtInferSession(model_file, device_id) |
| | | self.batch_size = batch_size |
| | | self.plot = True |
| | | self.plot = plot_timestamp |
| | | |
| | | def __call__(self, wav_content: Union[str, np.ndarray, List[str]], **kwargs) -> List: |
| | | waveform_list = self.load_data(wav_content, self.frontend.opts.frame_opts.samp_freq) |
| | |
| | | |
def plot_wave_timestamp(self, wav, text_timestamp):
    """Render the waveform with per-token timestamp markers to ``debug.png``.

    Args:
        wav: 1-D array of waveform samples; the time axis assumes a
            16 kHz sample rate — TODO confirm against the frontend config.
        text_timestamp: iterable of ``(char, start_sec, end_sec)`` tuples,
            one per decoded token (``'<sil>'`` marks silence spans).

    Side effect: writes ``debug.png`` to the current working directory.
    """
    # Fixed: removed a leftover `pdb.set_trace()` that would hang any
    # non-interactive run the moment plotting was enabled.
    # Import lazily so matplotlib is only required when plotting is used.
    import matplotlib
    matplotlib.use('Agg')  # headless backend: render straight to a file
    matplotlib.rc("font", family='Alibaba PuHuiTi')  # set it to a font that your system supports
    import matplotlib.pyplot as plt

    fig, ax1 = plt.subplots(figsize=(11, 3.5), dpi=320)
    ax2 = ax1.twinx()
    ax2.set_ylim([0, 2.0])

    # Plot the waveform normalized into [-0.3, 0.3] so the token labels
    # drawn at y=0 remain readable over it.
    ax1.set_ylim([-0.3, 0.3])
    time = np.arange(wav.shape[0]) / 16000
    ax1.plot(time, wav / wav.max() * 0.3, color='gray', alpha=0.4)

    # Draw token boundaries and center each token's text in its span.
    for (char, start, end) in text_timestamp:
        ax1.vlines(start, -0.3, 0.3, ls='--')
        ax1.vlines(end, -0.3, 0.3, ls='--')
        # Silence labels are wider, so shift them further left to center.
        x_adj = 0.045 if char != '<sil>' else 0.12
        ax1.text((start + end) * 0.5 - x_adj, 0, char)

    plotname = "debug.png"
    plt.savefig(plotname, bbox_inches='tight')
| | | |
| | | def load_data(self, |
| | | wav_content: Union[str, np.ndarray, List[str]], fs: int = None) -> List: |
| | |
| | | import numpy as np |
| | | |
| | | |
| | | def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0): |
| | | def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0, total_offset=-1.5): |
| | | if not len(char_list): |
| | | return [] |
| | | START_END_THRESHOLD = 5 |
| | | MAX_TOKEN_DURATION = 14 |
| | | MAX_TOKEN_DURATION = 30 |
| | | TIME_RATE = 10.0 * 6 / 1000 / 3 # 3 times upsampled |
| | | cif_peak = us_cif_peak.reshape(-1) |
| | | num_frames = cif_peak.shape[-1] |
| | |
| | | new_char_list = [] |
| | | # for bicif model trained with large data, cif2 actually fires when a character starts |
| | | # so treat the frames between two peaks as the duration of the former token |
| | | fire_place = np.where(cif_peak>1.0-1e-4)[0] - 1.5 # np format |
| | | fire_place = np.where(cif_peak>1.0-1e-4)[0] + total_offset # np format |
| | | num_peak = len(fire_place) |
| | | assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1 |
| | | # begin silence |
| | |
| | | # tokens timestamp |
| | | for i in range(len(fire_place)-1): |
| | | new_char_list.append(char_list[i]) |
| | | if MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION: |
| | | if i == len(fire_place)-2 or MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION: |
| | | timestamp_list.append([fire_place[i]*TIME_RATE, fire_place[i+1]*TIME_RATE]) |
| | | else: |
| | | # cut the duration to token and sil of the 0-weight frames last long |
| | |
| | | for char, timestamp in zip(new_char_list, timestamp_list): |
| | | if char != '<sil>': |
| | | res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)]) |
| | | return res, res_total |
| | | return res, res_total |
| | | |