From 94de39dde2e616a01683c518023d0fab72b4e103 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 19 Feb 2024 22:21:50 +0800
Subject: [PATCH] aishell example
---
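
A minimal usage sketch of the reworked API (hypothetical input shapes and
tokens; assumes upsampled CIF weights from a BiCIF predictor, batch_size=1):

    import torch
    from funasr.utils.timestamp_tools import (cif_wo_hidden,
                                              ts_prediction_lfr6_standard)

    us_alphas = torch.rand(1, 100) * 0.1  # stand-in for predictor output
    us_peaks = cif_wo_hidden(us_alphas, threshold=1.0 - 1e-4)
    txt, ts = ts_prediction_lfr6_standard(us_alphas, us_peaks,
                                          char_list=['你', '好'],
                                          vad_offset=0.0)
    # txt is a "token start end;" string; ts holds [start_ms, end_ms]
    # pairs for the non-<sil> tokens
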
funasr/utils/timestamp_tools.py | 232 ++++++++++++++++++++++++++++++++--------------------------
1 file changed, 128 insertions(+), 104 deletions(-)
diff --git a/funasr/utils/timestamp_tools.py b/funasr/utils/timestamp_tools.py
index 12337d1..63f179a 100644
--- a/funasr/utils/timestamp_tools.py
+++ b/funasr/utils/timestamp_tools.py
@@ -1,137 +1,161 @@
import torch
-import copy
+import codecs
import logging
+import argparse
import numpy as np
-from typing import Any, List, Tuple, Union
+# import edit_distance
+from itertools import zip_longest
-def cut_interval(alphas: torch.Tensor, start: int, end: int, tail: bool):
- if not tail:
- if end == start + 1:
- cut = (end + start) / 2.0
- else:
- alpha = alphas[start+1: end].tolist()
- reverse_steps = 1
- for reverse_alpha in alpha[::-1]:
- if reverse_alpha > 0.35:
- reverse_steps += 1
- else:
- break
- cut = end - reverse_steps
- else:
- if end != len(alphas) - 1:
- cut = end + 1
- else:
- cut = start + 1
- return float(cut)
-def time_stamp_lfr6(alphas: torch.Tensor, speech_lengths: torch.Tensor, raw_text: List[str], begin: int = 0, end: int = None):
- time_stamp_list = []
- alphas = alphas[0]
- text = copy.deepcopy(raw_text)
- if end is None:
- time = speech_lengths * 60 / 1000
- sacle_rate = (time / speech_lengths[0]).tolist()
- else:
- time = (end - begin) / 1000
- sacle_rate = (time / speech_lengths[0]).tolist()
+def cif_wo_hidden(alphas, threshold):
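+    # Continuous Integrate-and-Fire without returning hidden states: accumulate
+    # the predictor weights frame by frame; a frame "fires" when the running
+    # integral crosses the threshold, after which the threshold is subtracted.
+    # Returns the per-frame integral, so callers locate fires via peaks > threshold.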
+ batch_size, len_time = alphas.size()
+    # loop vars
+ integrate = torch.zeros([batch_size], device=alphas.device)
+ # intermediate vars along time
+ list_fires = []
+ for t in range(len_time):
+ alpha = alphas[:, t]
+ integrate += alpha
+ list_fires.append(integrate)
+ fire_place = integrate >= threshold
+ integrate = torch.where(fire_place,
+ integrate - torch.ones([batch_size], device=alphas.device)*threshold,
+ integrate)
+ fires = torch.stack(list_fires, 1)
+ return fires
- predictor = (alphas > 0.5).int()
- fire_places = torch.nonzero(predictor == 1).squeeze(1).tolist()
-
- cuts = []
- npeak = int(predictor.sum())
- nchar = len(raw_text)
- if npeak - 1 == nchar:
- fire_places = torch.where((alphas > 0.5) == 1)[0].tolist()
- for i in range(len(fire_places)):
- if fire_places[i] < len(alphas) - 1:
- if 0.05 < alphas[fire_places[i]+1] < 0.5:
- fire_places[i] += 1
- elif npeak < nchar:
- lost_num = nchar - npeak
- lost_fire = speech_lengths[0].tolist() - fire_places[-1]
- interval_distance = lost_fire // (lost_num + 1)
- for i in range(1, lost_num + 1):
- fire_places.append(fire_places[-1] + interval_distance)
- elif npeak - 1 > nchar:
- redundance_num = npeak - 1 - nchar
- for i in range(redundance_num):
- fire_places.pop()
- cuts.append(0)
- start_sil = True
- if start_sil:
- text.insert(0, '<sil>')
-
- for i in range(len(fire_places)-1):
- cuts.append(cut_interval(alphas, fire_places[i], fire_places[i+1], tail=(i==len(fire_places)-2)))
-
- for i in range(2, len(fire_places)-2):
- if fire_places[i-2] == fire_places[i-1] - 1 and fire_places[i-1] != fire_places[i] - 1:
- cuts[i-1] += 1
-
- if cuts[-1] != len(alphas) - 1:
- text.append('<sil>')
- cuts.append(speech_lengths[0].tolist())
- cuts.insert(-1, (cuts[-1] + cuts[-2]) * 0.5)
- sec_fire_places = np.array(cuts) * sacle_rate
- for i in range(1, len(sec_fire_places) - 1):
- start, end = sec_fire_places[i], sec_fire_places[i+1]
- if i == len(sec_fire_places) - 2:
- end = time
- time_stamp_list.append([int(round(start, 2) * 1000) + begin, int(round(end, 2) * 1000) + begin])
- text = text[1:]
- if npeak - 1 == nchar or npeak > nchar:
- return time_stamp_list[:-1]
- else:
- return time_stamp_list
-
-def time_stamp_lfr6_pl(us_alphas, us_cif_peak, char_list, begin_time=0.0, end_time=None):
+def ts_prediction_lfr6_standard(us_alphas,
+ us_peaks,
+ char_list,
+ vad_offset=0.0,
+ force_time_shift=-1.5,
+ sil_in_str=True
+ ):
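+    # us_alphas/us_peaks: upsampled CIF weights and fire peaks (batched or not);
+    # vad_offset: segment start offset in ms when decoding behind VAD;
+    # force_time_shift: frames to shift each peak back, since a peak fires
+    # slightly ahead of the true token start; sil_in_str: keep <sil> tokens
+    # in the returned string.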
+ if not len(char_list):
+ return "", []
START_END_THRESHOLD = 5
+ MAX_TOKEN_DURATION = 12
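+    # MAX_TOKEN_DURATION is in upsampled frames: 12 * TIME_RATE = 0.24 s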
TIME_RATE = 10.0 * 6 / 1000 / 3 # 3 times upsampled
- if len(us_alphas.shape) == 3:
- alphas, cif_peak = us_alphas[0], us_cif_peak[0] # support inference batch_size=1 only
+ if len(us_alphas.shape) == 2:
+ alphas, peaks = us_alphas[0], us_peaks[0] # support inference batch_size=1 only
else:
- alphas, cif_peak = us_alphas, us_cif_peak
- num_frames = cif_peak.shape[0]
+ alphas, peaks = us_alphas, us_peaks
if char_list[-1] == '</s>':
char_list = char_list[:-1]
- # char_list = [i for i in text]
+ fire_place = torch.where(peaks>1.0-1e-4)[0].cpu().numpy() + force_time_shift # total offset
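+    # if the number of peaks is not len(char_list) + 1, rescale the weights so
+    # they integrate to exactly that many fires and recompute the peaks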
+ if len(fire_place) != len(char_list) + 1:
+ alphas /= (alphas.sum() / (len(char_list) + 1))
+ alphas = alphas.unsqueeze(0)
+ peaks = cif_wo_hidden(alphas, threshold=1.0-1e-4)[0]
+ fire_place = torch.where(peaks>1.0-1e-4)[0].cpu().numpy() + force_time_shift # total offset
+ num_frames = peaks.shape[0]
timestamp_list = []
+ new_char_list = []
# for bicif model trained with large data, cif2 actually fires when a character starts
# so treat the frames between two peaks as the duration of the former token
- fire_place = torch.where(cif_peak>1.0-1e-4)[0].cpu().numpy() - 1.5
- num_peak = len(fire_place)
- assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1
+ fire_place = torch.where(peaks>1.0-1e-4)[0].cpu().numpy() + force_time_shift # total offset
+ # assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1
# begin silence
if fire_place[0] > START_END_THRESHOLD:
- char_list.insert(0, '<sil>')
+ # char_list.insert(0, '<sil>')
timestamp_list.append([0.0, fire_place[0]*TIME_RATE])
+ new_char_list.append('<sil>')
# tokens timestamp
for i in range(len(fire_place)-1):
- # the peak is always a little ahead of the start time
- # timestamp_list.append([(fire_place[i]-1.2)*TIME_RATE, fire_place[i+1]*TIME_RATE])
- timestamp_list.append([(fire_place[i])*TIME_RATE, fire_place[i+1]*TIME_RATE])
- # cut the duration to token and sil of the 0-weight frames last long
+ new_char_list.append(char_list[i])
+ if MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] <= MAX_TOKEN_DURATION:
+ timestamp_list.append([fire_place[i]*TIME_RATE, fire_place[i+1]*TIME_RATE])
+ else:
+            # split an over-long duration: the token keeps MAX_TOKEN_DURATION
+            # frames and the trailing run of near-zero-weight frames becomes <sil>
+ _split = fire_place[i] + MAX_TOKEN_DURATION
+ timestamp_list.append([fire_place[i]*TIME_RATE, _split*TIME_RATE])
+ timestamp_list.append([_split*TIME_RATE, fire_place[i+1]*TIME_RATE])
+ new_char_list.append('<sil>')
# tail token and end silence
+ # new_char_list.append(char_list[-1])
if num_frames - fire_place[-1] > START_END_THRESHOLD:
- _end = (num_frames + fire_place[-1]) / 2
+ _end = (num_frames + fire_place[-1]) * 0.5
+ # _end = fire_place[-1]
timestamp_list[-1][1] = _end*TIME_RATE
timestamp_list.append([_end*TIME_RATE, num_frames*TIME_RATE])
- char_list.append("<sil>")
+ new_char_list.append("<sil>")
else:
timestamp_list[-1][1] = num_frames*TIME_RATE
- if begin_time: # add offset time in model with vad
+    if vad_offset: # add the VAD segment's start offset
for i in range(len(timestamp_list)):
- timestamp_list[i][0] = timestamp_list[i][0] + begin_time / 1000.0
- timestamp_list[i][1] = timestamp_list[i][1] + begin_time / 1000.0
+ timestamp_list[i][0] = timestamp_list[i][0] + vad_offset / 1000.0
+ timestamp_list[i][1] = timestamp_list[i][1] + vad_offset / 1000.0
res_txt = ""
- for char, timestamp in zip(char_list, timestamp_list):
- res_txt += "{} {} {};".format(char, timestamp[0], timestamp[1])
- logging.warning(res_txt) # for test
+ for char, timestamp in zip(new_char_list, timestamp_list):
+ #if char != '<sil>':
+ if not sil_in_str and char == '<sil>': continue
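+        # adding 0.0005 and truncating the string to 5 chars is a cheap rounding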
+ res_txt += "{} {} {};".format(char, str(timestamp[0]+0.0005)[:5], str(timestamp[1]+0.0005)[:5])
res = []
- for char, timestamp in zip(char_list, timestamp_list):
+ for char, timestamp in zip(new_char_list, timestamp_list):
if char != '<sil>':
res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
+ return res_txt, res
+
+
+def timestamp_sentence(punc_id_list, timestamp_postprocessed, text_postprocessed):
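+    # group token-level timestamps into sentences: a punc id > 1 marks
+    # sentence-ending punctuation, and (punc_id - 2) indexes punc_list below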
+    punc_list = ['，', '。', '？', '、']
+ res = []
+ if text_postprocessed is None:
+ return res
+ if timestamp_postprocessed is None:
+ return res
+ if len(timestamp_postprocessed) == 0:
+ return res
+ if len(text_postprocessed) == 0:
+ return res
+
+ if punc_id_list is None or len(punc_id_list) == 0:
+ res.append({
+ 'text': text_postprocessed.split(),
+ "start": timestamp_postprocessed[0][0],
+ "end": timestamp_postprocessed[-1][1],
+ "timestamp": timestamp_postprocessed,
+ })
+ return res
+ if len(punc_id_list) != len(timestamp_postprocessed):
+ logging.warning("length mismatch between punc and timestamp")
+ sentence_text = ""
+ sentence_text_seg = ""
+ ts_list = []
+ sentence_start = timestamp_postprocessed[0][0]
+ sentence_end = timestamp_postprocessed[0][1]
+ texts = text_postprocessed.split()
+ punc_stamp_text_list = list(zip_longest(punc_id_list, timestamp_postprocessed, texts, fillvalue=None))
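+    # zip_longest pads the shorter sequence with None, tolerating the length
+    # mismatch warned about above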
+ for punc_stamp_text in punc_stamp_text_list:
+ punc_id, timestamp, text = punc_stamp_text
+ # sentence_text += text if text is not None else ''
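+        # put a space around Latin-script tokens; CJK tokens concatenate directly
+        # (sentence_text_seg keeps the space-separated form)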
+ if text is not None:
+ if 'a' <= text[0] <= 'z' or 'A' <= text[0] <= 'Z':
+ sentence_text += ' ' + text
+ elif len(sentence_text) and ('a' <= sentence_text[-1] <= 'z' or 'A' <= sentence_text[-1] <= 'Z'):
+ sentence_text += ' ' + text
+ else:
+ sentence_text += text
+ sentence_text_seg += text + ' '
+ ts_list.append(timestamp)
+
+ punc_id = int(punc_id) if punc_id is not None else 1
+ sentence_end = timestamp[1] if timestamp is not None else sentence_end
+
+ if punc_id > 1:
+ sentence_text += punc_list[punc_id - 2]
+ res.append({
+ 'text': sentence_text,
+ "start": sentence_start,
+ "end": sentence_end,
+ "timestamp": ts_list
+ })
+ sentence_text = ''
+ sentence_text_seg = ''
+ ts_list = []
+ sentence_start = sentence_end
return res
+
+
--
Gitblit v1.9.1