From 94de39dde2e616a01683c518023d0fab72b4e103 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Mon, 19 Feb 2024 22:21:50 +0800
Subject: [PATCH] aishell example: rework timestamp_tools for CIF timestamp prediction

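Rework funasr/utils/timestamp_tools.py: replace time_stamp_lfr6_pl with
ts_prediction_lfr6_standard, which derives token timestamps from the CIF
fire places, re-normalizes the alphas and re-fires via the new
cif_wo_hidden helper when the peak count disagrees with the token count,
caps overlong tokens at MAX_TOKEN_DURATION (emitting the excess as
'<sil>'), and returns both a "char start end;" string and millisecond
spans. timestamp_sentence now tolerates punc/timestamp length mismatches,
inserts Chinese punctuation, and attaches per-sentence "timestamp" lists.

Example usage (a minimal sketch, not part of the patch: the tensors below
are stand-ins for the upsampled alphas/peaks a bicif predictor would
emit, and the tokens and punctuation ids are made up):

    import torch
    from funasr.utils.timestamp_tools import (
        ts_prediction_lfr6_standard, timestamp_sentence)

    # five fires over 60 upsampled frames -> four tokens plus the tail
    us_alphas = torch.rand(60) * 0.1
    us_peaks = torch.zeros(60)
    us_peaks[[5, 15, 25, 35, 45]] = 1.0
    char_list = ['你', '好', '世', '界']

    text, ts = ts_prediction_lfr6_standard(us_alphas, us_peaks, char_list)
    # text: "你 0.070 0.270;..." ; ts: [[start_ms, end_ms], ...], <sil> excluded

    # punc ids > 1 close the current segment and map to ['，','。','？','、'] via id - 2
    sentences = timestamp_sentence([1, 2, 1, 3], ts, "你 好 世 界")
    # -> [{'text': '你好，', 'start': ..., 'end': ..., 'timestamp': [...]},
    #     {'text': '世界。', ...}]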
---
 funasr/utils/timestamp_tools.py |  165 ++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 109 insertions(+), 56 deletions(-)

diff --git a/funasr/utils/timestamp_tools.py b/funasr/utils/timestamp_tools.py
index f6a6e98..63f179a 100644
--- a/funasr/utils/timestamp_tools.py
+++ b/funasr/utils/timestamp_tools.py
@@ -1,107 +1,160 @@
 import torch
-import copy
+import codecs
 import logging
+import argparse
 import numpy as np
-from typing import Any, List, Tuple, Union
+# import edit_distance
+from itertools import zip_longest
 
-def time_stamp_lfr6_pl(us_alphas, us_cif_peak, char_list, begin_time=0.0, end_time=None):
+
+def cif_wo_hidden(alphas, threshold):
+    batch_size, len_time = alphas.size()
+    # loop vars: running integral of the alpha weights
+    integrate = torch.zeros([batch_size], device=alphas.device)
+    # intermediate vars along time
+    list_fires = []
+    for t in range(len_time):
+        alpha = alphas[:, t]
+        integrate += alpha
+        list_fires.append(integrate)
+        fire_place = integrate >= threshold
+        integrate = torch.where(fire_place,
+                                integrate - torch.ones([batch_size], device=alphas.device)*threshold,
+                                integrate)
+    fires = torch.stack(list_fires, 1)
+    return fires
+
+
+def ts_prediction_lfr6_standard(us_alphas,
+                                us_peaks,
+                                char_list,
+                                vad_offset=0.0,
+                                force_time_shift=-1.5,
+                                sil_in_str=True,
+                                ):
     if not len(char_list):
-        return []
+        return "", []
     START_END_THRESHOLD = 5
+    MAX_TOKEN_DURATION = 12
     TIME_RATE = 10.0 * 6 / 1000 / 3  #  3 times upsampled
-    if len(us_alphas.shape) == 3:
-        alphas, cif_peak = us_alphas[0], us_cif_peak[0]  # support inference batch_size=1 only
+    if len(us_alphas.shape) == 2:
+        alphas, peaks = us_alphas[0], us_peaks[0]  # support inference batch_size=1 only
     else:
-        alphas, cif_peak = us_alphas, us_cif_peak
-    num_frames = cif_peak.shape[0]
+        alphas, peaks = us_alphas, us_peaks
     if char_list[-1] == '</s>':
         char_list = char_list[:-1]
-    # char_list = [i for i in text]
+    fire_place = torch.where(peaks>1.0-1e-4)[0].cpu().numpy() + force_time_shift  # total offset
+    if len(fire_place) != len(char_list) + 1:
+        alphas /= (alphas.sum() / (len(char_list) + 1))
+        alphas = alphas.unsqueeze(0)
+        peaks = cif_wo_hidden(alphas, threshold=1.0-1e-4)[0]
+        fire_place = torch.where(peaks>1.0-1e-4)[0].cpu().numpy() + force_time_shift  # total offset
+    num_frames = peaks.shape[0]
     timestamp_list = []
+    new_char_list = []
     # for bicif model trained with large data, cif2 actually fires when a character starts
     # so treat the frames between two peaks as the duration of the former token
-    fire_place = torch.where(cif_peak>1.0-1e-4)[0].cpu().numpy() - 1.5
-    num_peak = len(fire_place)
-    assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1
+    fire_place = torch.where(peaks>1.0-1e-4)[0].cpu().numpy() + force_time_shift  # total offset
+    # assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1
     # begin silence
     if fire_place[0] > START_END_THRESHOLD:
-        char_list.insert(0, '<sil>')
+        # char_list.insert(0, '<sil>')
         timestamp_list.append([0.0, fire_place[0]*TIME_RATE])
+        new_char_list.append('<sil>')
     # tokens timestamp
     for i in range(len(fire_place)-1):
-        # the peak is always a little ahead of the start time
-        # timestamp_list.append([(fire_place[i]-1.2)*TIME_RATE, fire_place[i+1]*TIME_RATE])
-        timestamp_list.append([(fire_place[i])*TIME_RATE, fire_place[i+1]*TIME_RATE])
-        # cut the duration to token and sil of the 0-weight frames last long
+        new_char_list.append(char_list[i])
+        if MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] <= MAX_TOKEN_DURATION:
+            timestamp_list.append([fire_place[i]*TIME_RATE, fire_place[i+1]*TIME_RATE])
+        else:
+            # duration too long: keep MAX_TOKEN_DURATION for the token and emit the rest as <sil>
+            _split = fire_place[i] + MAX_TOKEN_DURATION
+            timestamp_list.append([fire_place[i]*TIME_RATE, _split*TIME_RATE])
+            timestamp_list.append([_split*TIME_RATE, fire_place[i+1]*TIME_RATE])
+            new_char_list.append('<sil>')
     # tail token and end silence
+    # new_char_list.append(char_list[-1])
     if num_frames - fire_place[-1] > START_END_THRESHOLD:
-        _end = (num_frames + fire_place[-1]) / 2
+        _end = (num_frames + fire_place[-1]) * 0.5
+        # _end = fire_place[-1] 
         timestamp_list[-1][1] = _end*TIME_RATE
         timestamp_list.append([_end*TIME_RATE, num_frames*TIME_RATE])
-        char_list.append("<sil>")
+        new_char_list.append("<sil>")
     else:
         timestamp_list[-1][1] = num_frames*TIME_RATE
-    if begin_time:  # add offset time in model with vad
+    if vad_offset:  # add offset time in model with vad
         for i in range(len(timestamp_list)):
-            timestamp_list[i][0] = timestamp_list[i][0] + begin_time / 1000.0
-            timestamp_list[i][1] = timestamp_list[i][1] + begin_time / 1000.0
+            timestamp_list[i][0] = timestamp_list[i][0] + vad_offset / 1000.0
+            timestamp_list[i][1] = timestamp_list[i][1] + vad_offset / 1000.0
     res_txt = ""
-    for char, timestamp in zip(char_list, timestamp_list):
-        res_txt += "{} {} {};".format(char, timestamp[0], timestamp[1])
+    for char, timestamp in zip(new_char_list, timestamp_list):
+        # optionally drop '<sil>' tokens from the string output
+        if not sil_in_str and char == '<sil>': continue
+        res_txt += "{} {} {};".format(char, str(timestamp[0]+0.0005)[:5], str(timestamp[1]+0.0005)[:5])
     res = []
-    for char, timestamp in zip(char_list, timestamp_list):
+    for char, timestamp in zip(new_char_list, timestamp_list):
         if char != '<sil>':
             res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
-    return res
+    return res_txt, res
 
-def time_stamp_sentence(punc_id_list, time_stamp_postprocessed, text_postprocessed):
+
+def timestamp_sentence(punc_id_list, timestamp_postprocessed, text_postprocessed):
+    punc_list = ['，', '。', '？', '、']
     res = []
     if text_postprocessed is None:
         return res
-    if time_stamp_postprocessed is None:
+    if timestamp_postprocessed is None:
         return res
-    if len(time_stamp_postprocessed) == 0:
+    if len(timestamp_postprocessed) == 0:
         return res
     if len(text_postprocessed) == 0:
         return res
+
     if punc_id_list is None or len(punc_id_list) == 0:
         res.append({
             'text': text_postprocessed.split(),
-            "start": time_stamp_postprocessed[0][0],
-            "end": time_stamp_postprocessed[-1][1]
+            "start": timestamp_postprocessed[0][0],
+            "end": timestamp_postprocessed[-1][1],
+            "timestamp": timestamp_postprocessed,
         })
         return res
-    if len(punc_id_list) != len(time_stamp_postprocessed):
-        res.append({
-            'text': text_postprocessed.split(),
-            "start": time_stamp_postprocessed[0][0],
-            "end": time_stamp_postprocessed[-1][1]
-        })
-        return res
-
-    sentence_text = ''
-    sentence_start = time_stamp_postprocessed[0][0]
+    if len(punc_id_list) != len(timestamp_postprocessed):
+        logging.warning("length mismatch between punc and timestamp")
+    sentence_text = ""
+    sentence_text_seg = ""
+    ts_list = []
+    sentence_start = timestamp_postprocessed[0][0]
+    sentence_end = timestamp_postprocessed[0][1]
     texts = text_postprocessed.split()
-    for i in range(len(punc_id_list)):
-        sentence_text += texts[i]
-        if punc_id_list[i] == 2:
-            sentence_text += ','
+    punc_stamp_text_list = list(zip_longest(punc_id_list, timestamp_postprocessed, texts, fillvalue=None))
+    for punc_stamp_text in punc_stamp_text_list:
+        punc_id, timestamp, text = punc_stamp_text
+        # sentence_text += text if text is not None else ''
+        if text is not None:
+            if 'a' <= text[0] <= 'z' or 'A' <= text[0] <= 'Z':
+                sentence_text += ' ' + text
+            elif len(sentence_text) and ('a' <= sentence_text[-1] <= 'z' or 'A' <= sentence_text[-1] <= 'Z'):
+                sentence_text += ' ' + text
+            else:
+                sentence_text += text
+            sentence_text_seg += text + ' '
+        ts_list.append(timestamp)
+
+        punc_id = int(punc_id) if punc_id is not None else 1
+        sentence_end = timestamp[1] if timestamp is not None else sentence_end
+
+        if punc_id > 1:
+            sentence_text += punc_list[punc_id - 2]
             res.append({
                 'text': sentence_text,
                 "start": sentence_start,
-                "end": time_stamp_postprocessed[i][1]
+                "end": sentence_end,
+                "timestamp": ts_list
             })
             sentence_text = ''
-            sentence_start = time_stamp_postprocessed[i][1]
-        elif punc_id_list[i] == 3:
-            sentence_text += '.'
-            res.append({
-                'text': sentence_text,
-                "start": sentence_start,
-                "end": time_stamp_postprocessed[i][1]
-            })
-            sentence_text = ''
-            sentence_start = time_stamp_postprocessed[i][1]
+            sentence_text_seg = ''
+            ts_list = []
+            sentence_start = sentence_end
     return res
 
 

--
Gitblit v1.9.1