From d2dc3af1a69ee4075bcfc0c83dc0fb8e3fc1db4e Mon Sep 17 00:00:00 2001
From: yhliang <68215459+yhliang-aslp@users.noreply.github.com>
Date: Thu, 11 May 2023 16:31:40 +0800
Subject: [PATCH] Merge pull request #492 from alibaba-damo-academy/dev_smohan
---
funasr/bin/tp_inference.py | 87 +++++++++++++------------------------------
1 file changed, 27 insertions(+), 60 deletions(-)
diff --git a/funasr/bin/tp_inference.py b/funasr/bin/tp_inference.py
index e7a1f1b..6e513c5 100644
--- a/funasr/bin/tp_inference.py
+++ b/funasr/bin/tp_inference.py
@@ -28,6 +28,8 @@
from funasr.utils.types import str_or_none
from funasr.models.frontend.wav_frontend import WavFrontend
from funasr.text.token_id_converter import TokenIDConverter
+from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
+
header_colors = '\033[95m'
end_colors = '\033[0m'
@@ -37,61 +39,6 @@
'audio_fs': 16000,
'model_fs': 16000
}
-
-def time_stamp_lfr6_advance(us_alphas, us_cif_peak, char_list):
- START_END_THRESHOLD = 5
- MAX_TOKEN_DURATION = 12
- TIME_RATE = 10.0 * 6 / 1000 / 3 # 3 times upsampled
- if len(us_cif_peak.shape) == 2:
- alphas, cif_peak = us_alphas[0], us_cif_peak[0] # support inference batch_size=1 only
- else:
- alphas, cif_peak = us_alphas, us_cif_peak
- num_frames = cif_peak.shape[0]
- if char_list[-1] == '</s>':
- char_list = char_list[:-1]
- # char_list = [i for i in text]
- timestamp_list = []
- new_char_list = []
- # for bicif model trained with large data, cif2 actually fires when a character starts
- # so treat the frames between two peaks as the duration of the former token
- fire_place = torch.where(cif_peak>1.0-1e-4)[0].cpu().numpy() - 3.2 # total offset
- num_peak = len(fire_place)
- assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1
- # begin silence
- if fire_place[0] > START_END_THRESHOLD:
- # char_list.insert(0, '<sil>')
- timestamp_list.append([0.0, fire_place[0]*TIME_RATE])
- new_char_list.append('<sil>')
- # tokens timestamp
- for i in range(len(fire_place)-1):
- new_char_list.append(char_list[i])
- if MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION:
- timestamp_list.append([fire_place[i]*TIME_RATE, fire_place[i+1]*TIME_RATE])
- else:
- # cut the duration to token and sil of the 0-weight frames last long
- _split = fire_place[i] + MAX_TOKEN_DURATION
- timestamp_list.append([fire_place[i]*TIME_RATE, _split*TIME_RATE])
- timestamp_list.append([_split*TIME_RATE, fire_place[i+1]*TIME_RATE])
- new_char_list.append('<sil>')
- # tail token and end silence
- # new_char_list.append(char_list[-1])
- if num_frames - fire_place[-1] > START_END_THRESHOLD:
- _end = (num_frames + fire_place[-1]) * 0.5
- # _end = fire_place[-1]
- timestamp_list[-1][1] = _end*TIME_RATE
- timestamp_list.append([_end*TIME_RATE, num_frames*TIME_RATE])
- new_char_list.append("<sil>")
- else:
- timestamp_list[-1][1] = num_frames*TIME_RATE
- assert len(new_char_list) == len(timestamp_list)
- res_str = ""
- for char, timestamp in zip(new_char_list, timestamp_list):
- res_str += "{} {} {};".format(char, str(timestamp[0]+0.0005)[:5], str(timestamp[1]+0.0005)[:5])
- res = []
- for char, timestamp in zip(new_char_list, timestamp_list):
- if char != '<sil>':
- res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
- return res_str, res
class SpeechText2Timestamp:
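
The deleted helper is not dropped; it has been moved to funasr/utils/timestamp_tools.py as ts_prediction_lfr6_standard (see the new import above), so other entry points can reuse it. Its core idea, as a minimal standalone sketch (assuming a 1-D cif_peak tensor whose values approach 1.0 at firing frames, as in the deleted code): adjacent firing frames bracket one token, and frame indices convert to seconds at the LFR-6, 3x-upsampled rate of 10 ms * 6 / 3 per frame. The relocated helper additionally handles the leading/trailing silence and MAX_TOKEN_DURATION splitting shown above.

    import torch

    TIME_RATE = 10.0 * 6 / 1000 / 3  # seconds per 3x-upsampled LFR-6 frame

    def peaks_to_timestamps(cif_peak: torch.Tensor, tokens: list):
        # Frames where the CIF integrator fires (weight crosses 1.0).
        fires = torch.where(cif_peak > 1.0 - 1e-4)[0].cpu().numpy()
        # The bicif predictor fires at each token start plus once at the
        # tail, so peaks = tokens + 1 and peaks i..i+1 bracket token i.
        assert len(fires) == len(tokens) + 1
        return [(tok, fires[i] * TIME_RATE, fires[i + 1] * TIME_RATE)
                for i, tok in enumerate(tokens)]
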
@@ -107,7 +54,7 @@
assert check_argument_types()
# 1. Build ASR model
tp_model, tp_train_args = ASRTask.build_model_from_file(
- timestamp_infer_config, timestamp_model_file, device
+ timestamp_infer_config, timestamp_model_file, device=device
)
if 'cuda' in device:
tp_model = tp_model.cuda() # force model to cuda
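
The keyword form is the real fix here: passed positionally, the device string can silently bind to an optional parameter sitting between model_file and device in build_model_from_file's signature, leaving the model on CPU. A toy illustration of the hazard (the extra cmvn_file slot below is hypothetical, used only to show the binding problem, not FunASR's actual signature):

    # Hypothetical signature with an optional slot before `device`.
    def build_model_from_file(config=None, model_file=None,
                              cmvn_file=None, device="cpu"):
        return cmvn_file, device

    print(build_model_from_file("tp.yaml", "tp.pb", "cuda:0"))
    # -> ('cuda:0', 'cpu')   positional arg landed in the wrong slot
    print(build_model_from_file("tp.yaml", "tp.pb", device="cuda:0"))
    # -> (None, 'cuda:0')    keyword form is unambiguous
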
@@ -169,8 +116,8 @@
enc = enc[0]
# c. Forward Predictor
- _, _, us_alphas, us_cif_peak = self.tp_model.calc_predictor_timestamp(enc, enc_len, text_lengths.to(self.device)+1)
- return us_alphas, us_cif_peak
+ _, _, us_alphas, us_peaks = self.tp_model.calc_predictor_timestamp(enc, enc_len, text_lengths.to(self.device)+1)
+ return us_alphas, us_peaks
def inference(
@@ -232,6 +179,9 @@
**kwargs,
):
assert check_argument_types()
+ ncpu = kwargs.get("ncpu", 1)
+ torch.set_num_threads(ncpu)
+
if batch_size > 1:
raise NotImplementedError("batch decoding is not implemented")
if ngpu > 1:
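
torch.set_num_threads caps PyTorch's intra-op CPU thread pool process-wide; defaulting ncpu to 1 keeps single-utterance decoding from oversubscribing a host that runs many workers. A minimal sketch of the pattern:

    import torch

    def run_on_cpu(x: torch.Tensor, ncpu: int = 1) -> torch.Tensor:
        torch.set_num_threads(ncpu)  # process-wide intra-op thread cap
        with torch.no_grad():
            return x @ x.T  # stand-in for the model forward pass

    print(run_on_cpu(torch.randn(4, 8), ncpu=2).shape)  # torch.Size([4, 4])
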
@@ -272,6 +222,13 @@
split_with_space=split_with_space,
seg_dict_file=seg_dict_file,
)
+
+ if output_dir is not None:
+ writer = DatadirWriter(output_dir)
+ tp_writer = writer[f"timestamp_prediction"]
+ # ibest_writer["token_list"][""] = " ".join(speech2text.asr_train_args.token_list)
+ else:
+ tp_writer = None
def _forward(
data_path_and_name_and_type,
@@ -280,7 +237,14 @@
fs: dict = None,
param_dict: dict = None,
**kwargs
- ):
+ ):
+ output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+ writer = None
+ if output_path is not None:
+ writer = DatadirWriter(output_path)
+ tp_writer = writer[f"timestamp_prediction"]
+ else:
+ tp_writer = None
# 3. Build data-iterator
if data_path_and_name_and_type is None and raw_inputs is not None:
if isinstance(raw_inputs, torch.Tensor):
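
Note that the writer is now created twice: once at inference() scope and again inside _forward(), where a per-call output_dir_v2 overrides the outer output_dir and the inner tp_writer shadows the outer one. DatadirWriter follows the ESPnet convention of mapping nested keys to Kaldi-style "key value" text files, so the writes in the last hunk should land roughly as sketched below (the import path is an assumption; ESPnet-derived code usually keeps it under a fileio module):

    from funasr.fileio.datadir_writer import DatadirWriter  # path assumed

    writer = DatadirWriter("exp/tp_out")
    tp_writer = writer["timestamp_prediction"]
    tp_writer["tp_sync"]["utt1#"] = "<sil> 0.000 0.290;hello 0.290 0.410;"
    tp_writer["tp_time"]["utt1#"] = "[[290, 410]]"
    writer.close()
    # -> exp/tp_out/timestamp_prediction/tp_sync  (one line per utterance)
    # -> exp/tp_out/timestamp_prediction/tp_time
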
@@ -315,9 +279,12 @@
for batch_id in range(_bs):
key = keys[batch_id]
token = speechtext2timestamp.converter.ids2tokens(batch['text'][batch_id])
- ts_str, ts_list = time_stamp_lfr6_advance(us_alphas[batch_id], us_cif_peak[batch_id], token)
+ ts_str, ts_list = ts_prediction_lfr6_standard(us_alphas[batch_id], us_cif_peak[batch_id], token, force_time_shift=-3.0)
logging.warning(ts_str)
item = {'key': key, 'value': ts_str, 'timestamp':ts_list}
+ if tp_writer is not None:
+ tp_writer["tp_sync"][key+'#'] = ts_str
+ tp_writer["tp_time"][key+'#'] = str(ts_list)
tp_result_list.append(item)
return tp_result_list
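
Only the call site defines the contract for the relocated helper: it takes the upsampled CIF weights and peaks for one utterance plus the decoded token list, and force_time_shift=-3.0 appears to parameterize the fixed 3.2-frame offset the deleted function subtracted from firing positions. A hedged end-to-end sketch (names and shapes inferred from this file, not from timestamp_tools itself):

    from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard

    def predict_timestamps(tp_model, enc, enc_len, text_lengths, tokens):
        # Mirrors the surrounding code; batch index 0 only, since batch
        # decoding raises NotImplementedError earlier in this file.
        _, _, us_alphas, us_peaks = tp_model.calc_predictor_timestamp(
            enc, enc_len, text_lengths + 1
        )
        ts_str, ts_list = ts_prediction_lfr6_standard(
            us_alphas[0], us_peaks[0], tokens, force_time_shift=-3.0
        )
        return ts_str, ts_list  # "tok start end;..." and [[ms, ms], ...]
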
--
Gitblit v1.9.1