From 8dab6d184a034ca86eafa644ea0d2100aadfe27d Mon Sep 17 00:00:00 2001
From: jmwang66 <wangjiaming.wjm@alibaba-inc.com>
Date: Tue, 09 May 2023 10:58:33 +0800
Subject: [PATCH] Merge pull request #473 from alibaba-damo-academy/dev_smohan

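Port funasr/bin/tp_inference.py to the shared timestamp utilities:
replace the local time_stamp_lfr6_advance() helper with
ts_prediction_lfr6_standard() from funasr.utils.timestamp_tools, switch
text preprocessing to LMPreprocessor (new split_with_space and
seg_dict_file options), write predictions through DatadirWriter
("tp_sync" / "tp_time"), pin the torch thread count via an "ncpu"
kwarg, and return result items as {'key', 'value', 'timestamp'} dicts.

A minimal usage sketch of the updated entry point. The data list and
the model keyword arguments below are placeholders; the model arguments
are assumed to be forwarded to SpeechText2Timestamp, whose parameter
names they borrow:

    from funasr.bin.tp_inference import inference_modelscope

    pipeline = inference_modelscope(
        output_dir="exp/tp_out",       # DatadirWriter target
        batch_size=1,                  # batch decoding is not implemented
        ngpu=0,
        split_with_space=True,
        seg_dict_file=None,
        timestamp_infer_config="exp/tp/config.yaml",  # placeholder path
        timestamp_model_file="exp/tp/model.pb",       # placeholder path
    )
    results = pipeline(
        data_path_and_name_and_type=[
            ("data/wav.scp", "speech", "sound"),  # placeholder files
            ("data/text", "text", "text"),
        ],
        raw_inputs=None,
    )
    # each item: {'key': ..., 'value': ts_str, 'timestamp': ts_list}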
---
 funasr/bin/tp_inference.py |  137 +++++++++++++++++++++------------------------
 1 file changed, 65 insertions(+), 72 deletions(-)

diff --git a/funasr/bin/tp_inference.py b/funasr/bin/tp_inference.py
index 67e82a7..6e513c5 100644
--- a/funasr/bin/tp_inference.py
+++ b/funasr/bin/tp_inference.py
@@ -1,5 +1,6 @@
 import argparse
 import logging
+from typing import Optional
 import sys
 import json
 from pathlib import Path
@@ -11,15 +12,12 @@
 from typing import Union
 from typing import Dict
 
-import math
 import numpy as np
 import torch
 from typeguard import check_argument_types
-from typeguard import check_return_type
 
 from funasr.fileio.datadir_writer import DatadirWriter
-from funasr.modules.scorers.scorer_interface import BatchScorerInterface
-from funasr.modules.subsampling import TooShortUttError
+from funasr.datasets.preprocessor import LMPreprocessor
 from funasr.tasks.asr import ASRTaskAligner as ASRTask
 from funasr.torch_utils.device_funcs import to_device
 from funasr.torch_utils.set_all_random_seed import set_all_random_seed
@@ -28,9 +26,10 @@
 from funasr.utils.types import str2bool
 from funasr.utils.types import str2triple_str
 from funasr.utils.types import str_or_none
-from funasr.utils import asr_utils, wav_utils, postprocess_utils
 from funasr.models.frontend.wav_frontend import WavFrontend
 from funasr.text.token_id_converter import TokenIDConverter
+from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
+
 
 header_colors = '\033[95m'
 end_colors = '\033[0m'
@@ -40,61 +39,6 @@
     'audio_fs': 16000,
     'model_fs': 16000
 }
-
-def time_stamp_lfr6_advance(us_alphas, us_cif_peak, char_list):
-    START_END_THRESHOLD = 5
-    MAX_TOKEN_DURATION = 12
-    TIME_RATE = 10.0 * 6 / 1000 / 3  #  3 times upsampled
-    if len(us_cif_peak.shape) == 2:
-        alphas, cif_peak = us_alphas[0], us_cif_peak[0]  # support inference batch_size=1 only
-    else:
-        alphas, cif_peak = us_alphas, us_cif_peak
-    num_frames = cif_peak.shape[0]
-    if char_list[-1] == '</s>':
-        char_list = char_list[:-1]
-    # char_list = [i for i in text]
-    timestamp_list = []
-    new_char_list = []
-    # for bicif model trained with large data, cif2 actually fires when a character starts
-    # so treat the frames between two peaks as the duration of the former token
-    fire_place = torch.where(cif_peak>1.0-1e-4)[0].cpu().numpy() - 3.2  # total offset
-    num_peak = len(fire_place)
-    assert num_peak == len(char_list) + 1 # number of peaks is supposed to be number of tokens + 1
-    # begin silence
-    if fire_place[0] > START_END_THRESHOLD:
-        # char_list.insert(0, '<sil>')
-        timestamp_list.append([0.0, fire_place[0]*TIME_RATE])
-        new_char_list.append('<sil>')
-    # tokens timestamp
-    for i in range(len(fire_place)-1):
-        new_char_list.append(char_list[i])
-        if MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION:
-            timestamp_list.append([fire_place[i]*TIME_RATE, fire_place[i+1]*TIME_RATE])
-        else:
-            # cut the duration to token and sil of the 0-weight frames last long
-            _split = fire_place[i] + MAX_TOKEN_DURATION
-            timestamp_list.append([fire_place[i]*TIME_RATE, _split*TIME_RATE])
-            timestamp_list.append([_split*TIME_RATE, fire_place[i+1]*TIME_RATE])
-            new_char_list.append('<sil>')
-    # tail token and end silence
-    # new_char_list.append(char_list[-1])
-    if num_frames - fire_place[-1] > START_END_THRESHOLD:
-        _end = (num_frames + fire_place[-1]) * 0.5
-        # _end = fire_place[-1] 
-        timestamp_list[-1][1] = _end*TIME_RATE
-        timestamp_list.append([_end*TIME_RATE, num_frames*TIME_RATE])
-        new_char_list.append("<sil>")
-    else:
-        timestamp_list[-1][1] = num_frames*TIME_RATE
-    assert len(new_char_list) == len(timestamp_list)
-    res_str = ""
-    for char, timestamp in zip(new_char_list, timestamp_list):
-        res_str += "{} {} {};".format(char, str(timestamp[0]+0.0005)[:5], str(timestamp[1]+0.0005)[:5])
-    res = []
-    for char, timestamp in zip(char_list, timestamp_list):
-        if char != '<sil>':
-            res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
-    return res_str, res
 
 
 class SpeechText2Timestamp:
@@ -110,11 +54,11 @@
         assert check_argument_types()
         # 1. Build ASR model
         tp_model, tp_train_args = ASRTask.build_model_from_file(
-            timestamp_infer_config, timestamp_model_file, device
+            timestamp_infer_config, timestamp_model_file, device=device
         )
         if 'cuda' in device:
-            tp_model = tp_model.cuda()
-            
+            tp_model = tp_model.cuda()  # force model to cuda
+
         frontend = None
         if tp_train_args.frontend is not None:
             frontend = WavFrontend(cmvn_file=timestamp_cmvn_file, **tp_train_args.frontend_conf)
@@ -172,8 +116,8 @@
             enc = enc[0]
 
         # c. Forward Predictor
-        _, _, us_alphas, us_cif_peak = self.tp_model.calc_predictor_timestamp(enc, enc_len, text_lengths.to(self.device)+1)
-        return us_alphas, us_cif_peak
+        _, _, us_alphas, us_peaks = self.tp_model.calc_predictor_timestamp(enc, enc_len, text_lengths.to(self.device)+1)
+        return us_alphas, us_peaks
 
 
 def inference(
@@ -191,6 +135,8 @@
         dtype: str = "float32",
         seed: int = 0,
         num_workers: int = 1,
+        split_with_space: bool = True,
+        seg_dict_file: Optional[str] = None,
         **kwargs,
 ):
     inference_pipeline = inference_modelscope(
@@ -206,6 +152,8 @@
         dtype=dtype,
         seed=seed,
         num_workers=num_workers,
+        split_with_space=split_with_space,
+        seg_dict_file=seg_dict_file,
         **kwargs,
     )
     return inference_pipeline(data_path_and_name_and_type, raw_inputs)
@@ -226,9 +174,14 @@
         dtype: str = "float32",
         seed: int = 0,
         num_workers: int = 1,
+        split_with_space: bool = True,
+        seg_dict_file: Optional[str] = None,
         **kwargs,
 ):
     assert check_argument_types()
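+    # Cap torch's intra-op CPU threads; "ncpu" is read from **kwargs (default 1).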
+    ncpu = kwargs.get("ncpu", 1)
+    torch.set_num_threads(ncpu)
+
     if batch_size > 1:
         raise NotImplementedError("batch decoding is not implemented")
     if ngpu > 1:
@@ -256,6 +209,26 @@
     )
     logging.info("speechtext2timestamp_kwargs: {}".format(speechtext2timestamp_kwargs))
     speechtext2timestamp = SpeechText2Timestamp(**speechtext2timestamp_kwargs)
+
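+    # Build the text preprocessor directly so inference-time tokenization can
+    # honor split_with_space and an optional seg_dict_file segmentation dict.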
+    preprocessor = LMPreprocessor(
+        train=False,
+        token_type=speechtext2timestamp.tp_train_args.token_type,
+        token_list=speechtext2timestamp.tp_train_args.token_list,
+        bpemodel=None,
+        text_cleaner=None,
+        g2p_type=None,
+        text_name="text",
+        non_linguistic_symbols=speechtext2timestamp.tp_train_args.non_linguistic_symbols,
+        split_with_space=split_with_space,
+        seg_dict_file=seg_dict_file,
+    )
     
     def _forward(
             data_path_and_name_and_type,
@@ -264,7 +237,14 @@
             fs: dict = None,
             param_dict: dict = None,
             **kwargs
     ):
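+        # A per-call output dir (output_dir_v2) takes precedence over output_dir.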
+        output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+        writer = None
+        if output_path is not None:
+            writer = DatadirWriter(output_path)
+            tp_writer = writer["timestamp_prediction"]
+        else:
+            tp_writer = None
         # 3. Build data-iterator
         if data_path_and_name_and_type is None and raw_inputs is not None:
             if isinstance(raw_inputs, torch.Tensor):
@@ -277,14 +257,11 @@
             batch_size=batch_size,
             key_file=key_file,
             num_workers=num_workers,
-            preprocess_fn=ASRTask.build_preprocess_fn(speechtext2timestamp.tp_train_args, False),
+            preprocess_fn=preprocessor,
             collate_fn=ASRTask.build_collate_fn(speechtext2timestamp.tp_train_args, False),
             allow_variable_data_keys=allow_variable_data_keys,
             inference=True,
         )
-
-        finish_count = 0
-        file_count = 1
 
         tp_result_list = []
         for keys, batch in loader:
@@ -302,9 +279,13 @@
             for batch_id in range(_bs):
                 key = keys[batch_id]
                 token = speechtext2timestamp.converter.ids2tokens(batch['text'][batch_id])
-                ts_str, ts_list = time_stamp_lfr6_advance(us_alphas[batch_id], us_cif_peak[batch_id], token)
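+                # Shared helper from funasr.utils.timestamp_tools supersedes the
+                # removed local time_stamp_lfr6_advance(); force_time_shift plays
+                # the role of its hard-coded peak-position offset.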
+                ts_str, ts_list = ts_prediction_lfr6_standard(us_alphas[batch_id], us_cif_peak[batch_id], token, force_time_shift=-3.0)
                 logging.warning(ts_str)
-                tp_result_list.append({'text':"".join([i for i in token if i != '<sil>']), 'timestamp': ts_list})
+                item = {'key': key, 'value': ts_str, 'timestamp': ts_list}
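+                # When an output dir is set, persist the readable alignment string
+                # ("tp_sync") and the raw timestamp list ("tp_time") per utterance.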
+                if tp_writer is not None:
+                    tp_writer["tp_sync"][key + '#'] = ts_str
+                    tp_writer["tp_time"][key + '#'] = str(ts_list)
+                tp_result_list.append(item)
         return tp_result_list
 
     return _forward
@@ -389,6 +370,18 @@
         default=1,
         help="The batch size for inference",
     )
+    group.add_argument(
+        "--seg_dict_file",
+        type=str,
+        default=None,
+        help="The word segmentation dict file used by the text preprocessor",
+    )
+    group.add_argument(
+        "--split_with_space",
+        type=str2bool,
+        default=False,
+        help="Whether to split the input text by spaces before tokenization",
+    )
 
     return parser
 

--
Gitblit v1.9.1