From 0a4e3b7e64e9e095cfdcd4b3c28bde7aa58839e7 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Sat, 11 Feb 2023 17:40:00 +0800
Subject: [PATCH] asr_inference_paraformer_vad_punc: BiCifParaformer timestamps, optional punctuation, fs passthrough

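Rework the Paraformer+VAD+punctuation inference script:

* For BiCifParaformer models, compute upsampled CIF alphas/peaks via
  calc_predictor_timestamp and produce timestamps with
  time_stamp_lfr6_pl; other models keep using time_stamp_lfr6.
* Build Text2Punc only when punc_model_file is given, making
  punctuation restoration optional.
* Accept an fs dict in _forward and pass it through to
  ASRTask.build_streaming_iterator.
* Read use_timestamp (default True) from param_dict and drop
  timestamps from the postprocessed output when it is False.
* Skip VAD segments that decode to nothing, and remove the
  per-utterance RTF accounting and logging.

A minimal calling sketch follows; it is an illustration under stated
assumptions: "forward" stands for the callable returned by this
module's builder (not shown in this patch), the (path, name, type)
triple follows the usual scp convention, and the fs dict shape mirrors
the removed global_sample_rate default:

    results = forward(
        [("wav.scp", "speech", "sound")],
        fs={"audio_fs": 16000, "model_fs": 16000},
        param_dict={"use_timestamp": True},
    )
    # each item: {"key": ..., "value": text_with_punc} plus optional
    # "text_postprocessed" and "time_stamp" fields
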
---
 funasr/bin/asr_inference_paraformer_vad_punc.py |  150 +++++++++++++++++++++----------------------------
 1 file changed, 65 insertions(+), 85 deletions(-)

diff --git a/funasr/bin/asr_inference_paraformer_vad_punc.py b/funasr/bin/asr_inference_paraformer_vad_punc.py
index 10c1da6..c4bb61b 100644
--- a/funasr/bin/asr_inference_paraformer_vad_punc.py
+++ b/funasr/bin/asr_inference_paraformer_vad_punc.py
@@ -14,6 +14,7 @@
 from typing import Any
 from typing import List
 import math
+import copy
 import numpy as np
 import torch
 from typeguard import check_argument_types
@@ -38,20 +39,13 @@
 from funasr.utils import asr_utils, wav_utils, postprocess_utils
 from funasr.models.frontend.wav_frontend import WavFrontend
 from funasr.tasks.vad import VADTask
-from funasr.utils.timestamp_tools import time_stamp_lfr6
+from funasr.utils.timestamp_tools import time_stamp_lfr6, time_stamp_lfr6_pl
 from funasr.bin.punctuation_infer import Text2Punc
-from funasr.torch_utils.forward_adaptor import ForwardAdaptor
-from funasr.datasets.preprocessor import CommonPreprocessor
-from funasr.punctuation.text_preprocessor import split_to_mini_sentence
+from funasr.models.e2e_asr_paraformer import BiCifParaformer
 
 header_colors = '\033[95m'
 end_colors = '\033[0m'
 
-global_asr_language: str = 'zh-cn'
-global_sample_rate: Union[int, Dict[Any, int]] = {
-    'audio_fs': 16000,
-    'model_fs': 16000
-}
 
 class Speech2Text:
     """Speech2Text class
@@ -242,6 +236,10 @@
         decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length)
         decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
 
+        if isinstance(self.asr_model, BiCifParaformer):
+            # upsampled CIF alphas and peak positions for token-level timestamps
+            _, _, us_alphas, us_cif_peak = self.asr_model.calc_predictor_timestamp(enc, enc_len, pre_token_length)
+
         results = []
         b, n, d = decoder_out.size()
         for i in range(b):
@@ -284,9 +282,12 @@
                 else:
                     text = None
 
-                time_stamp = time_stamp_lfr6(alphas[i:i+1,], enc_len[i:i+1,], token, begin_time, end_time)
-    
-                results.append((text, token, token_int, time_stamp, enc_len_batch_total, lfr_factor))
+                if isinstance(self.asr_model, BiCifParaformer):
+                    timestamp = time_stamp_lfr6_pl(us_alphas[i], us_cif_peak[i], copy.copy(token), begin_time, end_time)
+                    results.append((text, token, token_int, timestamp, enc_len_batch_total, lfr_factor))
+                else:
+                    time_stamp = time_stamp_lfr6(alphas[i:i + 1, ], enc_len[i:i + 1, ], copy.copy(token), begin_time, end_time)
+                    results.append((text, token, token_int, time_stamp, enc_len_batch_total, lfr_factor))
 
         # assert check_return_type(results)
         return results
@@ -537,8 +538,9 @@
         nbest=nbest,
     )
     speech2text = Speech2Text(**speech2text_kwargs)
-    
-    text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
+    text2punc = None
+    if punc_model_file is not None:
+        text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
 
     if output_dir is not None:
         writer = DatadirWriter(output_dir)
@@ -548,6 +550,7 @@
     def _forward(data_path_and_name_and_type,
                  raw_inputs: Union[np.ndarray, torch.Tensor] = None,
                  output_dir_v2: Optional[str] = None,
+                 fs: dict = None,  # sampling-rate info forwarded to the data iterator
                  param_dict: dict = None,
                  ):
         # 3. Build data-iterator
@@ -558,6 +561,7 @@
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             dtype=dtype,
+            fs=fs,
             batch_size=1,
             key_file=key_file,
             num_workers=num_workers,
@@ -566,38 +570,33 @@
             allow_variable_data_keys=allow_variable_data_keys,
             inference=True,
         )
-        
-        forward_time_total = 0.0
-        length_total = 0.0
+
+        if param_dict is not None:
+            use_timestamp = param_dict.get('use_timestamp', True)
+        else:
+            use_timestamp = True
+
         finish_count = 0
         file_count = 1
         lfr_factor = 6
         # 7 .Start for-loop
         asr_result_list = []
         output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+        writer = None
         if output_path is not None:
             writer = DatadirWriter(output_path)
             ibest_writer = writer[f"1best_recog"]
-            # ibest_writer["punc_dict"][""] = " ".join(punc_infer_config.punc_list)
-            # ibest_writer["token_list"][""] = " ".join(asr_train_config.token_list)
-        else:
-            writer = None
-        
+
         for keys, batch in loader:
             assert isinstance(batch, dict), type(batch)
             assert all(isinstance(s, str) for s in keys), keys
             _bs = len(next(iter(batch.values())))
             assert len(keys) == _bs, f"{len(keys)} != {_bs}"
-            # batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
-            
-            logging.info("decoding, utt_id: {}".format(keys))
-            # N-best list of (text, token, token_int, hyp_object)
-            time_beg = time.time()
+
             vad_results = speech2vadsegment(**batch)
-            time_end = time.time()
             fbanks, vadsegments = vad_results[0], vad_results[1]
             for i, segments in enumerate(vadsegments):
-                result_segments = [["", [], [], ]]
+                result_segments = [["", [], [], []]]
                 for j, segment_idx in enumerate(segments):
                     bed_idx, end_idx = int(segment_idx[0] / 10), int(segment_idx[1] / 10)
                     segment = fbanks[:, bed_idx:end_idx, :].to(device)
@@ -606,76 +605,57 @@
                              "end_time": vadsegments[i][j][1]}
                     results = speech2text(**batch)
                     if len(results) < 1:
-                        hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
-                        results = [[" ", ["sil"], [2], 0, 1, 6]] * nbest
-                    time_end = time.time()
-                    forward_time = time_end - time_beg
-                    lfr_factor = results[0][-1]
-                    length = results[0][-2]
-                    forward_time_total += forward_time
-                    length_total += length
-                    logging.info(
-                        "decoding, feature length: {}, forward_time: {:.4f}, rtf: {:.4f}".
-                        format(length, forward_time, 100 * forward_time / (length * lfr_factor)))
+                        continue
+
                     result_cur = [results[0][:-2]]
                     if j == 0:
                         result_segments = result_cur
                     else:
                         result_segments = [[result_segments[0][i] + result_cur[0][i] for i in range(len(result_cur[0]))]]
-                
+
                 key = keys[0]
                 result = result_segments[0]
                 text, token, token_int = result[0], result[1], result[2]
                 time_stamp = None if len(result) < 4 else result[3]
-                
-                # Create a directory: outdir/{n}best_recog
+
+                if use_timestamp and time_stamp is not None:
+                    postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
+                else:
+                    postprocessed_result = postprocess_utils.sentence_postprocess(token)
+                # defaults for the no-timestamp case
+                text_postprocessed = ""
+                time_stamp_postprocessed = ""
+                if len(postprocessed_result) == 3:
+                    # (text, timestamps, word_lists) when timestamps were attached
+                    text_postprocessed, time_stamp_postprocessed, word_lists = postprocessed_result
+                else:
+                    # (text, word_lists) otherwise
+                    text_postprocessed, word_lists = postprocessed_result
+
+                text_postprocessed_punc = text_postprocessed
+                if len(word_lists) > 0 and text2punc is not None:
+                    text_postprocessed_punc, _ = text2punc(word_lists, 20)  # punctuation ids unused here
+
+                item = {'key': key, 'value': text_postprocessed_punc}
+                if text_postprocessed != "":
+                    item['text_postprocessed'] = text_postprocessed
+                if time_stamp_postprocessed != "":
+                    item['time_stamp'] = time_stamp_postprocessed
+
+                asr_result_list.append(item)
+                finish_count += 1
+                # asr_utils.print_progress(finish_count / file_count)
                 if writer is not None:
                     # Write the result to each file
                     ibest_writer["token"][key] = " ".join(token)
                     ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                     ibest_writer["vad"][key] = "{}".format(vadsegments)
-                
-                if text is not None:
-                    postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
-                    if len(postprocessed_result) == 3:
-                        text_postprocessed, time_stamp_postprocessed, word_lists = postprocessed_result[0], \
-                                                                                   postprocessed_result[1], \
-                                                                                   postprocessed_result[2]
-                        if len(word_lists) > 0: 
-                            text_postprocessed_punc, punc_id_list = text2punc(word_lists, 20)
-                            text_postprocessed_punc_time_stamp = json.dumps({"predictions": text_postprocessed_punc,
-                                                                             "time_stamp": time_stamp_postprocessed},
-                                                                            ensure_ascii=False)
-                        else:
-                            text_postprocessed_punc = ""
-                            punc_id_list = []
-                            text_postprocessed_punc_time_stamp = ""
-                            
-                    else:
-                        text_postprocessed = ""
-                        time_stamp_postprocessed = ""
-                        word_lists = ""
-                        text_postprocessed_punc_time_stamp = ""
-                        punc_id_list = ""
-                        text_postprocessed_punc = ""
-
-                    item = {'key': key, 'value': text_postprocessed_punc, 'text_postprocessed': text_postprocessed,
-                            'time_stamp': time_stamp_postprocessed, 'token': token}
-                    asr_result_list.append(item)
-                    finish_count += 1
-                    # asr_utils.print_progress(finish_count / file_count)
-                    if writer is not None:
-                        ibest_writer["text"][key] = text_postprocessed
-                        ibest_writer["punc_id"][key] = "{}".format(punc_id_list)
-                        ibest_writer["text_with_punc"][key] = text_postprocessed_punc_time_stamp
-                        if time_stamp_postprocessed is not None:
-                            ibest_writer["time_stamp"][key] = "{}".format(time_stamp_postprocessed)
-                
-                logging.info("decoding, utt: {}, predictions: {}, time_stamp: {}".format(key, text_postprocessed_punc,
-                                                                                         time_stamp_postprocessed))
-        
-        logging.info("decoding, feature length total: {}, forward_time total: {:.4f}, rtf avg: {:.4f}".
-                     format(length_total, forward_time_total, 100 * forward_time_total / (length_total * lfr_factor+1e-6)))
+                    ibest_writer["text"][key] = text_postprocessed
+                    ibest_writer["text_with_punc"][key] = text_postprocessed_punc
+                    if time_stamp_postprocessed != "":
+                        ibest_writer["time_stamp"][key] = "{}".format(time_stamp_postprocessed)
+
+                logging.info("decoding, utt: {}, predictions: {}".format(key, text_postprocessed_punc))
         return asr_result_list
     return _forward
 

--
Gitblit v1.9.1