huangmingming
2023-01-30 adcee8828ef5d78b575043954deb662a35e318f7
funasr/bin/asr_inference_paraformer_vad_punc.py
@@ -3,6 +3,7 @@
import logging
import sys
import time
import json
from pathlib import Path
from typing import Optional
from typing import Sequence
@@ -100,10 +101,13 @@
        # logging.info("asr_train_args: {}".format(asr_train_args))
        asr_model.to(dtype=getattr(torch, dtype)).eval()
        ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
        # Register the CTC prefix scorer only when the model actually has a CTC branch.
        if asr_model.ctc is not None:
            ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
            scorers.update(ctc=ctc)
        token_list = asr_model.token_list
        scorers.update(
            ctc=ctc,
            length_bonus=LengthBonus(len(token_list)),
        )
@@ -171,7 +175,7 @@
        self.converter = converter
        self.tokenizer = tokenizer
        is_use_lm = lm_weight != 0.0 and lm_file is not None
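        # Skip building beam search when neither CTC nor an external LM contributes to scoring.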
        if ctc_weight == 0.0 and not is_use_lm:
        if (ctc_weight == 0.0 or asr_model.ctc is None) and not is_use_lm:
            beam_search = None
        self.beam_search = beam_search
        logging.info(f"Beam_search: {self.beam_search}")
@@ -364,201 +368,6 @@
        return fbanks, segments
# def inference(
#         maxlenratio: float,
#         minlenratio: float,
#         batch_size: int,
#         beam_size: int,
#         ngpu: int,
#         ctc_weight: float,
#         lm_weight: float,
#         penalty: float,
#         log_level: Union[int, str],
#         data_path_and_name_and_type,
#         asr_train_config: Optional[str],
#         asr_model_file: Optional[str],
#         cmvn_file: Optional[str] = None,
#         raw_inputs: Union[np.ndarray, torch.Tensor] = None,
#         lm_train_config: Optional[str] = None,
#         lm_file: Optional[str] = None,
#         token_type: Optional[str] = None,
#         key_file: Optional[str] = None,
#         word_lm_train_config: Optional[str] = None,
#         bpemodel: Optional[str] = None,
#         allow_variable_data_keys: bool = False,
#         streaming: bool = False,
#         output_dir: Optional[str] = None,
#         dtype: str = "float32",
#         seed: int = 0,
#         ngram_weight: float = 0.9,
#         nbest: int = 1,
#         num_workers: int = 1,
#         vad_infer_config: Optional[str] = None,
#         vad_model_file: Optional[str] = None,
#         vad_cmvn_file: Optional[str] = None,
#         time_stamp_writer: bool = False,
#         punc_infer_config: Optional[str] = None,
#         punc_model_file: Optional[str] = None,
#         **kwargs,
# ):
#     assert check_argument_types()
#
#     if word_lm_train_config is not None:
#         raise NotImplementedError("Word LM is not implemented")
#     if ngpu > 1:
#         raise NotImplementedError("only single GPU decoding is supported")
#
#     logging.basicConfig(
#         level=log_level,
#         format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
#     )
#
#     if ngpu >= 1 and torch.cuda.is_available():
#         device = "cuda"
#     else:
#         device = "cpu"
#
#     # 1. Set random-seed
#     set_all_random_seed(seed)
#
#     # 2. Build speech2vadsegment
#     speech2vadsegment_kwargs = dict(
#         vad_infer_config=vad_infer_config,
#         vad_model_file=vad_model_file,
#         vad_cmvn_file=vad_cmvn_file,
#         device=device,
#         dtype=dtype,
#     )
#     # logging.info("speech2vadsegment_kwargs: {}".format(speech2vadsegment_kwargs))
#     speech2vadsegment = Speech2VadSegment(**speech2vadsegment_kwargs)
#
#     # 3. Build speech2text
#     speech2text_kwargs = dict(
#         asr_train_config=asr_train_config,
#         asr_model_file=asr_model_file,
#         cmvn_file=cmvn_file,
#         lm_train_config=lm_train_config,
#         lm_file=lm_file,
#         token_type=token_type,
#         bpemodel=bpemodel,
#         device=device,
#         maxlenratio=maxlenratio,
#         minlenratio=minlenratio,
#         dtype=dtype,
#         beam_size=beam_size,
#         ctc_weight=ctc_weight,
#         lm_weight=lm_weight,
#         ngram_weight=ngram_weight,
#         penalty=penalty,
#         nbest=nbest,
#         frontend_conf=frontend_conf,
#     )
#     speech2text = Speech2Text(**speech2text_kwargs)
#
#     text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
#
#     # 3. Build data-iterator
#     loader = ASRTask.build_streaming_iterator(
#         data_path_and_name_and_type,
#         dtype=dtype,
#         batch_size=1,
#         key_file=key_file,
#         num_workers=num_workers,
#         preprocess_fn=VADTask.build_preprocess_fn(speech2vadsegment.vad_infer_args, False),
#         collate_fn=VADTask.build_collate_fn(speech2vadsegment.vad_infer_args, False),
#         allow_variable_data_keys=allow_variable_data_keys,
#         inference=True,
#     )
#
#     forward_time_total = 0.0
#     length_total = 0.0
#     finish_count = 0
#     file_count = 1
#     # 7 .Start for-loop
#     asr_result_list = []
#     if output_dir is not None:
#         writer = DatadirWriter(output_dir)
#     else:
#         writer = None
#
#     for keys, batch in loader:
#         assert isinstance(batch, dict), type(batch)
#         assert all(isinstance(s, str) for s in keys), keys
#         _bs = len(next(iter(batch.values())))
#         assert len(keys) == _bs, f"{len(keys)} != {_bs}"
#         # batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
#
#         logging.info("decoding, utt_id: {}".format(keys))
#         # N-best list of (text, token, token_int, hyp_object)
#         time_beg = time.time()
#         vad_results = speech2vadsegment(**batch)
#         time_end = time.time()
#         fbanks, vadsegments = vad_results[0], vad_results[1]
#         for i, segments in enumerate(vadsegments):
#             result_segments = [["", [], [], ]]
#             for j, segment_idx in enumerate(segments):
#                 bed_idx, end_idx = int(segment_idx[0]/10), int(segment_idx[1]/10)
#                 segment = fbanks[:, bed_idx:end_idx, :].to(device)
#                 speech_lengths = torch.Tensor([end_idx-bed_idx]).int().to(device)
#                 batch = {"speech": segment, "speech_lengths": speech_lengths, "begin_time": vadsegments[i][j][0], "end_time": vadsegments[i][j][1]}
#                 results = speech2text(**batch)
#                 if len(results) < 1:
#                     hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
#                     results = [[" ", ["<space>"], [2], 10, 6]] * nbest
#                 time_end = time.time()
#                 forward_time = time_end - time_beg
#                 lfr_factor = results[0][-1]
#                 length = results[0][-2]
#                 forward_time_total += forward_time
#                 length_total += length
#                 logging.info(
#                     "decoding, feature length: {}, forward_time: {:.4f}, rtf: {:.4f}".
#                         format(length, forward_time, 100 * forward_time / (length*lfr_factor)))
#                 result_cur = [results[0][:-2]]
#                 if j == 0:
#                     result_segments = result_cur
#                 else:
#                     result_segments = [[result_segments[0][i] + result_cur[0][i] for i in range(len(result_cur[0]))]]
#
#             key = keys[0]
#             result = result_segments[0]
#             text, token, token_int, time_stamp = result
#
#             # Create a directory: outdir/{n}best_recog
#             if writer is not None:
#                 ibest_writer = writer[f"1best_recog"]
#
#                 # Write the result to each file
#                 ibest_writer["token"][key] = " ".join(token)
#                 ibest_writer["token_int"][key] = " ".join(map(str, token_int))
#
#             if text is not None:
#                 postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
#                 if len(postprocessed_result) == 3:
#                     text_postprocessed, time_stamp_postprocessed, word_lists = postprocessed_result[0], postprocessed_result[1], postprocessed_result[2]
#                     text_postprocessed_punc, punc_id_list = text2punc(word_lists, 20)
#                     text_postprocessed_punc_time_stamp = "predictions: {}  time_stamp: {}".format(text_postprocessed_punc, time_stamp_postprocessed)
#                 else:
#                     text_postprocessed = postprocessed_result
#                     time_stamp_postprocessed = None
#                     word_lists = None
#                     text_postprocessed_punc_time_stamp = None
#                     punc_id_list = None
#
#                 item = {'key': key, 'value': text_postprocessed_punc_time_stamp, 'text': text_postprocessed, 'time_stamp': time_stamp_postprocessed, 'punc': punc_id_list}
#                 asr_result_list.append(item)
#                 finish_count += 1
#                 # asr_utils.print_progress(finish_count / file_count)
#                 if writer is not None:
#                     ibest_writer["text"][key] = text_postprocessed
#                     if time_stamp_writer and time_stamp_postprocessed is not None:
#                         ibest_writer["time_stamp"][key] = " ".join(["-".join(map(str, ts)) for ts in time_stamp_postprocessed])
#
#             logging.info("decoding, utt: {}, predictions: {}, time_stamp: {}".format(key, text_postprocessed_punc, time_stamp_postprocessed))
#
#     logging.info("decoding, feature length total: {}, forward_time total: {:.4f}, rtf avg: {:.4f}".
#                  format(length_total, forward_time_total, 100 * forward_time_total / (length_total*lfr_factor)))
#     return asr_result_list
def inference(
    maxlenratio: float,
@@ -666,9 +475,10 @@
    vad_infer_config: Optional[str] = None,
    vad_model_file: Optional[str] = None,
    vad_cmvn_file: Optional[str] = None,
    time_stamp_writer: bool = False,
    time_stamp_writer: bool = True,
    punc_infer_config: Optional[str] = None,
    punc_model_file: Optional[str] = None,
    outputs_dict: Optional[bool] = True,
    **kwargs,
):
    assert check_argument_types()
@@ -725,6 +535,11 @@
    speech2text = Speech2Text(**speech2text_kwargs)
    
    text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
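    # Persist the model's token list once up front (useful for mapping the token_int outputs back to symbols).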
    if output_dir is not None:
        writer = DatadirWriter(output_dir)
        ibest_writer = writer[f"1best_recog"]
        ibest_writer["token_list"][""] = " ".join(speech2text.asr_train_args.token_list)
    
    def _forward(data_path_and_name_and_type,
                 raw_inputs: Union[np.ndarray, torch.Tensor] = None,
@@ -751,11 +566,15 @@
        length_total = 0.0
        finish_count = 0
        file_count = 1
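        # Default LFR factor for the RTF estimate; overwritten by the decoder output for each segment.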
        lfr_factor = 6
        # 7. Start for-loop
        asr_result_list = []
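        # A per-call output dir (output_dir_v2) takes precedence over the one given at construction time.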
        output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
        if output_path is not None:
            writer = DatadirWriter(output_path)
            ibest_writer = writer[f"1best_recog"]
            # ibest_writer["punc_dict"][""] = " ".join(punc_infer_config.punc_list)
            # ibest_writer["token_list"][""] = " ".join(asr_train_config.token_list)
        else:
            writer = None
        
@@ -783,7 +602,7 @@
                    results = speech2text(**batch)
                    if len(results) < 1:
                        hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
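                        # Fall back to a blank placeholder result so the length/lfr_factor indexing below still works.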
                        results = [[" ", ["<space>"], [2], 10, 6]] * nbest
                        results = [[" ", ["<space>"], [2], 0, 1, 6]] * nbest
                    time_end = time.time()
                    forward_time = time_end - time_beg
                    lfr_factor = results[0][-1]
@@ -801,15 +620,15 @@
                
                key = keys[0]
                result = result_segments[0]
                text, token, token_int, time_stamp = result
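                # Timestamps are optional in the decoder output, so read them defensively.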
                text, token, token_int = result[0], result[1], result[2]
                time_stamp = None if len(result) < 4 else result[3]
                
                # Create a directory: outdir/{n}best_recog
                if writer is not None:
                    ibest_writer = writer[f"1best_recog"]
                    # Write the result to each file
                    ibest_writer["token"][key] = " ".join(token)
                    # ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                    ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                    ibest_writer["vad"][key] = "{}".format(vadsegments)
                
                if text is not None:
                    postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
@@ -817,32 +636,45 @@
                        text_postprocessed, time_stamp_postprocessed, word_lists = postprocessed_result[0], \
                                                                                   postprocessed_result[1], \
                                                                                   postprocessed_result[2]
                        text_postprocessed_punc, punc_id_list = text2punc(word_lists, 20)
                        text_postprocessed_punc_time_stamp = "predictions: {}  time_stamp: {}".format(
                            text_postprocessed_punc, time_stamp_postprocessed)
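                        # Run punctuation restoration only on non-empty token lists; the prediction/time_stamp pair is serialized as JSON so it stays machine-parseable.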
                        if len(word_lists) > 0:
                            text_postprocessed_punc, punc_id_list = text2punc(word_lists, 20)
                            text_postprocessed_punc_time_stamp = json.dumps({"predictions": text_postprocessed_punc,
                                                                             "time_stamp": time_stamp_postprocessed},
                                                                            ensure_ascii=False)
                        else:
                            text_postprocessed_punc = ""
                            punc_id_list = []
                            text_postprocessed_punc_time_stamp = ""
                    else:
                        text_postprocessed = postprocessed_result
                        time_stamp_postprocessed = None
                        word_lists = None
                        text_postprocessed_punc_time_stamp = None
                        punc_id_list = None
                        text_postprocessed = ""
                        time_stamp_postprocessed = ""
                        word_lists = ""
                        text_postprocessed_punc_time_stamp = ""
                        punc_id_list = ""
                        text_postprocessed_punc = ""
                    item = {'key': key, 'value': text_postprocessed_punc_time_stamp, 'text': text_postprocessed,
                            'time_stamp': time_stamp_postprocessed, 'punc': punc_id_list}
                            'time_stamp': time_stamp_postprocessed, 'punc': punc_id_list, 'token': token}
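                    # With outputs_dict=True, nest the fields under 'value' so each entry is a {'key', 'value'} record.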
                    if outputs_dict:
                        item = {'text_punc': text_postprocessed_punc, 'text': text_postprocessed,
                                'punc_id': punc_id_list, 'token': token, 'time_stamp': time_stamp_postprocessed}
                        item = {'key': key, 'value': item}
                    asr_result_list.append(item)
                    finish_count += 1
                    # asr_utils.print_progress(finish_count / file_count)
                    if writer is not None:
                        ibest_writer["text"][key] = text_postprocessed
                        if time_stamp_writer and time_stamp_postprocessed is not None:
                            ibest_writer["time_stamp"][key] = " ".join(
                                ["-".join(map(str, ts)) for ts in time_stamp_postprocessed])
                        ibest_writer["punc_id"][key] = "{}".format(punc_id_list)
                        ibest_writer["text_with_punc"][key] = text_postprocessed_punc_time_stamp
                        if time_stamp_postprocessed is not None:
                            ibest_writer["time_stamp"][key] = "{}".format(time_stamp_postprocessed)
                
                logging.info("decoding, utt: {}, predictions: {}, time_stamp: {}".format(key, text_postprocessed_punc,
                                                                                         time_stamp_postprocessed))
        
        logging.info("decoding, feature length total: {}, forward_time total: {:.4f}, rtf avg: {:.4f}".
                     format(length_total, forward_time_total, 100 * forward_time_total / (length_total * lfr_factor)))
                     format(length_total, forward_time_total, 100 * forward_time_total / (length_total * lfr_factor + 1e-6)))  # the epsilon avoids division by zero when nothing was decoded
        return asr_result_list
    return _forward
@@ -869,7 +701,6 @@
            punc_list[i] = "?"
        elif punc_list[i] == "。":
            period = i
    preprocessor = CommonPreprocessor(
        train=False,
        token_type="word",
@@ -887,7 +718,8 @@
        cache_sent = []
        mini_sentences = split_to_mini_sentence(words, split_size)
        new_mini_sentence = ""
        new_mini_sentence_punc = ""
        new_mini_sentence_punc = []
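        # If no sentence-final punctuation appears within this many cached tokens, force a split at the last comma.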
        cache_pop_trigger_limit = 200
        for mini_sentence_i in range(len(mini_sentences)):
            mini_sentence = mini_sentences[mini_sentence_i]
            mini_sentence = cache_sent + mini_sentence
@@ -904,24 +736,31 @@
            if indices.size()[0] != 1:
                punctuations = torch.squeeze(indices)
            assert punctuations.size()[0] == len(mini_sentence)
            # Search for the last Period/QuestionMark as cache
            if mini_sentence_i < len(mini_sentences) - 1:
                sentenceEnd = -1
                last_comma_index = -1
                for i in range(len(punctuations) - 2, 1, -1):
                    if punc_list[punctuations[i]] == "。" or punc_list[punctuations[i]] == "?":
                        sentenceEnd = i
                        break
                    if last_comma_index < 0 and punc_list[punctuations[i]] == ",":
                        last_comma_index = i
                if sentenceEnd < 0 and len(mini_sentence) > cache_pop_trigger_limit and last_comma_index >= 0:
                    # The sentence is too long; cut it off at a comma.
                    sentenceEnd = last_comma_index
                    punctuations[sentenceEnd] = period
                cache_sent = mini_sentence[sentenceEnd + 1:]
                mini_sentence = mini_sentence[0:sentenceEnd + 1]
                punctuations = punctuations[0:sentenceEnd + 1]
            # if len(punctuations) == 0:
            #    continue
            punctuations_np = punctuations.cpu().numpy()
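            # Accumulate punctuation ids as a list of ints (instead of a concatenated digit string) so callers can index them directly.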
            new_mini_sentence_punc += "".join([str(x) for x in punctuations_np])
            new_mini_sentence_punc += [int(x) for x in punctuations_np]
            words_with_punc = []
            for i in range(len(mini_sentence)):
                if i > 0:
@@ -931,9 +770,8 @@
                if punc_list[punctuations[i]] != "_":
                    words_with_punc.append(punc_list[punctuations[i]])
            new_mini_sentence += "".join(words_with_punc)
        return new_mini_sentence, new_mini_sentence_punc
    return _forward
def get_parser():