zhifu gao
2022-12-09 f9fed09e96f43e7eab88378fc444c4987933badb
funasr/bin/asr_inference_paraformer.py
@@ -8,6 +8,9 @@
from typing import Sequence
from typing import Tuple
from typing import Union
from typing import Dict
from typing import Any
from typing import List
import numpy as np
import torch
@@ -30,7 +33,21 @@
from funasr.utils.types import str2bool
from funasr.utils.types import str2triple_str
from funasr.utils.types import str_or_none
from funasr.utils import asr_utils, wav_utils, postprocess_utils
from funasr.models.frontend.wav_frontend import WavFrontend
from modelscope.utils.logger import get_logger
logger = get_logger()
header_colors = '\033[95m'
end_colors = '\033[0m'
global_asr_language: str = 'zh-cn'
global_sample_rate: Union[int, Dict[Any, int]] = {
    'audio_fs': 16000,
    'model_fs': 16000
}
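# module-level defaults consumed by the modelscope pipeline; both can be
# overridden at runtime via set_parameters() below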
class Speech2Text:
    """Speech2Text class
@@ -62,6 +79,7 @@
            ngram_weight: float = 0.9,
            penalty: float = 0.0,
            nbest: int = 1,
            frontend_conf: dict = None,
            **kwargs,
    ):
        assert check_argument_types()
@@ -71,6 +89,9 @@
        asr_model, asr_train_args = ASRTask.build_model_from_file(
            asr_train_config, asr_model_file, device
        )
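        # models shipped without a built-in frontend get a WavFrontend attached
        # here (fbank feature extraction), so raw waveform input can be decoded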
        if asr_model.frontend is None and frontend_conf is not None:
            frontend = WavFrontend(**frontend_conf)
            asr_model.frontend = frontend
        logging.info("asr_model: {}".format(asr_model))
        logging.info("asr_train_args: {}".format(asr_train_args))
        asr_model.to(dtype=getattr(torch, dtype)).eval()
@@ -145,6 +166,9 @@
        self.asr_train_args = asr_train_args
        self.converter = converter
        self.tokenizer = tokenizer
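        # with no CTC branch and no external LM there is nothing for beam search
        # to fuse; drop it and let __call__ fall back to greedy argmax decoding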
        has_lm = lm_weight == 0.0 or lm_file is None
        if ctc_weight == 0.0 and has_lm:
            beam_search = None
        self.beam_search = beam_search
        self.beam_search_transducer = beam_search_transducer
        self.maxlenratio = maxlenratio
@@ -155,12 +179,12 @@
    @torch.no_grad()
    def __call__(
            self, speech: Union[torch.Tensor, np.ndarray], speech_lengths: Union[torch.Tensor, np.ndarray] = None
    ):
        """Inference
        Args:
                speech: Input speech data
        Returns:
                text, token, token_int, hyp
@@ -172,11 +196,13 @@
            speech = torch.tensor(speech)
        # data: (Nsamples,) -> (1, Nsamples)
        if len(speech.size()) < 3:
            speech = speech.unsqueeze(0).to(getattr(torch, self.dtype))
            speech_lengths = speech.new_full([1], dtype=torch.long, fill_value=speech.size(1))
        lfr_factor = max(1, (speech.size()[-1]//80)-1)
        batch = {"speech": speech, "speech_lengths": speech_lengths}
        # a. To device
        batch = to_device(batch, device=self.device)
@@ -185,78 +211,98 @@
        enc, enc_len = self.asr_model.encode(**batch)
        if isinstance(enc, tuple):
            enc = enc[0]
        # assert len(enc) == 1, len(enc)
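        # total encoder frames across the whole batch; reported with each result
        # and used for RTF logging in inference()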
        enc_len_batch_total = torch.sum(enc_len).item()
        predictor_outs = self.asr_model.calc_predictor(enc, enc_len)
        pre_acoustic_embeds, pre_token_length = predictor_outs[0], predictor_outs[1]
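        # the predictor emits a fractional token count per utterance; round to
        # the nearest integer instead of truncating before using it as a length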
        pre_token_length = pre_token_length.round().long()
        decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length)
        decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
        results = []
        b, n, d = decoder_out.size()
        for i in range(b):
            x = enc[i, :enc_len[i], :]
            am_scores = decoder_out[i, :pre_token_length[i], :]
            if self.beam_search is not None:
                nbest_hyps = self.beam_search(
                    x=x, am_scores=am_scores, maxlenratio=self.maxlenratio, minlenratio=self.minlenratio
                )
                nbest_hyps = nbest_hyps[: self.nbest]
            else:
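                # greedy path (beam search disabled): take the argmax token at
                # each predictor position and wrap it in a single Hypothesis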
                yseq = am_scores.argmax(dim=-1)
                score = am_scores.max(dim=-1)[0]
                score = torch.sum(score, dim=-1)
                # pad with mask tokens to ensure compatibility with sos/eos tokens
                yseq = torch.tensor(
                    [self.asr_model.sos] + yseq.tolist() + [self.asr_model.eos], device=yseq.device
                )
                nbest_hyps = [Hypothesis(yseq=yseq, score=score)]
            for hyp in nbest_hyps:
                assert isinstance(hyp, (Hypothesis)), type(hyp)
                # remove sos/eos and get results
                last_pos = -1
                if isinstance(hyp.yseq, list):
                    token_int = hyp.yseq[1:last_pos]
                else:
                    token_int = hyp.yseq[1:last_pos].tolist()
                # remove blank symbol id, which is assumed to be 0
                token_int = list(filter(lambda x: x != 0, token_int))
                # Change integer-ids to tokens
                token = self.converter.ids2tokens(token_int)
                if self.tokenizer is not None:
                    text = self.tokenizer.tokens2text(token)
                else:
                    text = None
                results.append((text, token, token_int, hyp, enc_len_batch_total, lfr_factor))
        # assert check_return_type(results)
        return results
def inference(
        maxlenratio: float,
        minlenratio: float,
        batch_size: int,
        beam_size: int,
        ngpu: int,
        ctc_weight: float,
        lm_weight: float,
        penalty: float,
        log_level: Union[int, str],
        data_path_and_name_and_type,
        asr_train_config: Optional[str],
        asr_model_file: Optional[str],
        audio_lists: Union[List[Any], bytes] = None,
        lm_train_config: Optional[str] = None,
        lm_file: Optional[str] = None,
        token_type: Optional[str] = None,
        key_file: Optional[str] = None,
        word_lm_train_config: Optional[str] = None,
        bpemodel: Optional[str] = None,
        allow_variable_data_keys: bool = False,
        streaming: bool = False,
        output_dir: Optional[str] = None,
        dtype: str = "float32",
        seed: int = 0,
        ngram_weight: float = 0.9,
        nbest: int = 1,
        num_workers: int = 1,
        frontend_conf: dict = None,
        fs: Union[dict, int] = 16000,
        lang: Optional[str] = None,
        **kwargs,
):
    assert check_argument_types()
    if batch_size > 1:
        raise NotImplementedError("batch decoding is not implemented")
    if word_lm_train_config is not None:
        raise NotImplementedError("Word LM is not implemented")
    if ngpu > 1:
@@ -271,7 +317,46 @@
        device = "cuda"
    else:
        device = "cpu"
    hop_length: int = 160
    sr: int = 16000
    if isinstance(fs, int):
        sr = fs
    else:
        if 'model_fs' in fs and fs['model_fs'] is not None:
            sr = fs['model_fs']
    # data_path_and_name_and_type for modelscope: (data from audio_lists)
    # ['speech', 'sound', 'am.mvn']
    # data_path_and_name_and_type for funasr:
    # [('/mnt/data/jiangyu.xzy/exp/maas/mvn.1.scp', 'speech', 'kaldi_ark')]
    if isinstance(data_path_and_name_and_type[0], Tuple):
        features_type: str = data_path_and_name_and_type[0][1]
    elif isinstance(data_path_and_name_and_type[0], str):
        features_type: str = data_path_and_name_and_type[1]
    else:
        raise NotImplementedError("unknown features type:{0}".format(data_path_and_name_and_type))
    if features_type != 'sound':
        frontend_conf = None
        flag_modelscope = False
    else:
        flag_modelscope = True
    if frontend_conf is not None:
        if 'hop_length' in frontend_conf:
            hop_length = frontend_conf['hop_length']
    finish_count = 0
    file_count = 1
    if flag_modelscope and not isinstance(data_path_and_name_and_type[0], Tuple):
        data_path_and_name_and_type_new = [
            audio_lists, data_path_and_name_and_type[0], data_path_and_name_and_type[1]
        ]
        if isinstance(audio_lists, bytes):
            file_count = 1
        else:
            file_count = len(audio_lists)
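        # the optional third entry is a CMVN stats file; load it so WavFrontend
        # can mean/variance-normalize the extracted features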
        if len(data_path_and_name_and_type) >= 3 and frontend_conf is not None:
            mvn_file = data_path_and_name_and_type[2]
            mvn_data = wav_utils.extract_CMVN_featrures(mvn_file)
            frontend_conf['mvn_data'] = mvn_data
    # 1. Set random-seed
    set_all_random_seed(seed)
@@ -293,73 +378,107 @@
        ngram_weight=ngram_weight,
        penalty=penalty,
        nbest=nbest,
        frontend_conf=frontend_conf,
    )
    speech2text = Speech2Text(**speech2text_kwargs)
    # 3. Build data-iterator
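    # modelscope path: iterate the in-memory audio list (honoring sample_rate=fs);
    # plain funasr path: stream scp/ark entries from disk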
    if flag_modelscope:
        loader = ASRTask.build_streaming_iterator_modelscope(
            data_path_and_name_and_type_new,
            dtype=dtype,
            batch_size=batch_size,
            key_file=key_file,
            num_workers=num_workers,
            preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False),
            collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
            allow_variable_data_keys=allow_variable_data_keys,
            inference=True,
            sample_rate=fs
        )
    else:
        loader = ASRTask.build_streaming_iterator(
            data_path_and_name_and_type,
            dtype=dtype,
            batch_size=batch_size,
            key_file=key_file,
            num_workers=num_workers,
            preprocess_fn=ASRTask.build_preprocess_fn(speech2text.asr_train_args, False),
            collate_fn=ASRTask.build_collate_fn(speech2text.asr_train_args, False),
            allow_variable_data_keys=allow_variable_data_keys,
            inference=True,
        )
    forward_time_total = 0.0
    length_total = 0.0
    # 7. Start for-loop
    # FIXME(kamo): The output format should be discussed about
    asr_result_list = []
    if output_dir is not None:
        writer = DatadirWriter(output_dir)
    else:
        writer = None
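    # results are accumulated in asr_result_list and returned to the caller;
    # writing recog files to output_dir is optional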
            logging.info("decoding, utt_id: {}".format(keys))
            # N-best list of (text, token, token_int, hyp_object)
    for keys, batch in loader:
        assert isinstance(batch, dict), type(batch)
        assert all(isinstance(s, str) for s in keys), keys
        _bs = len(next(iter(batch.values())))
        assert len(keys) == _bs, f"{len(keys)} != {_bs}"
        # batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
        logging.info("decoding, utt_id: {}".format(keys))
        # N-best list of (text, token, token_int, hyp_object)
        time_beg = time.time()
        results = speech2text(**batch)
        time_end = time.time()
        forward_time = time_end - time_beg
        lfr_factor = results[0][-1]
        length = results[0][-2]
        forward_time_total += forward_time
        length_total += length
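        # RTF: frames are 10 ms each (100 per second) and `length` counts
        # LFR-stacked encoder frames, hence the length * lfr_factor term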
        logging.info(
            "decoding, feature length: {}, forward_time: {:.4f}, rtf: {:.4f}".
                format(length, forward_time, 100 * forward_time / (length*lfr_factor)))
        for batch_id in range(len(results)):
            result = [results[batch_id][:-2]]
            key = keys[batch_id]
            for n, (text, token, token_int, hyp) in zip(range(1, nbest + 1), result):
                # Create a directory: outdir/{n}best_recog
                ibest_writer = writer[f"{n}best_recog"]
                # Write the result to each file
                ibest_writer["token"][key] = " ".join(token)
                ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                ibest_writer["score"][key] = str(hyp.score)
                if writer is not None:
                    ibest_writer = writer[f"{n}best_recog"]
                    # Write the result to each file
                    ibest_writer["token"][key] = " ".join(token)
                    ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                    ibest_writer["score"][key] = str(hyp.score)
                if text is not None:
                    ibest_writer["text"][key] = text
                logging.info("decoding, predictions: {}".format(text))
                    text_postprocessed = postprocess_utils.sentence_postprocess(token)
                    item = {'key': key, 'value': text_postprocessed}
                    asr_result_list.append(item)
                    finish_count += 1
                    asr_utils.print_progress(finish_count / file_count)
                    if writer is not None:
                        ibest_writer["text"][key] = text
                logging.info("decoding, utt: {}, predictions: {}".format(key, text))
    logging.info("decoding, feature length total: {}, forward_time total: {:.4f}, rtf avg: {:.4f}".
                 format(length_total, forward_time_total, 100 * forward_time_total / (length_total*lfr_factor)))
    return asr_result_list
def set_parameters(language: str = None,
                   sample_rate: Union[int, Dict[Any, int]] = None):
    if language is not None:
        global global_asr_language
        global_asr_language = language
    if sample_rate is not None:
        global global_sample_rate
        global_sample_rate = sample_rate
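# A minimal usage sketch for the modelscope path (illustrative only: the file
# names, paths and weights below are placeholders, not values from this repo):
#
#   results = inference(
#       maxlenratio=0.0, minlenratio=0.0, batch_size=1, beam_size=1, ngpu=1,
#       ctc_weight=0.0, lm_weight=0.0, penalty=0.0, log_level="INFO",
#       data_path_and_name_and_type=['speech', 'sound', 'am.mvn'],
#       asr_train_config='config.yaml', asr_model_file='model.pb',
#       audio_lists=[{'key': 'utt1', 'file': '/path/to/utt1.wav'}],
#       frontend_conf={'fs': 16000}, fs=16000,
#   )
#   # -> [{'key': 'utt1', 'value': '<post-processed transcription>'}]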
def get_parser():
@@ -494,6 +613,8 @@
        default=None,
        help="",
    )
    group.add_argument("--audio_lists", type=list, default=None)
    # example=[{'key':'EdevDEWdIYQ_0021','file':'/mnt/data/jiangyu.xzy/test_data/speech_io/SPEECHIO_ASR_ZH00007_zhibodaihuo/wav/EdevDEWdIYQ_0021.wav'}])
    group = parser.add_argument_group("Text converter related")
    group.add_argument(