雾聪
2024-03-14 0cf5dfec2c8313fc2ed2aab8d10bf3dc4b9c283f
funasr/models/lcbnet/model.py
@@ -1,3 +1,8 @@
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
#  MIT License  (https://opensource.org/licenses/MIT)
import logging
from typing import Union, Dict, List, Tuple, Optional
@@ -17,10 +22,14 @@
from funasr.utils.datadir_writer import DatadirWriter
from funasr.register import tables
@tables.register("model_classes", "Transformer")
class Transformer(nn.Module):
    """CTC-attention hybrid Encoder-Decoder model"""
import pdb
@tables.register("model_classes", "LCBNet")
class LCBNet(nn.Module):
    """
    Author: Speech Lab of DAMO Academy, Alibaba Group
    LCB-NET: LONG-CONTEXT BIASING FOR AUDIO-VISUAL SPEECH RECOGNITION
    https://arxiv.org/abs/2401.06390
    """
    
    def __init__(
        self,
@@ -32,10 +41,19 @@
        encoder_conf: dict = None,
        decoder: str = None,
        decoder_conf: dict = None,
        text_encoder: str = None,
        text_encoder_conf: dict = None,
        bias_predictor: str = None,
        bias_predictor_conf: dict = None,
        fusion_encoder: str = None,
        fusion_encoder_conf: dict = None,
        ctc: str = None,
        ctc_conf: dict = None,
        ctc_weight: float = 0.5,
        interctc_weight: float = 0.0,
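        # LCBNet-specific options. Judging by the names, select_num and
        # select_length control biasing-candidate selection, and insert_blank
        # toggles blank insertion; their actual use is outside this hunk.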
        select_num: int = 2,
        select_length: int = 3,
        insert_blank: bool = True,
        input_size: int = 80,
        vocab_size: int = -1,
        ignore_id: int = -1,
@@ -66,6 +84,16 @@
        encoder_class = tables.encoder_classes.get(encoder)
        encoder = encoder_class(input_size=input_size, **encoder_conf)
        encoder_output_size = encoder.output_size()
        # lcbnet modules: text encoder, fusion encoder and bias predictor
        text_encoder_class = tables.encoder_classes.get(text_encoder)
        text_encoder = text_encoder_class(input_size=vocab_size, **text_encoder_conf)
        fusion_encoder_class = tables.encoder_classes.get(fusion_encoder)
        fusion_encoder = fusion_encoder_class(**fusion_encoder_conf)
        bias_predictor_class = tables.encoder_classes.get(bias_predictor)
        bias_predictor = bias_predictor_class(**bias_predictor_conf)
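        # All three LCBNet modules are resolved through the shared encoder
        # registry; note the text encoder takes vocab_size as input_size,
        # i.e. it consumes token ids rather than acoustic features.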
        if decoder is not None:
            decoder_class = tables.decoder_classes.get(decoder)
            decoder = decoder_class(
@@ -83,14 +111,21 @@
            )
    
        self.blank_id = blank_id
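        # sos/eos are pinned to the last vocabulary entry; any sos/eos
        # constructor arguments are effectively ignored here.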
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.ctc_weight = ctc_weight
        self.specaug = specaug
        self.normalize = normalize
        self.encoder = encoder
        # lcbnet
        self.text_encoder = text_encoder
        self.fusion_encoder = fusion_encoder
        self.bias_predictor = bias_predictor
        self.select_num = select_num
        self.select_length = select_length
        self.insert_blank = insert_blank
        if not hasattr(self.encoder, "interctc_use_conditioning"):
            self.encoder.interctc_use_conditioning = False
@@ -146,8 +181,7 @@
                text: (Batch, Length)
                text_lengths: (Batch,)
        """
        if len(text_lengths.size()) > 1:
            text_lengths = text_lengths[:, 0]
        if len(speech_lengths.size()) > 1:
@@ -239,15 +273,12 @@
                ind: int
        """
        with autocast(False):
            # Data augmentation
            if self.specaug is not None and self.training:
                speech, speech_lengths = self.specaug(speech, speech_lengths)
            # Normalization for feature: e.g. Global-CMVN, Utterance-CMVN
            if self.normalize is not None:
                speech, speech_lengths = self.normalize(speech, speech_lengths)
        # Forward encoder
        # feats: (Batch, Length, Dim)
        # -> encoder_out: (Batch, Length2, Dim2)
@@ -264,7 +295,6 @@
        
        if intermediate_outs is not None:
            return (encoder_out, intermediate_outs), encoder_out_lens
        return encoder_out, encoder_out_lens
    
    def _calc_att_loss(
@@ -344,14 +374,14 @@
        scorers["ngram"] = ngram
        
        weights = dict(
            decoder=1.0 - kwargs.get("decoding_ctc_weight", 0.5),
            ctc=kwargs.get("decoding_ctc_weight", 0.5),
            decoder=1.0 - kwargs.get("decoding_ctc_weight", 0.3),
            ctc=kwargs.get("decoding_ctc_weight", 0.3),
            lm=kwargs.get("lm_weight", 0.0),
            ngram=kwargs.get("ngram_weight", 0.0),
            length_bonus=kwargs.get("penalty", 0.0),
        )
        beam_search = BeamSearch(
            beam_size=kwargs.get("beam_size", 10),
            beam_size=kwargs.get("beam_size", 20),
            weights=weights,
            scorers=scorers,
            sos=self.sos,
@@ -391,16 +421,22 @@
        else:
            # extract fbank feats
            time1 = time.perf_counter()
            sample_list = load_audio_text_image_video(
                data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000),
                data_type=kwargs.get("data_type", "sound"), tokenizer=tokenizer)
            time2 = time.perf_counter()
            meta_data["load_data"] = f"{time2 - time1:0.3f}"
            audio_sample_list = sample_list[0]
            if len(sample_list) > 1:
                ocr_sample_list = sample_list[1]
            else:
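                # no OCR stream in the input: fall back to a fixed dummy token
                # list (ids 294/0 look like placeholder token and blank ids,
                # an assumption from this hunk alone)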
                ocr_sample_list = [[294, 0]]
            speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
                                                   frontend=frontend)
            time3 = time.perf_counter()
            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
            meta_data["batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
            frame_shift = 10
            meta_data["batch_data_time"] = speech_lengths.sum().item() * frame_shift / 1000
        speech = speech.to(device=kwargs["device"])
        speech_lengths = speech_lengths.to(device=kwargs["device"])
@@ -408,14 +444,19 @@
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        if isinstance(encoder_out, tuple):
            encoder_out = encoder_out[0]
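        # shift every non-zero OCR token id up by one while keeping 0 in place
        # (presumably 0 is the blank/pad id and the text-encoder embedding is
        # offset by one; an assumption from this hunk alone)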
        ocr_list_new = [[x + 1 if x != 0 else x for x in sublist] for sublist in ocr_sample_list]
        ocr = torch.tensor(ocr_list_new).to(device=kwargs["device"])
        ocr_lengths = ocr.new_full([1], dtype=torch.long, fill_value=ocr.size(1)).to(device=kwargs["device"])
        ocr, ocr_lens, _ = self.text_encoder(ocr, ocr_lengths)
        fusion_out, _, _, _ = self.fusion_encoder(encoder_out, None, ocr, None)
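        # residual fusion: add the text-conditioned features back onto the
        # acoustic encoder output before decoding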
        encoder_out = encoder_out + fusion_out
        # c. Pass the encoder result to the beam search
        nbest_hyps = self.beam_search(
            x=encoder_out[0], maxlenratio=kwargs.get("maxlenratio", 0.0), minlenratio=kwargs.get("minlenratio", 0.0)
        )
        
        nbest_hyps = nbest_hyps[: self.nbest]
        results = []
        b, n, d = encoder_out.size()
@@ -441,7 +482,7 @@
                # Change integer-ids to tokens
                token = tokenizer.ids2tokens(token_int)
                text = tokenizer.tokens2text(token)
                text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
                result_i = {"key": key[i], "token": token, "text": text_postprocessed}
                results.append(result_i)
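
# ------------------------------------------------------------------------------
# Minimal usage sketch (not part of this commit), assuming LCBNet is exposed
# through FunASR's AutoModel interface; the model id below is hypothetical and
# the biasing-text input format may differ from what inference() expects here.
#
#     from funasr import AutoModel
#
#     model = AutoModel(model="iic/LCB-NET")  # hypothetical model id
#     res = model.generate(input="asr_example.wav")
#     print(res)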