游雁
2024-03-21 bbda5496ffae1d9ab052e8736a8c0b080ea017f5
funasr/models/contextual_paraformer/model.py
@@ -17,9 +17,6 @@
from distutils.version import LooseVersion
from funasr.register import tables
from funasr.losses.label_smoothing_loss import (
    LabelSmoothingLoss,  # noqa: H301
)
from funasr.utils import postprocess_utils
from funasr.metrics.compute_acc import th_accuracy
from funasr.models.paraformer.model import Paraformer
@@ -29,7 +26,7 @@
from funasr.models.transformer.utils.add_sos_eos import add_sos_eos
from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
    from torch.cuda.amp import autocast
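# Note: the guard above keeps compatibility with torch < 1.6, where
# torch.cuda.amp.autocast does not exist. A minimal usage sketch (illustrative
# only, not part of this module):
#     with autocast(enabled=True):
#         loss, stats, weight = model(speech, speech_lengths, text, text_lengths)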
@@ -81,7 +78,6 @@
            self.attn_loss = torch.nn.L1Loss()
        self.crit_attn_smooth = crit_attn_smooth
    def forward(
        self,
        speech: torch.Tensor,
@@ -98,16 +94,14 @@
                text: (Batch, Length)
                text_lengths: (Batch,)
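                kwargs (assumed layout, matching the retrieval below):
                    hotword_pad: padded hotword token ids
                    hotword_lengths: length of each hotword entry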
        """
        # normalize length tensors of shape (Batch, 1) down to (Batch,);
        # a bare .squeeze() is avoided here because it would also collapse a
        # batch of size 1 into a 0-dim scalar
        if len(text_lengths.size()) > 1:
            text_lengths = text_lengths[:, 0]
        if len(speech_lengths.size()) > 1:
            speech_lengths = speech_lengths[:, 0]
        batch_size = speech.shape[0]
        hotword_pad = kwargs.get("hotword_pad")
        hotword_lengths = kwargs.get("hotword_lengths")
        dha_pad = kwargs.get("dha_pad")
        # 1. Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
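        # shape note: encoder_out is (Batch, T_enc, D) and encoder_out_lens is
        # (Batch,), following the conventions in the Args docstring above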
@@ -156,7 +150,6 @@
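        # force_gatherable moves loss/stats/weight onto a common device so they
        # can be gathered correctly under (Distributed)DataParallel training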
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
    
    def _calc_att_clas_loss(
        self,
        encoder_out: torch.Tensor,
@@ -189,13 +182,10 @@
        # 0. sampler
        decoder_out_1st = None
        if self.sampling_ratio > 0.0:
            if self.step_cur < 2:
                logging.info("enable sampler in paraformer, sampling_ratio: {}".format(self.sampling_ratio))
            sematic_embeds, decoder_out_1st = self.sampler(encoder_out, encoder_out_lens, ys_pad, ys_pad_lens,
                                                           pre_acoustic_embeds, contextual_info)
        else:
            if self.step_cur < 2:
                logging.info("disable sampler in paraformer, sampling_ratio: {}".format(self.sampling_ratio))
            sematic_embeds = pre_acoustic_embeds
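        # Either way, sematic_embeds feeds the decoder: with sampling enabled it
        # mixes predictor embeddings with ground-truth char embeddings (see
        # sampler() below); otherwise it is the raw predictor output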
        # 1. Forward decoder
@@ -234,7 +224,6 @@
        
        return loss_att, acc_att, cer_att, wer_att, loss_pre, loss_ideal
    
    def sampler(self, encoder_out, encoder_out_lens, ys_pad, ys_pad_lens, pre_acoustic_embeds, contextual_info):
        tgt_mask = (~make_pad_mask(ys_pad_lens, maxlen=ys_pad_lens.max())[:, :, None]).to(ys_pad.device)
        ys_pad = ys_pad * tgt_mask[:, :, 0]
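        # tgt_mask: (Batch, Length, 1), True at real tokens and False at padding
        # (inverted pad mask); padded target ids are zeroed before embedding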
@@ -266,7 +255,6 @@
        sematic_embeds = pre_acoustic_embeds.masked_fill(~input_mask_expand_dim, 0) + ys_pad_embed.masked_fill(
            input_mask_expand_dim, 0)
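        # The masked_fill pair above keeps the predictor's acoustic embedding
        # where input_mask is True and substitutes the ground-truth token
        # embedding where it is False (positions chosen by the sampling step
        # elided from this hunk); both terms are re-masked at padding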
        return sematic_embeds * tgt_mask, decoder_out * tgt_mask
    
    def cal_decoder_with_predictor(self, encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens, hw_list=None,
                                   clas_scale=1.0):
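        # Assumed semantics: hw_list carries the encoded hotword (bias) entries
        # and clas_scale weights their contribution during decoding; the method
        # body is elided from this hunk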
@@ -417,7 +405,6 @@
        
        return results, meta_data
    def generate_hotwords_list(self, hotword_list_or_file, tokenizer=None, frontend=None):
        def load_seg_dict(seg_dict_file):
            seg_dict = {}
@@ -519,3 +506,12 @@
            hotword_list = None
        return hotword_list
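    # Usage sketch (hedged: mirrors how FunASR passes hotwords at inference,
    # where hotword_list_or_file may be a space-separated string or a file):
    #     hw_list = self.generate_hotwords_list("alibaba damo", tokenizer=tokenizer)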
    def export(
        self,
        **kwargs,
    ):
        if "max_seq_len" not in kwargs:
            kwargs["max_seq_len"] = 512
        from .export_meta import export_rebuild_model
        models = export_rebuild_model(model=self, **kwargs)
        return models
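    # Usage sketch (illustrative; export_rebuild_model handles the actual
    # conversion, and max_seq_len falls back to 512 as set above):
    #     export_models = model.export(type="onnx")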