语帆
2024-02-21 a0ffe57b05679d91e56227ce1109a5d725d93192
funasr/models/contextual_paraformer/model.py
@@ -63,7 +63,6 @@
         crit_attn_smooth = kwargs.get("crit_attn_smooth", 0.0)
         bias_encoder_dropout_rate = kwargs.get("bias_encoder_dropout_rate", 0.0)
-        pdb.set_trace()
         if bias_encoder_type == 'lstm':
             self.bias_encoder = torch.nn.LSTM(inner_dim, inner_dim, 1, batch_first=True, dropout=bias_encoder_dropout_rate)
             self.bias_embed = torch.nn.Embedding(self.vocab_size, inner_dim)
@@ -81,7 +80,6 @@
         if self.crit_attn_weight > 0:
             self.attn_loss = torch.nn.L1Loss()
         self.crit_attn_smooth = crit_attn_smooth
-        pdb.set_trace()

     def forward(
@@ -313,20 +311,24 @@
                  **kwargs,
                  ):
         # init beamsearch
+        pdb.set_trace()
         is_use_ctc = kwargs.get("decoding_ctc_weight", 0.0) > 0.00001 and self.ctc != None
         is_use_lm = kwargs.get("lm_weight", 0.0) > 0.00001 and kwargs.get("lm_file", None) is not None
         if self.beam_search is None and (is_use_lm or is_use_ctc):
             logging.info("enable beam_search")
             self.init_beam_search(**kwargs)
             self.nbest = kwargs.get("nbest", 1)
+        pdb.set_trace()
         meta_data = {}

         # extract fbank feats
         time1 = time.perf_counter()
+        pdb.set_trace()
         audio_sample_list = load_audio_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+        pdb.set_trace()
         time2 = time.perf_counter()
         meta_data["load_data"] = f"{time2 - time1:0.3f}"
+        pdb.set_trace()
         speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
                                                frontend=frontend)
         time3 = time.perf_counter()
@@ -334,38 +336,50 @@
         meta_data[
             "batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000

+        pdb.set_trace()
         speech = speech.to(device=kwargs["device"])
         speech_lengths = speech_lengths.to(device=kwargs["device"])
         # hotword
+        pdb.set_trace()
         self.hotword_list = self.generate_hotwords_list(kwargs.get("hotword", None), tokenizer=tokenizer, frontend=frontend)
+        pdb.set_trace()

         # Encoder
         encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
         if isinstance(encoder_out, tuple):
             encoder_out = encoder_out[0]
+        pdb.set_trace()
         # predictor
         predictor_outs = self.calc_predictor(encoder_out, encoder_out_lens)
         pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index = predictor_outs[0], predictor_outs[1], \
                                                                         predictor_outs[2], predictor_outs[3]
+        pdb.set_trace()
         pre_token_length = pre_token_length.round().long()
         if torch.max(pre_token_length) < 1:
             return []
+        pdb.set_trace()
         decoder_outs = self.cal_decoder_with_predictor(encoder_out, encoder_out_lens,
                                                                  pre_acoustic_embeds,
                                                                  pre_token_length,
                                                                  hw_list=self.hotword_list,
                                                                  clas_scale=kwargs.get("clas_scale", 1.0))
+        pdb.set_trace()
         decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]

+        pdb.set_trace()
         results = []
         b, n, d = decoder_out.size()
+        pdb.set_trace()
         for i in range(b):
             x = encoder_out[i, :encoder_out_lens[i], :]
             am_scores = decoder_out[i, :pre_token_length[i], :]
+            pdb.set_trace()
             if self.beam_search is not None:
                 nbest_hyps = self.beam_search(
                     x=x, am_scores=am_scores, maxlenratio=kwargs.get("maxlenratio", 0.0),