语帆
2024-02-28 39de3adfbc12bc491f6da9eb9ffdc5122a3f623d
funasr/models/lcbnet/model.py
@@ -438,15 +438,17 @@
        speech = speech.to(device=kwargs["device"])
        speech_lengths = speech_lengths.to(device=kwargs["device"])
        # Encoder
        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
        if isinstance(encoder_out, tuple):
            encoder_out = encoder_out[0]
        # Encode the OCR text and fuse it with the acoustic encoder output
        ocr_list_new = [[x + 1 if x != 0 else x for x in sublist] for sublist in ocr_sample_list]
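        # e.g. ocr_sample_list = [[2, 0, 7]] gives ocr_list_new = [[3, 0, 8]]:
        # non-zero token ids are shifted by one while 0 is kept (presumably the padding/blank id)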
        ocr = torch.tensor(ocr_list_new)
        ocr_lengths = ocr.new_full([1], dtype=torch.long, fill_value=ocr.size(1))
        ocr, ocr_lens, _ = self.text_encoder(ocr, ocr_lengths)
        fusion_out, _, _, _ = self.fusion_encoder(encoder_out, None, ocr, None)
        encoder_out = encoder_out + fusion_out
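        # encoder_out now carries the OCR-biased acoustic representation that is passed to beam search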
        # c. Pass the encoder result to the beam search
        nbest_hyps = self.beam_search(
@@ -455,7 +457,7 @@
        
        nbest_hyps = nbest_hyps[: self.nbest]
        results = []
        b, n, d = encoder_out.size()
        for i in range(b):
@@ -477,10 +479,13 @@
                # remove sos, eos and blank symbol ids (blank is assumed to be 0)
                token_int = list(filter(lambda x: x != self.eos and x != self.sos and x != self.blank_id, token_int))
                
                # Change integer ids back to tokens, then to text
                token = tokenizer.ids2tokens(token_int)
                text = tokenizer.tokens2text(token)
                text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
                result_i = {"key": key[i], "token": token, "text": text_postprocessed}
                results.append(result_i)