北念
2023-02-21 f13cfbc18e6b7e37d4e5a515cf18411aa0c56d55
funasr/models/e2e_uni_asr.py
@@ -206,7 +206,7 @@
             with torch.no_grad():
                 speech_raw, encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, ind=ind)
         else:
-            speech_raw, encoder_out_lens = self.encode(speech, speech_lengths, ind=ind)
+            speech_raw, encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, ind=ind)
         intermediate_outs = None
         if isinstance(encoder_out, tuple):