北念
2023-02-09 16d4e0054986cd5036cc311cc45fa6dff36cc9da
funasr/models/e2e_uni_asr.py
@@ -206,7 +206,7 @@
            with torch.no_grad():
                speech_raw, encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, ind=ind)
        else:
-            speech_raw, encoder_out_lens = self.encode(speech, speech_lengths, ind=ind)
+            speech_raw, encoder_out, encoder_out_lens = self.encode(speech, speech_lengths, ind=ind)
        intermediate_outs = None
        if isinstance(encoder_out, tuple):