funasr/models/llm_asr/model.py
@@ -687,10 +687,8 @@
 
         # fp16
        if kwargs.get("fp16", False):
            speech = speech.to(torch.float16)
-            encoder_out_lens = encoder_out_lens.to(torch.float16)
        elif kwargs.get("bf16", False):
            speech = speech.to(torch.bfloat16)
-            encoder_out_lens = encoder_out_lens.to(torch.bfloat16)
        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
        # audio_adaptor