From 1b9ac4f7a2eb600e5b769e7294cb4a8f4ec31b63 Mon Sep 17 00:00:00 2001
From: TeaPoly <lekai.huang@gmail.com>
Date: Fri, 02 Dec 2022 12:00:22 +0800
Subject: [PATCH] Fix some issues to make batch inference easier for the predictor and decoder.
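
Before this change, pre_token_length was overwritten with a single value taken
from the padded time dimension of pre_acoustic_embeds, which is only correct
when the batch size is 1. Keeping the per-utterance lengths returned by the
predictor (cast to int64) lets the decoder work on batched inputs. A minimal
sketch of the difference follows; the tensor names come from the patch, while
the shapes and values (a batch of two utterances with 5 and 3 predicted tokens)
are illustrative assumptions only:

    import torch

    # Assumed example: batch of two utterances, predictor emits 5 and 3 tokens.
    pre_acoustic_embeds = torch.zeros(2, 5, 512)   # padded to the longest utterance
    pre_token_length = torch.tensor([5.0, 3.0])    # per-utterance lengths from the predictor

    # Old code: one length taken from the padded time axis, valid only for batch size 1.
    old = torch.tensor([pre_acoustic_embeds.size(1)], device=pre_acoustic_embeds.device)
    # -> tensor([5]): a single length shared by the whole batch.

    # New code: keep the predictor's per-utterance lengths, cast to int64 for the decoder.
    new = pre_token_length.long()
    # -> tensor([5, 3]): one length per utterance.
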
---
funasr/bin/asr_inference_paraformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/funasr/bin/asr_inference_paraformer.py b/funasr/bin/asr_inference_paraformer.py
index ed75010..179a62b 100755
--- a/funasr/bin/asr_inference_paraformer.py
+++ b/funasr/bin/asr_inference_paraformer.py
@@ -189,7 +189,7 @@
predictor_outs = self.asr_model.calc_predictor(enc, enc_len)
pre_acoustic_embeds, pre_token_length = predictor_outs[0], predictor_outs[1]
- pre_token_length = torch.tensor([pre_acoustic_embeds.size(1)], device=pre_acoustic_embeds.device)
+ pre_token_length = pre_token_length.long()
decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length)
decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
--
Gitblit v1.9.1