From dee1354d0d984df21d16a2eba5bacec31bfb0b4b Mon Sep 17 00:00:00 2001
From: 维石 <shixian.shi@alibaba-inc.com>
Date: Fri, 19 Apr 2024 14:57:31 +0800
Subject: [PATCH] empty result bug fix

---
 funasr/models/paraformer/model.py |    6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/funasr/models/paraformer/model.py b/funasr/models/paraformer/model.py
index 316255d..6c7957c 100644
--- a/funasr/models/paraformer/model.py
+++ b/funasr/models/paraformer/model.py
@@ -181,15 +181,12 @@
                 text: (Batch, Length)
                 text_lengths: (Batch,)
         """
-        # import pdb;
-        # pdb.set_trace()
         if len(text_lengths.size()) > 1:
             text_lengths = text_lengths[:, 0]
         if len(speech_lengths.size()) > 1:
             speech_lengths = speech_lengths[:, 0]
         
         batch_size = speech.shape[0]
-        
         
         # Encoder
         encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
@@ -231,6 +228,7 @@
         stats["loss_pre"] = loss_pre.detach().cpu() if loss_pre is not None else None
         
         stats["loss"] = torch.clone(loss.detach())
+        stats["batch_size"] = batch_size
         
         # force_gatherable: to-device and to-tensor if scalar for DataParallel
         if self.length_normalized_loss:
@@ -474,6 +472,8 @@
         speech = speech.to(device=kwargs["device"])
         speech_lengths = speech_lengths.to(device=kwargs["device"])
         # Encoder
+        if kwargs.get("fp16", False):
+            speech = speech.half()
         encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
         if isinstance(encoder_out, tuple):
             encoder_out = encoder_out[0]

--
Gitblit v1.9.1