From 930fe72f43524b4e355ef671c7180cc6cf9eefb5 Mon Sep 17 00:00:00 2001
From: 雾聪 <wucong.lyb@alibaba-inc.com>
Date: Mon, 01 Apr 2024 15:27:20 +0800
Subject: [PATCH] set batch default value

---
 funasr/models/bicif_paraformer/model.py |   16 +++++++---------
 1 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/funasr/models/bicif_paraformer/model.py b/funasr/models/bicif_paraformer/model.py
index eb7318b..9849c8c 100644
--- a/funasr/models/bicif_paraformer/model.py
+++ b/funasr/models/bicif_paraformer/model.py
@@ -23,7 +23,7 @@
 from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
 from funasr.models.transformer.utils.nets_utils import make_pad_mask, pad_list
 from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
-
+from funasr.train_utils.device_funcs import to_device
 
 if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
     from torch.cuda.amp import autocast
@@ -348,6 +348,7 @@
         max_seq_len=512,
         **kwargs,
     ):
+        self.device = kwargs.get("device")
         is_onnx = kwargs.get("type", "onnx") == "onnx"
         encoder_class = tables.encoder_classes.get(kwargs["encoder"] + "Export")
         self.encoder = encoder_class(self.encoder, onnx=is_onnx)
@@ -358,26 +359,23 @@
         decoder_class = tables.decoder_classes.get(kwargs["decoder"] + "Export")
         self.decoder = decoder_class(self.decoder, onnx=is_onnx)
     
-        from funasr.utils.torch_function import MakePadMask
         from funasr.utils.torch_function import sequence_mask
+
+        self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
     
-        if is_onnx:
-            self.make_pad_mask = MakePadMask(max_seq_len, flip=False)
-        else:
-            self.make_pad_mask = sequence_mask(max_seq_len, flip=False)
     
-        self.forward = self._export_forward
+        self.forward = self.export_forward
     
         return self
 
-    def _export_forward(
+    def export_forward(
         self,
         speech: torch.Tensor,
         speech_lengths: torch.Tensor,
     ):
         # a. To device
         batch = {"speech": speech, "speech_lengths": speech_lengths}
-        # batch = to_device(batch, device=self.device)
+        batch = to_device(batch, device=self.device)
     
         enc, enc_len = self.encoder(**batch)
         mask = self.make_pad_mask(enc_len)[:, None, :]

--
Gitblit v1.9.1