From bcb8b0c3cbbdc7b3c246a28b3941a9f4301deac0 Mon Sep 17 00:00:00 2001
From: shixian.shi <shixian.shi@alibaba-inc.com>
Date: Fri, 12 Jan 2024 17:59:15 +0800
Subject: [PATCH] inference: log vad/punc/spk model builds; seaco_paraformer: fix no-op device transfer (WIP: still contains a pdb breakpoint)

---
 funasr/bin/inference.py                 |    5 ++++-
 funasr/models/seaco_paraformer/model.py |    3 ++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/funasr/bin/inference.py b/funasr/bin/inference.py
index 7d9c1b9..5ba0474 100644
--- a/funasr/bin/inference.py
+++ b/funasr/bin/inference.py
@@ -115,7 +115,7 @@
         vad_model = kwargs.get("vad_model", None)
         vad_kwargs = kwargs.get("vad_model_revision", None)
         if vad_model is not None:
-            print("build vad model")
+            logging.info("Building VAD model.")
             vad_kwargs = {"model": vad_model, "model_revision": vad_kwargs}
             vad_model, vad_kwargs = self.build_model(**vad_kwargs)
 
@@ -123,6 +123,7 @@
         punc_model = kwargs.get("punc_model", None)
         punc_kwargs = kwargs.get("punc_model_revision", None)
         if punc_model is not None:
+            logging.info("Building punc model.")
             punc_kwargs = {"model": punc_model, "model_revision": punc_kwargs}
             punc_model, punc_kwargs = self.build_model(**punc_kwargs)
 
@@ -130,6 +131,7 @@
         spk_model = kwargs.get("spk_model", None)
         spk_kwargs = kwargs.get("spk_model_revision", None)
         if spk_model is not None:
+            logging.info("Building SPK model.")
             spk_kwargs = {"model": spk_model, "model_revision": spk_kwargs}
             spk_model, spk_kwargs = self.build_model(**spk_kwargs)
             self.cb_model = ClusterBackend()
@@ -166,6 +168,7 @@
             device = "cpu"
             # kwargs["batch_size"] = 1
         kwargs["device"] = device
+        import pdb; pdb.set_trace()
         
         if kwargs.get("ncpu", None):
             torch.set_num_threads(kwargs.get("ncpu"))
diff --git a/funasr/models/seaco_paraformer/model.py b/funasr/models/seaco_paraformer/model.py
index 070b622..4f6c176 100644
--- a/funasr/models/seaco_paraformer/model.py
+++ b/funasr/models/seaco_paraformer/model.py
@@ -337,7 +337,8 @@
         meta_data[
             "batch_data_time"] = speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000
         
-        speech.to(device=kwargs["device"]), speech_lengths.to(device=kwargs["device"])
+        speech = speech.to(device=kwargs["device"])
+        speech_lengths = speech_lengths.to(device=kwargs["device"])
 
         # hotword
         self.hotword_list = self.generate_hotwords_list(kwargs.get("hotword", None), tokenizer=tokenizer, frontend=frontend)

--
Gitblit v1.9.1