From 6d2746db80e770f8a4320172cca26212ed652936 Mon Sep 17 00:00:00 2001
From: jmwang66 <wangjiaming.wjm@alibaba-inc.com>
Date: Wed, 28 Dec 2022 11:51:17 +0800
Subject: [PATCH] silence verbose logging in ASR inference scripts

---
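Note for reviewers: this patch comments the verbose logging.info() calls out
rather than deleting them, so they can be restored by uncommenting. A less
invasive alternative (a sketch only, not part of this patch) would be to
demote the per-utterance messages to DEBUG, or to raise the level of the root
logger these scripts already use, e.g. at a call site such as the one in
asr_inference_paraformer.py:

    import logging

    # Demote the per-utterance message so it stays available when the
    # application opts into DEBUG-level output:
    logging.debug("decoding, utt_id: {}".format(keys))

    # Or suppress INFO output globally without touching any call site:
    logging.getLogger().setLevel(logging.WARNING)

Either variant keeps the diagnostics recoverable at runtime instead of
requiring a code edit.
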
 funasr/bin/asr_inference.py            |    6 +++---
 funasr/bin/asr_inference_paraformer.py |   12 ++++++------
 funasr/bin/asr_inference_uniasr.py     |    6 +++---
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/funasr/bin/asr_inference.py b/funasr/bin/asr_inference.py
index bd5d7f4..b937f88 100755
--- a/funasr/bin/asr_inference.py
+++ b/funasr/bin/asr_inference.py
@@ -100,8 +100,8 @@
         if asr_model.frontend is None and frontend_conf is not None:
             frontend = WavFrontend(**frontend_conf)
             asr_model.frontend = frontend
-        logging.info("asr_model: {}".format(asr_model))
-        logging.info("asr_train_args: {}".format(asr_train_args))
+        # logging.info("asr_model: {}".format(asr_model))
+        # logging.info("asr_train_args: {}".format(asr_train_args))
         asr_model.to(dtype=getattr(torch, dtype)).eval()
 
         decoder = asr_model.decoder
@@ -164,7 +164,7 @@
         else:
             tokenizer = build_tokenizer(token_type=token_type)
         converter = TokenIDConverter(token_list=token_list)
-        logging.info(f"Text tokenizer: {tokenizer}")
+        # logging.info(f"Text tokenizer: {tokenizer}")
 
         self.asr_model = asr_model
         self.asr_train_args = asr_train_args
diff --git a/funasr/bin/asr_inference_paraformer.py b/funasr/bin/asr_inference_paraformer.py
index 5673403..09c61bc 100755
--- a/funasr/bin/asr_inference_paraformer.py
+++ b/funasr/bin/asr_inference_paraformer.py
@@ -92,8 +92,8 @@
         if asr_model.frontend is None and frontend_conf is not None:
             frontend = WavFrontend(**frontend_conf)
             asr_model.frontend = frontend
-        logging.info("asr_model: {}".format(asr_model))
-        logging.info("asr_train_args: {}".format(asr_train_args))
+        # logging.info("asr_model: {}".format(asr_model))
+        # logging.info("asr_train_args: {}".format(asr_train_args))
         asr_model.to(dtype=getattr(torch, dtype)).eval()
 
         ctc = CTCPrefixScorer(ctc=asr_model.ctc, eos=asr_model.eos)
@@ -141,8 +141,8 @@
         for scorer in scorers.values():
             if isinstance(scorer, torch.nn.Module):
                 scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
-        logging.info(f"Beam_search: {beam_search}")
-        logging.info(f"Decoding device={device}, dtype={dtype}")
+        # logging.info(f"Beam_search: {beam_search}")
+        # logging.info(f"Decoding device={device}, dtype={dtype}")
 
         # 5. [Optional] Build Text converter: e.g. bpe-sym -> Text
         if token_type is None:
@@ -160,7 +160,7 @@
         else:
             tokenizer = build_tokenizer(token_type=token_type)
         converter = TokenIDConverter(token_list=token_list)
-        logging.info(f"Text tokenizer: {tokenizer}")
+        # logging.info(f"Text tokenizer: {tokenizer}")
 
         self.asr_model = asr_model
         self.asr_train_args = asr_train_args
@@ -426,7 +426,7 @@
         assert len(keys) == _bs, f"{len(keys)} != {_bs}"
         # batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
 
-        logging.info("decoding, utt_id: {}".format(keys))
+        # logging.info("decoding, utt_id: {}".format(keys))
         # N-best list of (text, token, token_int, hyp_object)
 
         time_beg = time.time()
diff --git a/funasr/bin/asr_inference_uniasr.py b/funasr/bin/asr_inference_uniasr.py
index a1a23ba..9aea1a3 100755
--- a/funasr/bin/asr_inference_uniasr.py
+++ b/funasr/bin/asr_inference_uniasr.py
@@ -148,8 +148,8 @@
         for scorer in scorers.values():
             if isinstance(scorer, torch.nn.Module):
                 scorer.to(device=device, dtype=getattr(torch, dtype)).eval()
-        logging.info(f"Beam_search: {beam_search}")
-        logging.info(f"Decoding device={device}, dtype={dtype}")
+        # logging.info(f"Beam_search: {beam_search}")
+        # logging.info(f"Decoding device={device}, dtype={dtype}")
 
         # 5. [Optional] Build Text converter: e.g. bpe-sym -> Text
         if token_type is None:
@@ -167,7 +167,7 @@
         else:
             tokenizer = build_tokenizer(token_type=token_type)
         converter = TokenIDConverter(token_list=token_list)
-        logging.info(f"Text tokenizer: {tokenizer}")
+        # logging.info(f"Text tokenizer: {tokenizer}")
 
         self.asr_model = asr_model
         self.asr_train_args = asr_train_args

--
Gitblit v1.9.1