From 296d73f7d647f63032380d19ad4f4e261f995c55 Mon Sep 17 00:00:00 2001
From: shixian.shi <shixian.shi@alibaba-inc.com>
Date: Mon, 15 Jan 2024 20:47:16 +0800
Subject: [PATCH] inference: use logging for model-build messages, drop .lower() on registry lookups, rename generate->inference call, simplify VAD flow

---
 funasr/bin/inference.py |   26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)

diff --git a/funasr/bin/inference.py b/funasr/bin/inference.py
index 7d9c1b9..7368d16 100644
--- a/funasr/bin/inference.py
+++ b/funasr/bin/inference.py
@@ -115,7 +115,7 @@
         vad_model = kwargs.get("vad_model", None)
         vad_kwargs = kwargs.get("vad_model_revision", None)
         if vad_model is not None:
-            print("build vad model")
+            logging.info("Building VAD model.")
             vad_kwargs = {"model": vad_model, "model_revision": vad_kwargs}
             vad_model, vad_kwargs = self.build_model(**vad_kwargs)
 
@@ -123,6 +123,7 @@
         punc_model = kwargs.get("punc_model", None)
         punc_kwargs = kwargs.get("punc_model_revision", None)
         if punc_model is not None:
+            logging.info("Building punc model.")
             punc_kwargs = {"model": punc_model, "model_revision": punc_kwargs}
             punc_model, punc_kwargs = self.build_model(**punc_kwargs)
 
@@ -130,6 +131,7 @@
         spk_model = kwargs.get("spk_model", None)
         spk_kwargs = kwargs.get("spk_model_revision", None)
         if spk_model is not None:
+            logging.info("Building SPK model.")
             spk_kwargs = {"model": spk_model, "model_revision": spk_kwargs}
             spk_model, spk_kwargs = self.build_model(**spk_kwargs)
             self.cb_model = ClusterBackend()
@@ -173,7 +175,7 @@
         # build tokenizer
         tokenizer = kwargs.get("tokenizer", None)
         if tokenizer is not None:
-            tokenizer_class = tables.tokenizer_classes.get(tokenizer.lower())
+            tokenizer_class = tables.tokenizer_classes.get(tokenizer)
             tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
             kwargs["tokenizer"] = tokenizer
             kwargs["token_list"] = tokenizer.token_list
@@ -184,13 +186,13 @@
         # build frontend
         frontend = kwargs.get("frontend", None)
         if frontend is not None:
-            frontend_class = tables.frontend_classes.get(frontend.lower())
+            frontend_class = tables.frontend_classes.get(frontend)
             frontend = frontend_class(**kwargs["frontend_conf"])
             kwargs["frontend"] = frontend
             kwargs["input_size"] = frontend.output_size()
         
         # build model
-        model_class = tables.model_classes.get(kwargs["model"].lower())
+        model_class = tables.model_classes.get(kwargs["model"])
         model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
         model.eval()
         model.to(device)
@@ -243,7 +245,7 @@
         
             time1 = time.perf_counter()
             with torch.no_grad():
-                results, meta_data = model.generate(**batch, **kwargs)
+                results, meta_data = model.inference(**batch, **kwargs)
             time2 = time.perf_counter()
             
             asr_result_list.extend(results)
@@ -272,12 +274,9 @@
     def generate_with_vad(self, input, input_len=None, **cfg):
         
         # step.1: compute the vad model
-        model = self.vad_model
-        kwargs = self.vad_kwargs
-        kwargs.update(cfg)
+        self.vad_kwargs.update(cfg)
         beg_vad = time.time()
-        res = self.generate(input, input_len=input_len, model=model, kwargs=kwargs, **cfg)
-        vad_res = res
+        res = self.generate(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
         end_vad = time.time()
         print(f"time cost vad: {end_vad - beg_vad:0.3f}")
 
@@ -310,10 +309,7 @@
             if not len(sorted_data):
                 logging.info("decoding, utt: {}, empty speech".format(key))
                 continue
-            
 
-            # if kwargs["device"] == "cpu":
-            #     batch_size = 0
             if len(sorted_data) > 0 and len(sorted_data[0]) > 0:
                 batch_size = max(batch_size, sorted_data[0][0][1] - sorted_data[0][0][0])
             
@@ -403,7 +399,7 @@
                 spk_embedding = result['spk_embedding']
                 labels = self.cb_model(spk_embedding, oracle_num=self.preset_spk_num)
                 del result['spk_embedding']
-                sv_output = postprocess(all_segments, None, labels, spk_embedding)
+                sv_output = postprocess(all_segments, None, labels, spk_embedding.cpu())
                 if self.spk_mode == 'vad_segment':
                     sentence_list = []
                     for res, vadsegment in zip(restored_data, vadsegments):
@@ -441,7 +437,7 @@
         # build frontend
         frontend = kwargs.get("frontend", None)
         if frontend is not None:
-            frontend_class = tables.frontend_classes.get(frontend.lower())
+            frontend_class = tables.frontend_classes.get(frontend)
             frontend = frontend_class(**kwargs["frontend_conf"])
 
         self.frontend = frontend

--
Gitblit v1.9.1