From 3fcb5dcfede0103d2465dd85a2ef6b77af58c7bc Mon Sep 17 00:00:00 2001
From: shixian.shi <shixian.shi@alibaba-inc.com>
Date: Mon, 15 Jan 2024 20:25:35 +0800
Subject: [PATCH] update scripts

---
 funasr/bin/inference.py |   19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/funasr/bin/inference.py b/funasr/bin/inference.py
index 21938e6..ca8771d 100644
--- a/funasr/bin/inference.py
+++ b/funasr/bin/inference.py
@@ -175,7 +175,7 @@
         # build tokenizer
         tokenizer = kwargs.get("tokenizer", None)
         if tokenizer is not None:
-            tokenizer_class = tables.tokenizer_classes.get(tokenizer.lower())
+            tokenizer_class = tables.tokenizer_classes.get(tokenizer)
             tokenizer = tokenizer_class(**kwargs["tokenizer_conf"])
             kwargs["tokenizer"] = tokenizer
             kwargs["token_list"] = tokenizer.token_list
@@ -186,13 +186,13 @@
         # build frontend
         frontend = kwargs.get("frontend", None)
         if frontend is not None:
-            frontend_class = tables.frontend_classes.get(frontend.lower())
+            frontend_class = tables.frontend_classes.get(frontend)
             frontend = frontend_class(**kwargs["frontend_conf"])
             kwargs["frontend"] = frontend
             kwargs["input_size"] = frontend.output_size()
         
         # build model
-        model_class = tables.model_classes.get(kwargs["model"].lower())
+        model_class = tables.model_classes.get(kwargs["model"])
         model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
         model.eval()
         model.to(device)
@@ -245,7 +245,7 @@
         
             time1 = time.perf_counter()
             with torch.no_grad():
-                results, meta_data = model.generate(**batch, **kwargs)
+                results, meta_data = model.inference(**batch, **kwargs)
             time2 = time.perf_counter()
             
             asr_result_list.extend(results)
@@ -274,12 +274,9 @@
     def generate_with_vad(self, input, input_len=None, **cfg):
         
         # step.1: compute the vad model
-        model = self.vad_model
-        kwargs = self.vad_kwargs
-        kwargs.update(cfg)
+        self.vad_kwargs.update(cfg)
         beg_vad = time.time()
-        res = self.generate(input, input_len=input_len, model=model, kwargs=kwargs, **cfg)
-        vad_res = res
+        res = self.generate(input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg)
         end_vad = time.time()
         print(f"time cost vad: {end_vad - beg_vad:0.3f}")
 
@@ -405,7 +402,7 @@
                 spk_embedding = result['spk_embedding']
                 labels = self.cb_model(spk_embedding, oracle_num=self.preset_spk_num)
                 del result['spk_embedding']
-                sv_output = postprocess(all_segments, None, labels, spk_embedding)
+                sv_output = postprocess(all_segments, None, labels, spk_embedding.cpu())
                 if self.spk_mode == 'vad_segment':
                     sentence_list = []
                     for res, vadsegment in zip(restored_data, vadsegments):
@@ -443,7 +440,7 @@
         # build frontend
         frontend = kwargs.get("frontend", None)
         if frontend is not None:
-            frontend_class = tables.frontend_classes.get(frontend.lower())
+            frontend_class = tables.frontend_classes.get(frontend)
             frontend = frontend_class(**kwargs["frontend_conf"])
 
         self.frontend = frontend

--
Gitblit v1.9.1