From f13cfbc18e6b7e37d4e5a515cf18411aa0c56d55 Mon Sep 17 00:00:00 2001
From: 北念 <lzr265946@alibaba-inc.com>
Date: Tue, 21 Feb 2023 17:34:45 +0800
Subject: [PATCH] support hotword parameter passing and decoding_model selection in the pipeline forward

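Accept **kwargs in the pipeline forward so callers can pass extra
keyword arguments (e.g. hotword parameters) through without breaking
the signature, and let param_dict["decoding_model"] select the decoding
configuration: "fast" -> (decoding_ind=0, decoding_mode="model1"),
"normal" -> (0, "model2"), "offline" -> (1, "model2").

Minimal usage sketch; the inference_pipeline handle and the hotword
keyword name are illustrative, while raw_inputs, param_dict, and the
"decoding_model" values are the ones introduced by this patch:

    import numpy as np

    waveform = np.zeros(16000, dtype=np.float32)   # 1 s of dummy 16 kHz audio
    result = inference_pipeline(
        raw_inputs=waveform,
        param_dict={"decoding_model": "offline"},  # or "fast" / "normal"
        hotword="alibaba",                         # forwarded via **kwargs
    )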
---
 funasr/bin/asr_inference_uniasr.py |   14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/funasr/bin/asr_inference_uniasr.py b/funasr/bin/asr_inference_uniasr.py
index 0a5824c..c50bf17 100644
--- a/funasr/bin/asr_inference_uniasr.py
+++ b/funasr/bin/asr_inference_uniasr.py
@@ -433,12 +433,26 @@
                  output_dir_v2: Optional[str] = None,
                  fs: dict = None,
                  param_dict: dict = None,
+                 **kwargs,
                  ):
         # 3. Build data-iterator
         if data_path_and_name_and_type is None and raw_inputs is not None:
             if isinstance(raw_inputs, torch.Tensor):
                 raw_inputs = raw_inputs.numpy()
             data_path_and_name_and_type = [raw_inputs, "speech", "waveform"]
+        if param_dict is not None and "decoding_model" in param_dict:
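+            # map param_dict["decoding_model"] onto (decoding_ind, decoding_mode)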
+            if param_dict["decoding_model"] == "fast":
+                speech2text.decoding_ind = 0
+                speech2text.decoding_mode = "model1"
+            elif param_dict["decoding_model"] == "normal":
+                speech2text.decoding_ind = 0
+                speech2text.decoding_mode = "model2"
+            elif param_dict["decoding_model"] == "offline":
+                speech2text.decoding_ind = 1
+                speech2text.decoding_mode = "model2"
+            else:
+                raise NotImplementedError("unsupported decoding_model: {} (expected fast, normal, or offline)".format(param_dict["decoding_model"]))
         loader = ASRTask.build_streaming_iterator(
             data_path_and_name_and_type,
             dtype=dtype,

--
Gitblit v1.9.1