From 79bd015ab0ded4e5aed1b1ecf32fcbc84eefde68 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Fri, 03 Feb 2023 17:42:47 +0800
Subject: [PATCH] Merge pull request #58 from alibaba-damo-academy/dev

---
 funasr/bin/asr_inference_paraformer.py |   10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/funasr/bin/asr_inference_paraformer.py b/funasr/bin/asr_inference_paraformer.py
index a50e038..0929436 100644
--- a/funasr/bin/asr_inference_paraformer.py
+++ b/funasr/bin/asr_inference_paraformer.py
@@ -169,7 +169,7 @@
         self.converter = converter
         self.tokenizer = tokenizer
         is_use_lm = lm_weight != 0.0 and lm_file is not None
-        if ctc_weight == 0.0 and not is_use_lm:
+        if (ctc_weight == 0.0 or asr_model.ctc == None) and not is_use_lm:
             beam_search = None
         self.beam_search = beam_search
         logging.info(f"Beam_search: {self.beam_search}")
@@ -227,6 +227,8 @@
         pre_acoustic_embeds, pre_token_length, alphas, pre_peak_index = predictor_outs[0], predictor_outs[1], \
                                                                         predictor_outs[2], predictor_outs[3]
         pre_token_length = pre_token_length.round().long()
+        if torch.max(pre_token_length) < 1:
+            return []
         decoder_outs = self.asr_model.cal_decoder_with_predictor(enc, enc_len, pre_acoustic_embeds, pre_token_length)
         decoder_out, ys_pad_lens = decoder_outs[0], decoder_outs[1]
 
@@ -394,7 +396,7 @@
 #         results = speech2text(**batch)
 #         if len(results) < 1:
 #             hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
-#             results = [[" ", ["<space>"], [2], hyp, 10, 6]] * nbest
+#             results = [[" ", ["sil"], [2], hyp, 10, 6]] * nbest
 #         time_end = time.time()
 #         forward_time = time_end - time_beg
 #         lfr_factor = results[0][-1]
@@ -527,6 +529,7 @@
         nbest: int = 1,
         num_workers: int = 1,
         output_dir: Optional[str] = None,
+        param_dict: dict = None,
         **kwargs,
 ):
     assert check_argument_types()
@@ -576,6 +579,7 @@
             data_path_and_name_and_type,
             raw_inputs: Union[np.ndarray, torch.Tensor] = None,
             output_dir_v2: Optional[str] = None,
+            param_dict: dict = None,
     ):
         # 3. Build data-iterator
         if data_path_and_name_and_type is None and raw_inputs is not None:
@@ -621,7 +625,7 @@
             results = speech2text(**batch)
             if len(results) < 1:
                 hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
-                results = [[" ", ["<space>"], [2], hyp, 10, 6]] * nbest
+                results = [[" ", ["sil"], [2], hyp, 10, 6]] * nbest
             time_end = time.time()
             forward_time = time_end - time_beg
             lfr_factor = results[0][-1]

--
Gitblit v1.9.1