From 8ae9fa8365eba7d33c8d8f5fa51d12903ca6a409 Mon Sep 17 00:00:00 2001
From: haoneng.lhn <haoneng.lhn@alibaba-inc.com>
Date: Thu, 21 Sep 2023 16:26:51 +0800
Subject: [PATCH] asr_inference_launch: batched-decode progress and timing, decoder-cache chunk_size, rename simu_streaming to fake_streaming

- Wrap the duration-batched decode loop in tqdm and print the total ASR
  time once at the end; the per-batch shape and timing prints are demoted
  to comments.
- Store chunk_size in the decoder cache built by _cache_reset.
- Rename simu_streaming to fake_streaming throughout: the speech2text
  option, the fake_streaming_decode call, and the --fake_streaming
  (str2bool, default False) command-line flag.
---
 funasr/bin/asr_inference_launch.py |   30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

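For context, a minimal runnable sketch of the duration-batched decode loop that
the first two hunks instrument. The ((beg_ms, end_ms), original_index) layout of
sorted_data follows the diff; run_asr, the budget parameter names, and the reset
of the cumulative counter between batches are illustrative assumptions rather
than funasr code.

import time
from tqdm import tqdm

def run_asr(segments):
    # Hypothetical stand-in for speech2text(**batch): one dummy hypothesis
    # per segment, keyed by the segment's original index.
    return ["hyp-%d" % idx for (_span, idx) in segments]

def decode_in_batches(sorted_data, batch_ms_budget=60000, long_segment_ms=30000):
    n = len(sorted_data)
    results_sorted = []
    batch_ms_cum = 0
    beg_idx = 0
    beg_asr_total = time.time()
    for j in tqdm(range(n)):
        batch_ms_cum += sorted_data[j][0][1] - sorted_data[j][0][0]
        if j < n - 1:
            next_ms = sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]
            # Grow the batch while the cumulative duration stays under budget
            # and the next segment is not long enough to stand alone.
            if batch_ms_cum + next_ms < batch_ms_budget and next_ms < long_segment_ms:
                continue
        results_sorted.extend(run_asr(sorted_data[beg_idx:j + 1]))
        beg_idx = j + 1
        batch_ms_cum = 0  # assumed reset; this happens outside the shown hunks
    print("total time cost asr: ", time.time() - beg_asr_total)
    return results_sorted

if __name__ == "__main__":
    segments = [((i * 1000, (i + 1) * 1000 + 500 * (i % 3)), i) for i in range(8)]
    print(decode_in_batches(segments, batch_ms_budget=4000, long_segment_ms=3000))
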
diff --git a/funasr/bin/asr_inference_launch.py b/funasr/bin/asr_inference_launch.py
index 1b38f8f..50b9886 100644
--- a/funasr/bin/asr_inference_launch.py
+++ b/funasr/bin/asr_inference_launch.py
@@ -45,7 +45,7 @@
 from funasr.utils.types import str2triple_str
 from funasr.utils.types import str_or_none
 from funasr.utils.vad_utils import slice_padding_fbank
-
+from tqdm import tqdm
 
 def inference_asr(
         maxlenratio: float,
@@ -651,7 +651,8 @@
             
             batch_size_token_ms_cum = 0
             beg_idx = 0
-            for j, _ in enumerate(range(0, n)):
+            beg_asr_total = time.time()
+            for j, _ in enumerate(tqdm(range(0, n))):
                 batch_size_token_ms_cum += (sorted_data[j][0][1] - sorted_data[j][0][0])
                 if j < n - 1 and (batch_size_token_ms_cum + sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size_token_ms and (sorted_data[j + 1][0][1] - sorted_data[j + 1][0][0]) < batch_size_token_threshold_s:
                     continue
@@ -661,16 +662,17 @@
                 beg_idx = end_idx
                 batch = {"speech": speech_j, "speech_lengths": speech_lengths_j}
                 batch = to_device(batch, device=device)
-                print("batch: ", speech_j.shape[0])
+                # print("batch: ", speech_j.shape[0])
                 beg_asr = time.time()
                 results = speech2text(**batch)
                 end_asr = time.time()
-                print("time cost asr: ", end_asr - beg_asr)
+                # print("time cost asr: ", end_asr - beg_asr)
 
                 if len(results) < 1:
                     results = [["", [], [], [], [], [], []]]
                 results_sorted.extend(results)
-
+            end_asr_total = time.time()
+            print("total time cost asr: ", end_asr_total-beg_asr_total)
             restored_data = [0] * n
             for j in range(n):
                 index = sorted_data[j][1]
@@ -853,7 +855,7 @@
                     "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
         cache["encoder"] = cache_en
 
-        cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None}
+        cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None, "chunk_size": chunk_size}
         cache["decoder"] = cache_de
 
         return cache
@@ -870,7 +872,7 @@
                         "feats": torch.zeros((batch_size, chunk_size[0] + chunk_size[2], feats_dims)), "tail_chunk": False}
             cache["encoder"] = cache_en
 
-            cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None}
+            cache_de = {"decode_fsmn": None, "decoder_chunk_look_back": decoder_chunk_look_back, "opt": None, "chunk_size": chunk_size}
             cache["decoder"] = cache_de
 
         return cache
@@ -982,8 +984,8 @@
 
         asr_result_list.append(item)
         if is_final:
-            cache = _cache_reset(cache, chunk_size=chunk_size, batch_size=1,
-                                 encoder_chunk_look_back=encoder_chunk_look_back, decoder_chunk_look_back=decoder_chunk_look_back)
+            cache = _cache_reset(cache, chunk_size=chunk_size, encoder_chunk_look_back=encoder_chunk_look_back, 
+                                 decoder_chunk_look_back=decoder_chunk_look_back, batch_size=1)
         return asr_result_list
 
     return _forward
@@ -1341,7 +1343,7 @@
         quantize_modules: Optional[List[str]] = None,
         quantize_dtype: Optional[str] = "float16",
         streaming: Optional[bool] = False,
-        simu_streaming: Optional[bool] = False,
+        fake_streaming: Optional[bool] = False,
         full_utt: Optional[bool] = False,
         chunk_size: Optional[int] = 16,
         left_context: Optional[int] = 16,
@@ -1418,7 +1420,7 @@
         quantize_modules=quantize_modules,
         quantize_dtype=quantize_dtype,
         streaming=streaming,
-        simu_streaming=simu_streaming,
+        fake_streaming=fake_streaming,
         full_utt=full_utt,
         chunk_size=chunk_size,
         left_context=left_context,
@@ -1476,8 +1478,8 @@
                     final_hyps = speech2text.streaming_decode(
                         speech[_end: len(speech)], is_final=True
                     )
-                elif speech2text.simu_streaming:
-                    final_hyps = speech2text.simu_streaming_decode(**batch)
+                elif speech2text.fake_streaming:
+                    final_hyps = speech2text.fake_streaming_decode(**batch)
                 elif speech2text.full_utt:
                     final_hyps = speech2text.full_utt_decode(**batch)
                 else:
@@ -1867,7 +1869,7 @@
     group.add_argument("--lm_weight", type=float, default=1.0, help="RNNLM weight")
     group.add_argument("--ngram_weight", type=float, default=0.9, help="ngram weight")
     group.add_argument("--streaming", type=str2bool, default=False)
-    group.add_argument("--simu_streaming", type=str2bool, default=False)
+    group.add_argument("--fake_streaming", type=str2bool, default=False)
     group.add_argument("--full_utt", type=str2bool, default=False)
     group.add_argument("--chunk_size", type=int, default=16)
     group.add_argument("--left_context", type=int, default=16)

--
Gitblit v1.9.1
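
For reference, how the decoder cache looks after this change. The dict fields
are copied from the patched _cache_reset; build_decoder_cache is a hypothetical
wrapper, and the [5, 10, 5] chunk_size in the demo is illustrative. The
encoder-side cache keeps its feats buffer of shape
(batch_size, chunk_size[0] + chunk_size[2], feats_dims), as in the context lines.

def build_decoder_cache(chunk_size, decoder_chunk_look_back):
    # Decoder cache as created by the patched _cache_reset: the new
    # "chunk_size" key is the only change, presumably so the streaming
    # decoder can read the chunk layout from the cache rather than take it
    # as a separate argument.
    return {
        "decode_fsmn": None,
        "decoder_chunk_look_back": decoder_chunk_look_back,
        "opt": None,
        "chunk_size": chunk_size,
    }

print(build_decoder_cache([5, 10, 5], decoder_chunk_look_back=1))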