From 1a6d9d5cc422dcd1e6dd5b9c67047d63bc6cd667 Mon Sep 17 00:00:00 2001
From: 语帆 <yf352572@alibaba-inc.com>
Date: Mon, 04 Mar 2024 16:32:28 +0800
Subject: [PATCH] atsr

---
 funasr/auto/auto_model.py |    5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index ba7dcab..9bb9ce0 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -41,6 +41,7 @@
     chars = string.ascii_letters + string.digits
     if isinstance(data_in, str) and data_in.startswith('http'): # url
         data_in = download_from_url(data_in)
+
     if isinstance(data_in, str) and os.path.exists(data_in): # wav_path; filelist: wav.scp, file.jsonl;text.txt;
         _, file_extension = os.path.splitext(data_in)
         file_extension = file_extension.lower()
@@ -212,7 +213,7 @@
         batch_size = kwargs.get("batch_size", 1)
         # if kwargs.get("device", "cpu") == "cpu":
         #     batch_size = 1
-        
+
         key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key)
 
         speed_stats = {}
@@ -234,11 +235,9 @@
         
             time1 = time.perf_counter()
             with torch.no_grad():
-                pdb.set_trace()
                 results, meta_data = model.inference(**batch, **kwargs)
             time2 = time.perf_counter()
             
-            pdb.set_trace()
             asr_result_list.extend(results)
 
             # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()

--
Gitblit v1.9.1