From a2f263bd05498cf4f35d78ee0ee8755ba84d09ae Mon Sep 17 00:00:00 2001
From: 语帆 <yf352572@alibaba-inc.com>
Date: 星期一, 04 三月 2024 17:09:05 +0800
Subject: [PATCH] atsr
---
funasr/auto/auto_model.py | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index e6e08b8..9bb9ce0 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -41,6 +41,7 @@
chars = string.ascii_letters + string.digits
if isinstance(data_in, str) and data_in.startswith('http'): # url
data_in = download_from_url(data_in)
+
if isinstance(data_in, str) and os.path.exists(data_in): # wav_path; filelist: wav.scp, file.jsonl;text.txt;
_, file_extension = os.path.splitext(data_in)
file_extension = file_extension.lower()
@@ -141,7 +142,7 @@
kwargs = download_model(**kwargs)
set_all_random_seed(kwargs.get("seed", 0))
-
+
device = kwargs.get("device", "cuda")
if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
device = "cpu"
@@ -161,19 +162,18 @@
vocab_size = len(tokenizer.token_list)
else:
vocab_size = -1
- pdb.set_trace()
# build frontend
frontend = kwargs.get("frontend", None)
+
if frontend is not None:
frontend_class = tables.frontend_classes.get(frontend)
frontend = frontend_class(**kwargs["frontend_conf"])
kwargs["frontend"] = frontend
kwargs["input_size"] = frontend.output_size()
- pdb.set_trace()
+
# build model
model_class = tables.model_classes.get(kwargs["model"])
model = model_class(**kwargs, **kwargs["model_conf"], vocab_size=vocab_size)
-
model.to(device)
# init_param
@@ -213,7 +213,7 @@
batch_size = kwargs.get("batch_size", 1)
# if kwargs.get("device", "cpu") == "cpu":
# batch_size = 1
-
+
key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key)
speed_stats = {}
@@ -235,11 +235,9 @@
time1 = time.perf_counter()
with torch.no_grad():
- pdb.set_trace()
results, meta_data = model.inference(**batch, **kwargs)
time2 = time.perf_counter()
- pdb.set_trace()
asr_result_list.extend(results)
# batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item()
--
Gitblit v1.9.1