From c2f174cd7811a7a11e6227ecb371887f97fd66d3 Mon Sep 17 00:00:00 2001
From: 夜雨飘零 <yeyupiaoling@foxmail.com>
Date: Fri, 2 Feb 2024 23:06:13 +0800
Subject: [PATCH] Use ffmpeg read data (#1349)
---
funasr/auto/auto_model.py | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 9d957a3..d99fc56 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -88,7 +88,8 @@
class AutoModel:
def __init__(self, **kwargs):
- tables.print()
+ if not kwargs.get("disable_log", False):
+ tables.print()
model, kwargs = self.build_model(**kwargs)
@@ -133,8 +134,6 @@
self.spk_model = spk_model
self.spk_kwargs = spk_kwargs
self.model_path = kwargs.get("model_path")
-
-
def build_model(self, **kwargs):
assert "model" in kwargs
@@ -145,7 +144,7 @@
set_all_random_seed(kwargs.get("seed", 0))
device = kwargs.get("device", "cuda")
- if not torch.cuda.is_available() or kwargs.get("ngpu", 0) == 0:
+ if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
device = "cpu"
kwargs["batch_size"] = 1
kwargs["device"] = device
@@ -199,8 +198,6 @@
res = self.model(*args, kwargs)
return res
-
-
def generate(self, input, input_len=None, **cfg):
if self.vad_model is None:
return self.inference(input, input_len=input_len, **cfg)
@@ -231,7 +228,7 @@
data_batch = data_list[beg_idx:end_idx]
key_batch = key_list[beg_idx:end_idx]
batch = {"data_in": data_batch, "key": key_batch}
- if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
+ if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
batch["data_in"] = data_batch[0]
batch["data_lengths"] = input_len
@@ -426,10 +423,10 @@
f"time_escape: {time_escape_total_per_sample:0.3f}")
- end_total = time.time()
- time_escape_total_all_samples = end_total - beg_total
- print(f"rtf_avg_all: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
- f"time_speech_all: {time_speech_total_all_samples: 0.3f}, "
- f"time_escape_all: {time_escape_total_all_samples:0.3f}")
+ # end_total = time.time()
+ # time_escape_total_all_samples = end_total - beg_total
+ # print(f"rtf_avg_all: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, "
+ # f"time_speech_all: {time_speech_total_all_samples: 0.3f}, "
+ # f"time_escape_all: {time_escape_total_all_samples:0.3f}")
return results_ret_list
--
Gitblit v1.9.1