From d8b586e02cd14f7eed6b330bd4f110cb1e7f24ad Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Tue, 09 Jan 2024 20:33:12 +0800
Subject: [PATCH] funasr1.0 modelscope
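
This patch reworks funasr/bin/inference.py for FunASR 1.0 / ModelScope:

- rename build_iter_for_infer to prepare_data_iterator, adding URL download,
  multi-input (list of data_type) and caller-supplied key support
- switch audio loading to funasr.utils.load_utils (load_bytes,
  load_audio_text_image_video, extract_fbank)
- convert the Hydra config to plain Python containers before use
- run model.generate under torch.no_grad() and honor the ncpu thread setting
- add an AutoFrontend class for standalone fbank extraction

Minimal usage sketch of the new entry points (model name, file names and CLI
arguments below are illustrative, not part of this patch):

    from funasr.bin.inference import AutoFrontend, prepare_data_iterator

    # accepts a wav path, an http(s) URL, raw bytes, a list of inputs,
    # or a .scp/.txt/.json/.jsonl filelist
    keys, data = prepare_data_iterator("asr_example.wav")

    # build a frontend from a hub model and batch-extract fbank features
    frontend = AutoFrontend(model="paraformer-zh", device="cpu")
    batches = frontend("asr_example.wav", batch_size=1)

    # Hydra-style CLI invocation:
    #   python -m funasr.bin.inference ++model=paraformer-zh ++input=asr_example.wav ++device=cpu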
---
funasr/bin/inference.py | 139 +++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 117 insertions(+), 22 deletions(-)
diff --git a/funasr/bin/inference.py b/funasr/bin/inference.py
index 16ad0e2..dedaf7d 100644
--- a/funasr/bin/inference.py
+++ b/funasr/bin/inference.py
@@ -4,11 +4,11 @@
import numpy as np
import hydra
import json
-from omegaconf import DictConfig, OmegaConf
+from omegaconf import DictConfig, OmegaConf, ListConfig
import logging
from funasr.download.download_from_hub import download_model
from funasr.train_utils.set_all_random_seed import set_all_random_seed
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_bytes
+from funasr.utils.load_utils import load_bytes
from funasr.train_utils.device_funcs import to_device
from tqdm import tqdm
from funasr.train_utils.load_pretrained_model import load_pretrained_model
@@ -16,11 +16,13 @@
import random
import string
from funasr.register import tables
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio
+
+from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.utils.timestamp_tools import time_stamp_sentence
+from funasr.download.file import download_from_url
-def build_iter_for_infer(data_in, input_len=None, data_type="sound"):
+def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
"""
:param input:
@@ -34,7 +36,8 @@
filelist = [".scp", ".txt", ".json", ".jsonl"]
chars = string.ascii_letters + string.digits
-
+ if isinstance(data_in, str) and data_in.startswith('http'): # url
+ data_in = download_from_url(data_in)
if isinstance(data_in, str) and os.path.exists(data_in): # wav_path; filelist: wav.scp, file.jsonl;text.txt;
_, file_extension = os.path.splitext(data_in)
file_extension = file_extension.lower()
@@ -57,20 +60,40 @@
key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
data_list = [data_in]
key_list = [key]
- elif isinstance(data_in, (list, tuple)): # [audio sample point, fbank]
- data_list = data_in
- key_list = ["rand_key_" + ''.join(random.choice(chars) for _ in range(13)) for _ in range(len(data_in))]
+ elif isinstance(data_in, (list, tuple)):
+        if data_type is not None and isinstance(data_type, (list, tuple)): # multiple inputs
+ data_list_tmp = []
+ for data_in_i, data_type_i in zip(data_in, data_type):
+ key_list, data_list_i = prepare_data_iterator(data_in=data_in_i, data_type=data_type_i)
+ data_list_tmp.append(data_list_i)
+ data_list = []
+ for item in zip(*data_list_tmp):
+ data_list.append(item)
+ else:
+ # [audio sample point, fbank, text]
+ data_list = data_in
+ key_list = ["rand_key_" + ''.join(random.choice(chars) for _ in range(13)) for _ in range(len(data_in))]
else: # raw text; audio sample point, fbank; bytes
if isinstance(data_in, bytes): # audio bytes
data_in = load_bytes(data_in)
- key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
+ if key is None:
+ key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
data_list = [data_in]
key_list = [key]
return key_list, data_list
@hydra.main(config_name=None, version_base=None)
-def main_hydra(kwargs: DictConfig):
+def main_hydra(cfg: DictConfig):
+ def to_plain_list(cfg_item):
+ if isinstance(cfg_item, ListConfig):
+ return OmegaConf.to_container(cfg_item, resolve=True)
+ elif isinstance(cfg_item, DictConfig):
+ return {k: to_plain_list(v) for k, v in cfg_item.items()}
+ else:
+ return cfg_item
+
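+    # convert the OmegaConf config into plain Python containers so it can be freely mutated downstream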
+ kwargs = to_plain_list(cfg)
log_level = getattr(logging, kwargs.get("log_level", "INFO").upper())
logging.basicConfig(level=log_level)
@@ -121,10 +144,13 @@
set_all_random_seed(kwargs.get("seed", 0))
device = kwargs.get("device", "cuda")
- if not torch.cuda.is_available() or kwargs.get("ngpu", 1):
+    if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:  # fall back to CPU when CUDA is unavailable or ngpu is 0
device = "cpu"
- kwargs["batch_size"] = 1
+ # kwargs["batch_size"] = 1
kwargs["device"] = device
+
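+    # ncpu, when set, caps the number of intra-op CPU threads torch may use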
+ if kwargs.get("ncpu", None):
+ torch.set_num_threads(kwargs.get("ncpu"))
# build tokenizer
tokenizer = kwargs.get("tokenizer", None)
@@ -169,17 +195,17 @@
else:
return self.generate_with_vad(input, input_len=input_len, **cfg)
- def generate(self, input, input_len=None, model=None, kwargs=None, **cfg):
+ def generate(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
kwargs = self.kwargs if kwargs is None else kwargs
kwargs.update(cfg)
model = self.model if model is None else model
-
- data_type = kwargs.get("data_type", "sound")
+
batch_size = kwargs.get("batch_size", 1)
# if kwargs.get("device", "cpu") == "cpu":
# batch_size = 1
- key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type)
+ key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key)
speed_stats = {}
asr_result_list = []
@@ -193,11 +219,12 @@
key_batch = key_list[beg_idx:end_idx]
batch = {"data_in": data_batch, "key": key_batch}
if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
- batch["data_batch"] = data_batch[0]
+ batch["data_in"] = data_batch[0]
batch["data_lengths"] = input_len
time1 = time.perf_counter()
- results, meta_data = model.generate(**batch, **kwargs)
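+            # run inference with autograd disabled; no gradients are needed here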
+ with torch.no_grad():
+ results, meta_data = model.generate(**batch, **kwargs)
time2 = time.perf_counter()
asr_result_list.extend(results)
@@ -242,8 +269,8 @@
batch_size = int(kwargs.get("batch_size_s", 300))*1000
batch_size_threshold_ms = int(kwargs.get("batch_size_threshold_s", 60))*1000
kwargs["batch_size"] = batch_size
- data_type = kwargs.get("data_type", "sound")
- key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type)
+
+ key_list, data_list = prepare_data_iterator(input, input_len=input_len, data_type=kwargs.get("data_type", None))
results_ret_list = []
time_speech_total_all_samples = 0.0
@@ -253,7 +280,7 @@
key = res[i]["key"]
vadsegments = res[i]["value"]
input_i = data_list[i]
- speech = load_audio(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
+ speech = load_audio_text_image_video(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
speech_lengths = len(speech)
n = len(vadsegments)
data_with_index = [(vadsegments[i], i) for i in range(n)]
@@ -339,7 +366,7 @@
# sentences = time_stamp_sentence(model.punc_list, model.sentence_end_id, results_ret_list[i]["timestamp"], res[i]["text"])
# results_ret_list[i]["time_stamp"] = res[0]["text_postprocessed_punc"]
# results_ret_list[i]["sentences"] = sentences
- # results_ret_list[i]["text_with_punc"] = res[i]["text"]
+ results_ret_list[i]["text_with_punc"] = res[i]["text"]
pbar_total.update(1)
end_total = time.time()
@@ -348,6 +375,74 @@
f"time_speech_total_all_samples: {time_speech_total_all_samples: 0.3f}, "
f"time_escape_total_all_samples: {time_escape_total_all_samples:0.3f}")
return results_ret_list
+
+
+class AutoFrontend:
+ def __init__(self, **kwargs):
+ assert "model" in kwargs
+ if "model_conf" not in kwargs:
+ logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
+ kwargs = download_model(**kwargs)
+
+ # build frontend
+ frontend = kwargs.get("frontend", None)
+ if frontend is not None:
+ frontend_class = tables.frontend_classes.get(frontend.lower())
+ frontend = frontend_class(**kwargs["frontend_conf"])
+
+ self.frontend = frontend
+        self.kwargs = kwargs
+
+    def __call__(self, input, input_len=None, kwargs=None, **cfg):
+        kwargs = self.kwargs if kwargs is None else kwargs
+        kwargs.update(cfg)
+
+ key_list, data_list = prepare_data_iterator(input, input_len=input_len)
+ batch_size = kwargs.get("batch_size", 1)
+ device = kwargs.get("device", "cpu")
+ if device == "cpu":
+ batch_size = 1
+
+ meta_data = {}
+
+ result_list = []
+ num_samples = len(data_list)
+ pbar = tqdm(colour="blue", total=num_samples + 1, dynamic_ncols=True)
+
+ time0 = time.perf_counter()
+ for beg_idx in range(0, num_samples, batch_size):
+ end_idx = min(num_samples, beg_idx + batch_size)
+ data_batch = data_list[beg_idx:end_idx]
+ key_batch = key_list[beg_idx:end_idx]
+
+ # extract fbank feats
+ time1 = time.perf_counter()
+ audio_sample_list = load_audio_text_image_video(data_batch, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ time2 = time.perf_counter()
+ meta_data["load_data"] = f"{time2 - time1:0.3f}"
+ speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
+ frontend=self.frontend)
+ time3 = time.perf_counter()
+ meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+ meta_data["batch_data_time"] = speech_lengths.sum().item() * self.frontend.frame_shift * self.frontend.lfr_n / 1000
+
+            # torch.Tensor.to is not in-place; keep the returned tensors
+            speech = speech.to(device=device)
+            speech_lengths = speech_lengths.to(device=device)
+            batch = {"input": speech, "input_len": speech_lengths, "key": key_batch}
+            result_list.append(batch)
+
+            pbar.update(1)
+ description = (
+ f"{meta_data}, "
+ )
+ pbar.set_description(description)
+
+ time_end = time.perf_counter()
+ pbar.set_description(f"time escaped total: {time_end - time0:0.3f}")
+
+ return result_list
+
+
if __name__ == '__main__':
main_hydra()
\ No newline at end of file
--
Gitblit v1.9.1