From 32905d8cdedd53dad26680b0bd41397aaf0e51ae Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Fri, 05 Jan 2024 11:52:48 +0800
Subject: [PATCH] funasr1.0
---
funasr/models/paraformer/model.py | 4
examples/industrial_data_pretraining/tp_aligner/README_zh.md | 42 +++++++
examples/industrial_data_pretraining/paraformer-long/infer.sh | 4
examples/industrial_data_pretraining/fsmn-vad/demo.py | 4
examples/industrial_data_pretraining/tp_aligner/infer.sh | 17 ++
funasr/models/contextual_paraformer/model.py | 4
funasr/models/paraformer_streaming/model.py | 6
funasr/models/transducer/model.py | 4
funasr/utils/load_utils.py | 102 +++++++++++++++++
funasr/models/transformer/model.py | 4
examples/industrial_data_pretraining/tp_aligner/demo.py | 15 ++
examples/industrial_data_pretraining/paraformer-long/demo.py | 4
funasr/bin/inference.py | 45 +++++--
 funasr/datasets/audio_datasets/load_audio_extract_fbank.py   |   76 ------------
funasr/models/seaco_paraformer/model.py | 4
funasr/models/fsmn_vad/model.py | 4
examples/industrial_data_pretraining/fsmn-vad/infer.sh | 2
funasr/models/bicif_paraformer/model.py | 4
examples/industrial_data_pretraining/ct-transformer/infer.sh | 7
examples/industrial_data_pretraining/seaco_paraformer/demo.py | 4
funasr/datasets/audio_datasets/datasets.py | 2
21 files changed, 238 insertions(+), 120 deletions(-)
diff --git a/examples/industrial_data_pretraining/ct-transformer/infer.sh b/examples/industrial_data_pretraining/ct-transformer/infer.sh
index f6c5c23..bd8ac05 100644
--- a/examples/industrial_data_pretraining/ct-transformer/infer.sh
+++ b/examples/industrial_data_pretraining/ct-transformer/infer.sh
@@ -1,14 +1,13 @@
# download model
-local_path_root=./modelscope_models
+local_path_root=../modelscope_models
mkdir -p ${local_path_root}
local_path=${local_path_root}/punc_ct-transformer_zh-cn-common-vocab272727-pytorch
-git clone https://www.modelscope.cn/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404.git ${local_path}
+git clone https://www.modelscope.cn/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch.git ${local_path}
python funasr/bin/inference.py \
+model="${local_path}" \
+input="${local_path}/example/punc_example.txt" \
+output_dir="./outputs/debug" \
-+device="cpu" \
-+debug="true"
++device="cpu"
diff --git a/examples/industrial_data_pretraining/fsmn-vad/demo.py b/examples/industrial_data_pretraining/fsmn-vad/demo.py
index b3e9bb6..6c112e2 100644
--- a/examples/industrial_data_pretraining/fsmn-vad/demo.py
+++ b/examples/industrial_data_pretraining/fsmn-vad/demo.py
@@ -5,7 +5,7 @@
from funasr import AutoModel
-model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_fsmn_vad_zh-cn-16k-common-pytorch")
+model = AutoModel(model="../modelscope_models/speech_fsmn_vad_zh-cn-16k-common-pytorch")
-res = model(input="/Users/zhifu/Downloads/modelscope_models/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav")
+res = model(input="../modelscope_models/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav")
print(res)
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/fsmn-vad/infer.sh b/examples/industrial_data_pretraining/fsmn-vad/infer.sh
index 7662a53..94e1b3d 100644
--- a/examples/industrial_data_pretraining/fsmn-vad/infer.sh
+++ b/examples/industrial_data_pretraining/fsmn-vad/infer.sh
@@ -1,6 +1,6 @@
# download model
-local_path_root=./modelscope_models
+local_path_root=../modelscope_models
mkdir -p ${local_path_root}
local_path=${local_path_root}/speech_fsmn_vad_zh-cn-16k-common-pytorch
git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git ${local_path}
diff --git a/examples/industrial_data_pretraining/paraformer-long/demo.py b/examples/industrial_data_pretraining/paraformer-long/demo.py
index ca61ee3..83e024e 100644
--- a/examples/industrial_data_pretraining/paraformer-long/demo.py
+++ b/examples/industrial_data_pretraining/paraformer-long/demo.py
@@ -5,10 +5,10 @@
from funasr import AutoModel
-model = AutoModel(model="../modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
+model = AutoModel(model="../modelscope_models/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
vad_model="../modelscope_models/speech_fsmn_vad_zh-cn-16k-common-pytorch",
punc_model="../modelscope_models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
)
-res = model(input="../modelscope_models/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/asr_example.wav", batch_size_s=300, batch_size_threshold_s=60)
+res = model(input="../modelscope_models/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav", batch_size_s=300, batch_size_threshold_s=60)
print(res)
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/paraformer-long/infer.sh b/examples/industrial_data_pretraining/paraformer-long/infer.sh
index 3994219..2ec237b 100644
--- a/examples/industrial_data_pretraining/paraformer-long/infer.sh
+++ b/examples/industrial_data_pretraining/paraformer-long/infer.sh
@@ -3,8 +3,8 @@
local_path_root=../modelscope_models
mkdir -p ${local_path_root}
-local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-git clone https://www.modelscope.cn/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404.git ${local_path}
+local_path=${local_path_root}/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch
+git clone https://www.modelscope.cn/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
local_path_vad=${local_path_root}/speech_fsmn_vad_zh-cn-16k-common-pytorch
git clone https://www.modelscope.cn/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch.git ${local_path_vad}
diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.py b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
index 039d3f3..9aec94a 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -5,12 +5,12 @@
from funasr import AutoModel
-model = AutoModel(model="../modelscope_models/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404",
+model = AutoModel(model="../modelscope_models/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
)
#vad_model="../modelscope_models/speech_fsmn_vad_zh-cn-16k-common-pytorch",
#punc_model="../modelscope_models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"
-res = model(input="../modelscope_models/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/example/asr_example.wav",
+res = model(input="../modelscope_models/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav",
            hotword='达摩院 磨搭')
print(res)
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/tp_aligner/README_zh.md b/examples/industrial_data_pretraining/tp_aligner/README_zh.md
new file mode 100644
index 0000000..8ddb202
--- /dev/null
+++ b/examples/industrial_data_pretraining/tp_aligner/README_zh.md
@@ -0,0 +1,42 @@
+(Simplified Chinese | [English](./README.md))
+
+# Speech Recognition
+
+> **Note**:
+> The pipeline supports inference and fine-tuning with all models in the [ModelScope model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope). Here we take a typical model as an example to demonstrate the usage.
+
+## Inference
+
+### Quick Start
+#### [Paraformer model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)
+```python
+from funasr import AutoModel
+
+model = AutoModel(model="../modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+res = model(input="../modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav")
+print(res)
+```
+
+### API Reference
+#### AutoModel Definition
+- `model`: model name in the [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope), or a model path on local disk
+- `device`: `cuda` (default), run inference on GPU; if set to `cpu`, inference runs on CPU
+- `ncpu`: `None` (default), number of threads for CPU intra-op parallelism
+- `output_dir`: `None` (default), if set, the output path for the results
+- `batch_size`: `1` (default), batch size during decoding
+#### AutoModel Inference
+- `input`: the input to decode, which can be:
+  - a wav file path, e.g.: asr_example.wav
+  - a pcm file path, e.g.: asr_example.pcm; in this case the audio sample rate `fs` must be specified (default 16000)
+  - an audio byte stream, e.g.: byte data from a microphone
+  - wav.scp, a Kaldi-style wav list (`wav_id \t wav_path`), e.g.:
+  ```text
+  asr_example1  ./audios/asr_example1.wav
+  asr_example2  ./audios/asr_example2.wav
+  ```
+  With `wav.scp` input, `output_dir` must be set to save the output results
+  - audio samples, e.g.: `audio, rate = soundfile.read("asr_example_zh.wav")`, of type numpy.ndarray; batch input is supported as a list (see the sketch after this list):
+  ```[audio_sample1, audio_sample2, ..., audio_sampleN]```
+  - fbank input, batching supported; shape [batch, frames, dim], type torch.Tensor
+- `output_dir`: `None` (default), if set, the output path for the results
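+
+A minimal sketch of decoding a batch of audio samples as a list (the wav paths are placeholders):
+```python
+import soundfile
+from funasr import AutoModel
+
+model = AutoModel(model="../modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+# Read two waveforms as numpy.ndarray and decode them in one batch.
+audio1, rate = soundfile.read("./audios/asr_example1.wav")
+audio2, rate = soundfile.read("./audios/asr_example2.wav")
+res = model(input=[audio1, audio2], batch_size=2)
+print(res)
+```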
diff --git a/examples/industrial_data_pretraining/tp_aligner/demo.py b/examples/industrial_data_pretraining/tp_aligner/demo.py
new file mode 100644
index 0000000..0d4be5b
--- /dev/null
+++ b/examples/industrial_data_pretraining/tp_aligner/demo.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
+# MIT License (https://opensource.org/licenses/MIT)
+
+from funasr import AutoModel
+
+model = AutoModel(model="/Users/zhifu/modelscope_models/speech_timestamp_prediction-v1-16k-offline")
+
+res = model(input=("/Users/zhifu/funasr_github/test_local/wav.scp",
+ "/Users/zhifu/funasr_github/test_local/text.txt"),
+ data_type=("sound", "text"),
+ batch_size=2,
+ )
+print(res)
diff --git a/examples/industrial_data_pretraining/tp_aligner/infer.sh b/examples/industrial_data_pretraining/tp_aligner/infer.sh
new file mode 100644
index 0000000..ded296f
--- /dev/null
+++ b/examples/industrial_data_pretraining/tp_aligner/infer.sh
@@ -0,0 +1,17 @@
+
+# download model
+local_path_root=../modelscope_models
+mkdir -p ${local_path_root}
+local_path=${local_path_root}/speech_timestamp_prediction-v1-16k-offline
+git clone https://www.modelscope.cn/damo/speech_timestamp_prediction-v1-16k-offline.git ${local_path}
+
+
+python funasr/bin/inference.py \
++model="${local_path}" \
++input='["/Users/zhifu/funasr_github/test_local/wav.scp", "/Users/zhifu/funasr_github/test_local/text.txt"]' \
++data_type='["sound", "text"]' \
++output_dir="./outputs/debug" \
++device="cpu" \
++batch_size=2 \
++debug="true"
+
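The wav.scp and text.txt paths above are local to the patch author's machine and are left as-is. As a hedged sketch of the expected file formats — wav.scp is the Kaldi-style list described in the tp_aligner README, and text.txt is assumed to map the same utterance ids to transcripts:

```text
# wav.scp: <utt_id> <wav_path>
asr_example1 ./audios/asr_example1.wav

# text.txt: <utt_id> <transcript> (assumed format)
asr_example1 欢迎体验语音识别
```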
diff --git a/funasr/bin/inference.py b/funasr/bin/inference.py
index 77b46f7..1fac92e 100644
--- a/funasr/bin/inference.py
+++ b/funasr/bin/inference.py
@@ -4,11 +4,11 @@
import numpy as np
import hydra
import json
-from omegaconf import DictConfig, OmegaConf
+from omegaconf import DictConfig, OmegaConf, ListConfig
import logging
from funasr.download.download_from_hub import download_model
from funasr.train_utils.set_all_random_seed import set_all_random_seed
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_bytes
+from funasr.utils.load_utils import load_bytes
from funasr.train_utils.device_funcs import to_device
from tqdm import tqdm
from funasr.train_utils.load_pretrained_model import load_pretrained_model
@@ -17,11 +17,11 @@
import string
from funasr.register import tables
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.utils.timestamp_tools import time_stamp_sentence
-def build_iter_for_infer(data_in, input_len=None, data_type="sound", key=None):
+def build_iter_for_infer(data_in, input_len=None, data_type=None, key=None):
"""
:param input:
@@ -58,9 +58,19 @@
key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
data_list = [data_in]
key_list = [key]
- elif isinstance(data_in, (list, tuple)): # [audio sample point, fbank]
- data_list = data_in
- key_list = ["rand_key_" + ''.join(random.choice(chars) for _ in range(13)) for _ in range(len(data_in))]
+ elif isinstance(data_in, (list, tuple)):
+        if data_type is not None and isinstance(data_type, (list, tuple)):
+            # Paired multi-modal input, e.g. (wav.scp, text.txt): build a data
+            # list per modality, then zip them into per-sample tuples; the key
+            # list from the last recursion is reused for the whole batch.
+            data_list_tmp = []
+            for data_in_i, data_type_i in zip(data_in, data_type):
+                key_list, data_list_i = build_iter_for_infer(data_in=data_in_i, data_type=data_type_i)
+                data_list_tmp.append(data_list_i)
+            data_list = []
+            for item in zip(*data_list_tmp):
+                data_list.append(item)
+ else:
+ # [audio sample point, fbank]
+ data_list = data_in
+ key_list = ["rand_key_" + ''.join(random.choice(chars) for _ in range(13)) for _ in range(len(data_in))]
else: # raw text; audio sample point, fbank; bytes
if isinstance(data_in, bytes): # audio bytes
data_in = load_bytes(data_in)
@@ -72,7 +82,16 @@
return key_list, data_list
@hydra.main(config_name=None, version_base=None)
-def main_hydra(kwargs: DictConfig):
+def main_hydra(cfg: DictConfig):
+ def to_plain_list(cfg_item):
+ if isinstance(cfg_item, ListConfig):
+ return OmegaConf.to_container(cfg_item, resolve=True)
+ elif isinstance(cfg_item, DictConfig):
+ return {k: to_plain_list(v) for k, v in cfg_item.items()}
+ else:
+ return cfg_item
+
+ kwargs = to_plain_list(cfg)
log_level = getattr(logging, kwargs.get("log_level", "INFO").upper())
logging.basicConfig(level=log_level)
@@ -125,7 +144,7 @@
device = kwargs.get("device", "cuda")
if not torch.cuda.is_available() or kwargs.get("ngpu", 0):
device = "cpu"
- kwargs["batch_size"] = 1
+ # kwargs["batch_size"] = 1
kwargs["device"] = device
if kwargs.get("ncpu", None):
@@ -182,8 +201,8 @@
data_type = kwargs.get("data_type", "sound")
batch_size = kwargs.get("batch_size", 1)
- if kwargs.get("device", "cpu") == "cpu":
- batch_size = 1
+ # if kwargs.get("device", "cpu") == "cpu":
+ # batch_size = 1
key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type, key=key)
@@ -259,7 +278,7 @@
key = res[i]["key"]
vadsegments = res[i]["value"]
input_i = data_list[i]
- speech = load_audio(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
+ speech = load_audio_and_text_image_video(input_i, fs=kwargs["frontend"].fs, audio_fs=kwargs.get("fs", 16000))
speech_lengths = len(speech)
n = len(vadsegments)
data_with_index = [(vadsegments[i], i) for i in range(n)]
@@ -398,7 +417,7 @@
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_batch, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_batch, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
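The paired-input branch added to build_iter_for_infer above transposes per-modality lists into per-sample tuples. A standalone sketch of that reshaping (the variable names are illustrative, not part of the FunASR API):

```python
# Two modalities, each a list of per-sample values,
# e.g. loaded from wav.scp and text.txt.
wavs = ["a.wav", "b.wav"]
texts = ["transcript a", "transcript b"]

data_list_tmp = [wavs, texts]          # one list per modality
data_list = list(zip(*data_list_tmp))  # one tuple per sample
print(data_list)  # [('a.wav', 'transcript a'), ('b.wav', 'transcript b')]
```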
diff --git a/funasr/datasets/audio_datasets/datasets.py b/funasr/datasets/audio_datasets/datasets.py
index bfdf86a..0139c93 100644
--- a/funasr/datasets/audio_datasets/datasets.py
+++ b/funasr/datasets/audio_datasets/datasets.py
@@ -8,7 +8,7 @@
import time
import logging
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.register import tables
@tables.register("dataset_classes", "AudioDataset")
diff --git a/funasr/datasets/audio_datasets/load_audio_extract_fbank.py b/funasr/datasets/audio_datasets/load_audio_extract_fbank.py
deleted file mode 100644
index c8883ee..0000000
--- a/funasr/datasets/audio_datasets/load_audio_extract_fbank.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-import torch
-import json
-import torch.distributed as dist
-import numpy as np
-import kaldiio
-import librosa
-import torchaudio
-import time
-import logging
-from torch.nn.utils.rnn import pad_sequence
-
-def load_audio(audio_or_path_or_list, fs: int=16000, audio_fs: int=16000):
-
- if isinstance(audio_or_path_or_list, (list, tuple)):
- return [load_audio(audio, fs=fs, audio_fs=audio_fs) for audio in audio_or_path_or_list]
-
- if isinstance(audio_or_path_or_list, str) and os.path.exists(audio_or_path_or_list):
- audio_or_path_or_list, audio_fs = torchaudio.load(audio_or_path_or_list)
- audio_or_path_or_list = audio_or_path_or_list[0, :]
- elif isinstance(audio_or_path_or_list, np.ndarray): # audio sample point
- audio_or_path_or_list = np.squeeze(audio_or_path_or_list) #[n_samples,]
-
- if audio_fs != fs:
- resampler = torchaudio.transforms.Resample(audio_fs, fs)
- resampled_waveform = resampler(audio_or_path_or_list[None, :])[0, :]
- return audio_or_path_or_list
-#
-# def load_audio_from_list(audio_list, fs: int=16000, audio_fs: int=16000):
-# if isinstance(audio_list, (list, tuple)):
-# return [load_audio(audio_or_path, fs=fs, audio_fs=audio_fs) for audio_or_path in audio_list]
-
-
-def load_bytes(input):
- middle_data = np.frombuffer(input, dtype=np.int16)
- middle_data = np.asarray(middle_data)
- if middle_data.dtype.kind not in 'iu':
- raise TypeError("'middle_data' must be an array of integers")
- dtype = np.dtype('float32')
- if dtype.kind != 'f':
- raise TypeError("'dtype' must be a floating point type")
-
- i = np.iinfo(middle_data.dtype)
- abs_max = 2 ** (i.bits - 1)
- offset = i.min + abs_max
- array = np.frombuffer((middle_data.astype(dtype) - offset) / abs_max, dtype=np.float32)
- return array
-
-def extract_fbank(data, data_len = None, data_type: str="sound", frontend=None):
- # import pdb;
- # pdb.set_trace()
- if isinstance(data, np.ndarray):
- data = torch.from_numpy(data)
- if len(data.shape) < 2:
- data = data[None, :] # data: [batch, N]
- data_len = [data.shape[1]] if data_len is None else data_len
- elif isinstance(data, torch.Tensor):
- if len(data.shape) < 2:
- data = data[None, :] # data: [batch, N]
- data_len = [data.shape[1]] if data_len is None else data_len
- elif isinstance(data, (list, tuple)):
- data_list, data_len = [], []
- for data_i in data:
- if isinstance(data, np.ndarray):
- data_i = torch.from_numpy(data_i)
- data_list.append(data_i)
- data_len.append(data_i.shape[0])
- data = pad_sequence(data_list, batch_first=True) # data: [batch, N]
- # import pdb;
- # pdb.set_trace()
- if data_type == "sound":
- data, data_len = frontend(data, data_len)
-
- if isinstance(data_len, (list, tuple)):
- data_len = torch.tensor([data_len])
- return data.to(torch.float32), data_len.to(torch.int32)
\ No newline at end of file
diff --git a/funasr/models/bicif_paraformer/model.py b/funasr/models/bicif_paraformer/model.py
index 25b0462..aced088 100644
--- a/funasr/models/bicif_paraformer/model.py
+++ b/funasr/models/bicif_paraformer/model.py
@@ -23,7 +23,7 @@
from funasr.models.paraformer.search import Hypothesis
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
@@ -243,7 +243,7 @@
else:
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
diff --git a/funasr/models/contextual_paraformer/model.py b/funasr/models/contextual_paraformer/model.py
index 1bb2d32..c277ffc 100644
--- a/funasr/models/contextual_paraformer/model.py
+++ b/funasr/models/contextual_paraformer/model.py
@@ -46,7 +46,7 @@
@contextmanager
def autocast(enabled=True):
yield
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
@@ -337,7 +337,7 @@
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
diff --git a/funasr/models/fsmn_vad/model.py b/funasr/models/fsmn_vad/model.py
index 488e05e..ee19558 100644
--- a/funasr/models/fsmn_vad/model.py
+++ b/funasr/models/fsmn_vad/model.py
@@ -9,7 +9,7 @@
from typing import Optional
import time
from funasr.register import tables
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio,extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video,extract_fbank
from funasr.utils.datadir_writer import DatadirWriter
from torch.nn.utils.rnn import pad_sequence
@@ -544,7 +544,7 @@
else:
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
diff --git a/funasr/models/paraformer/model.py b/funasr/models/paraformer/model.py
index 4ce2c32..5492420 100644
--- a/funasr/models/paraformer/model.py
+++ b/funasr/models/paraformer/model.py
@@ -22,7 +22,7 @@
from torch.cuda.amp import autocast
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
from funasr.register import tables
@@ -466,7 +466,7 @@
else:
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000), data_type=kwargs.get("data_type", "sound"), tokenizer=tokenizer)
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend)
diff --git a/funasr/models/paraformer_streaming/model.py b/funasr/models/paraformer_streaming/model.py
index bb24469..a57c927 100644
--- a/funasr/models/paraformer_streaming/model.py
+++ b/funasr/models/paraformer_streaming/model.py
@@ -40,7 +40,7 @@
@contextmanager
def autocast(enabled=True):
yield
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
from funasr.utils.timestamp_tools import ts_prediction_lfr6_standard
@@ -483,7 +483,7 @@
meta_data = {}
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=self.frontend)
@@ -761,7 +761,7 @@
meta_data = {}
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
diff --git a/funasr/models/seaco_paraformer/model.py b/funasr/models/seaco_paraformer/model.py
index d107a57..2de125a 100644
--- a/funasr/models/seaco_paraformer/model.py
+++ b/funasr/models/seaco_paraformer/model.py
@@ -35,7 +35,7 @@
@contextmanager
def autocast(enabled=True):
yield
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
@@ -327,7 +327,7 @@
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
diff --git a/funasr/models/transducer/model.py b/funasr/models/transducer/model.py
index 21de4ba..9d9ae4b 100644
--- a/funasr/models/transducer/model.py
+++ b/funasr/models/transducer/model.py
@@ -45,7 +45,7 @@
@contextmanager
def autocast(enabled=True):
yield
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
from funasr.models.transformer.utils.nets_utils import get_transducer_task_io
@@ -517,7 +517,7 @@
meta_data = {}
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=self.frontend)
diff --git a/funasr/models/transformer/model.py b/funasr/models/transformer/model.py
index b710c97..f09f460 100644
--- a/funasr/models/transformer/model.py
+++ b/funasr/models/transformer/model.py
@@ -12,7 +12,7 @@
from funasr.metrics.compute_acc import th_accuracy
# from funasr.models.e2e_asr_common import ErrorCalculator
from funasr.train_utils.device_funcs import force_gatherable
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
+from funasr.utils.load_utils import load_audio_and_text_image_video, extract_fbank
from funasr.utils import postprocess_utils
from funasr.utils.datadir_writer import DatadirWriter
from funasr.register import tables
@@ -392,7 +392,7 @@
meta_data = {}
# extract fbank feats
time1 = time.perf_counter()
- audio_sample_list = load_audio(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+ audio_sample_list = load_audio_and_text_image_video(data_in, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
time2 = time.perf_counter()
meta_data["load_data"] = f"{time2 - time1:0.3f}"
speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=self.frontend)
diff --git a/funasr/utils/load_utils.py b/funasr/utils/load_utils.py
new file mode 100644
index 0000000..7f1b850
--- /dev/null
+++ b/funasr/utils/load_utils.py
@@ -0,0 +1,102 @@
+import os
+import torch
+import json
+import torch.distributed as dist
+import numpy as np
+import kaldiio
+import librosa
+import torchaudio
+import time
+import logging
+from torch.nn.utils.rnn import pad_sequence
+
+
+def load_audio_and_text_image_video(audio_or_path_or_list, fs: int = 16000, audio_fs: int = 16000, data_type=None, tokenizer=None):
+    if isinstance(audio_or_path_or_list, (list, tuple)):
+        if data_type is not None and isinstance(data_type, (list, tuple)):
+            # Multi-modal batch: each sample is a tuple aligned with data_type,
+            # e.g. (wav, text). Load every field, then regroup per modality.
+            data_types = [data_type] * len(audio_or_path_or_list)
+            audio_or_path_or_list_ret = [[] for _ in data_type]
+            for i, (data_type_i, audio_or_path_or_list_i) in enumerate(zip(data_types, audio_or_path_or_list)):
+                for j, (data_type_j, audio_or_path_or_list_j) in enumerate(zip(data_type_i, audio_or_path_or_list_i)):
+                    audio_or_path_or_list_j = load_audio_and_text_image_video(audio_or_path_or_list_j, fs=fs, audio_fs=audio_fs, data_type=data_type_j, tokenizer=tokenizer)
+                    audio_or_path_or_list_ret[j].append(audio_or_path_or_list_j)
+            return audio_or_path_or_list_ret
+        else:
+            return [load_audio_and_text_image_video(audio, fs=fs, audio_fs=audio_fs) for audio in audio_or_path_or_list]
+
+    if isinstance(audio_or_path_or_list, str) and os.path.exists(audio_or_path_or_list):
+        audio_or_path_or_list, audio_fs = torchaudio.load(audio_or_path_or_list)
+        audio_or_path_or_list = audio_or_path_or_list[0, :]  # keep the first channel
+    elif isinstance(audio_or_path_or_list, np.ndarray):  # audio sample points
+        audio_or_path_or_list = np.squeeze(audio_or_path_or_list)  # [n_samples,]
+    elif isinstance(audio_or_path_or_list, str) and data_type == "text" and tokenizer is not None:
+        # Raw text input: return token ids rather than a waveform.
+        audio_or_path_or_list = tokenizer.encode(audio_or_path_or_list)
+
+    if audio_fs != fs and data_type != "text":
+        resampler = torchaudio.transforms.Resample(audio_fs, fs)
+        audio_or_path_or_list = resampler(audio_or_path_or_list[None, :])[0, :]
+    return audio_or_path_or_list
+
+def load_bytes(input):
+ middle_data = np.frombuffer(input, dtype=np.int16)
+ middle_data = np.asarray(middle_data)
+ if middle_data.dtype.kind not in 'iu':
+ raise TypeError("'middle_data' must be an array of integers")
+ dtype = np.dtype('float32')
+ if dtype.kind != 'f':
+ raise TypeError("'dtype' must be a floating point type")
+
+ i = np.iinfo(middle_data.dtype)
+ abs_max = 2 ** (i.bits - 1)
+ offset = i.min + abs_max
+ array = np.frombuffer((middle_data.astype(dtype) - offset) / abs_max, dtype=np.float32)
+ return array
+
+def extract_fbank(data, data_len=None, data_type: str = "sound", frontend=None):
+    if isinstance(data, np.ndarray):
+        data = torch.from_numpy(data)
+        if len(data.shape) < 2:
+            data = data[None, :]  # data: [batch, N]
+        data_len = [data.shape[1]] if data_len is None else data_len
+    elif isinstance(data, torch.Tensor):
+        if len(data.shape) < 2:
+            data = data[None, :]  # data: [batch, N]
+        data_len = [data.shape[1]] if data_len is None else data_len
+    elif isinstance(data, (list, tuple)):
+        # Pad a list of variable-length 1-D samples into one [batch, N] tensor.
+        data_list, data_len = [], []
+        for data_i in data:
+            if isinstance(data_i, np.ndarray):
+                data_i = torch.from_numpy(data_i)
+            data_list.append(data_i)
+            data_len.append(data_i.shape[0])
+        data = pad_sequence(data_list, batch_first=True)  # data: [batch, N]
+
+    data, data_len = frontend(data, data_len)
+
+    if isinstance(data_len, (list, tuple)):
+        data_len = torch.tensor([data_len])
+    return data.to(torch.float32), data_len.to(torch.int32)
\ No newline at end of file
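For reference, the load_bytes helper above maps 16-bit PCM bytes to float32 samples in [-1, 1). A minimal round-trip sketch with a synthetic buffer (the values are arbitrary):

```python
import numpy as np
from funasr.utils.load_utils import load_bytes

# Four int16 samples packed as raw bytes.
pcm = np.array([0, 16384, -16384, 32767], dtype=np.int16).tobytes()
audio = load_bytes(pcm)
print(audio)        # ~[0.0, 0.5, -0.5, 1.0] after dividing by 2**15
print(audio.dtype)  # float32
```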
--
Gitblit v1.9.1