From ccb948895498b48e591f1f6a74cb62f4dcde8202 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 27 Dec 2023 23:03:49 +0800
Subject: [PATCH] funasr1.0
---
funasr/bin/inference.py | 92 +++++++++++++++++++++++++++---
funasr/models/paraformer/model.py | 3 +
/dev/null | 1
examples/industrial_data_pretraining/paraformer/demo.py | 13 ++++
benchmarks/benchmark_pipeline_cer.md | 0
examples/industrial_data_pretraining/paraformer/README_zh.md | 42 ++++++++++++++
funasr/__init__.py | 2
7 files changed, 141 insertions(+), 12 deletions(-)
diff --git a/docs/benchmark/benchmark_pipeline_cer.md b/benchmarks/benchmark_pipeline_cer.md
similarity index 100%
rename from docs/benchmark/benchmark_pipeline_cer.md
rename to benchmarks/benchmark_pipeline_cer.md
diff --git a/docs/benchmark/benchmark_libtorch.md b/docs/benchmark/benchmark_libtorch.md
deleted file mode 120000
index 04ba682..0000000
--- a/docs/benchmark/benchmark_libtorch.md
+++ /dev/null
@@ -1 +0,0 @@
-../../funasr/runtime/docs/benchmark_libtorch.md
\ No newline at end of file
diff --git a/docs/benchmark/benchmark_onnx.md b/docs/benchmark/benchmark_onnx.md
deleted file mode 120000
index c199094..0000000
--- a/docs/benchmark/benchmark_onnx.md
+++ /dev/null
@@ -1 +0,0 @@
-../../funasr/runtime/docs/benchmark_onnx.md
\ No newline at end of file
diff --git a/docs/benchmark/benchmark_onnx_cpp.md b/docs/benchmark/benchmark_onnx_cpp.md
deleted file mode 120000
index c4ab108..0000000
--- a/docs/benchmark/benchmark_onnx_cpp.md
+++ /dev/null
@@ -1 +0,0 @@
-../../funasr/runtime/docs/benchmark_onnx_cpp.md
\ No newline at end of file
diff --git a/examples/industrial_data_pretraining/paraformer/README_zh.md b/examples/industrial_data_pretraining/paraformer/README_zh.md
new file mode 100644
index 0000000..8ddb202
--- /dev/null
+++ b/examples/industrial_data_pretraining/paraformer/README_zh.md
@@ -0,0 +1,42 @@
+(Simplified Chinese | [English](./README.md))
+
+# Speech Recognition
+
+> **Note**:
+> The pipeline supports inference and fine-tuning with all models in the [ModelScope model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope). Here we take a typical model as an example to demonstrate usage.
+
+## Inference
+
+### Quick Start
+#### [Paraformer Model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)
+```python
+from funasr import AutoModel
+
+model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+res = model(input="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav")
+print(res)
+```
+
+### API Reference
+#### AutoModel Definition
+- `model`: the model name in the [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope), or a path to a model on local disk
+- `device`: `cuda` (default), run inference on GPU; if set to `cpu`, inference runs on CPU
+- `ncpu`: `None` (default), the number of threads used for intra-op parallelism on CPU
+- `output_dir`: `None` (default); if set, the path where output results are written
+- `batch_size`: `1` (default), the batch size used during decoding (a construction sketch follows this list)
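+
+A minimal construction sketch using the parameters above; the local model path and the values shown are illustrative, not required defaults:
+```python
+from funasr import AutoModel
+
+model = AutoModel(
+    model="./speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",  # model name or local path
+    device="cpu",            # use CPU instead of the default "cuda"
+    ncpu=4,                  # threads for CPU intra-op parallelism
+    output_dir="./results",  # where to write recognition results
+    batch_size=1,            # decoding batch size
+)
+```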
+#### AutoModel Inference
+- `input`: the input to decode, which can be:
+  - a wav file path, e.g.: asr_example.wav
+  - a pcm file path, e.g.: asr_example.pcm; in this case the audio sampling rate `fs` must be specified (default: 16000)
+  - an audio byte stream, e.g.: raw bytes from a microphone
+  - a wav.scp file, i.e. a Kaldi-style wav list (`wav_id \t wav_path`), e.g.:
+  ```text
+  asr_example1 ./audios/asr_example1.wav
+  asr_example2 ./audios/asr_example2.wav
+  ```
+  When the input is a `wav.scp`, `output_dir` must be set to save the outputs (a usage sketch follows this list)
+  - audio samples, e.g.: `audio, rate = soundfile.read("asr_example_zh.wav")`, of type numpy.ndarray; batch input is supported as a list:
+  ```[audio_sample1, audio_sample2, ..., audio_sampleN]```
+  - fbank input, with batching supported; shape is [batch, frames, dim], type torch.Tensor
+- `output_dir`: `None` (default); if set, the path where output results are written
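+
+A minimal usage sketch for `wav.scp` input; the paths and `output_dir` value below are illustrative:
+```python
+from funasr import AutoModel
+
+model = AutoModel(model="./speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+# with wav.scp input, output_dir is required so per-utterance results can be saved
+res = model(input="./wav.scp", output_dir="./results", batch_size=2)
+print(res)
+```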
diff --git a/examples/industrial_data_pretraining/paraformer/demo.py b/examples/industrial_data_pretraining/paraformer/demo.py
index 11c52f5..3c3309c 100644
--- a/examples/industrial_data_pretraining/paraformer/demo.py
+++ b/examples/industrial_data_pretraining/paraformer/demo.py
@@ -8,4 +8,15 @@
model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
res = model(input="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav")
-print(res)
\ No newline at end of file
+print(res)
+
+
+from funasr import AutoFrontend
+
+# build a standalone frontend (fbank extractor) from the same model's config
+frontend = AutoFrontend(model="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+# extract fbank features from a Kaldi-style wav.scp list, two utterances per batch
+fbanks = frontend(input="/Users/zhifu/funasr_github/test_local/wav.scp", batch_size=2)
+
+for batch_idx, fbank_dict in enumerate(fbanks):
+    # each fbank_dict holds {"input": speech, "input_len": speech_lengths, "key": keys}
+    res = model(**fbank_dict)
+    print(res)
\ No newline at end of file
diff --git a/funasr/__init__.py b/funasr/__init__.py
index d7736e9..669bdac 100644
--- a/funasr/__init__.py
+++ b/funasr/__init__.py
@@ -30,4 +30,4 @@
import_submodules(__name__)
-from funasr.bin.inference import AutoModel
\ No newline at end of file
+from funasr.bin.inference import AutoModel, AutoFrontend
\ No newline at end of file
diff --git a/funasr/bin/inference.py b/funasr/bin/inference.py
index c545c4d..77b46f7 100644
--- a/funasr/bin/inference.py
+++ b/funasr/bin/inference.py
@@ -16,11 +16,12 @@
import random
import string
from funasr.register import tables
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio
+
+from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
from funasr.utils.vad_utils import slice_padding_audio_samples
from funasr.utils.timestamp_tools import time_stamp_sentence
-def build_iter_for_infer(data_in, input_len=None, data_type="sound"):
+def build_iter_for_infer(data_in, input_len=None, data_type="sound", key=None):
"""
:param input:
@@ -63,7 +64,8 @@
else: # raw text; audio sample point, fbank; bytes
if isinstance(data_in, bytes): # audio bytes
data_in = load_bytes(data_in)
- key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
+ if key is None:
+ key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
data_list = [data_in]
key_list = [key]
@@ -121,10 +123,13 @@
set_all_random_seed(kwargs.get("seed", 0))
device = kwargs.get("device", "cuda")
- if not torch.cuda.is_available() or kwargs.get("ngpu", 1):
+ if not torch.cuda.is_available() or kwargs.get("ngpu", 1) == 0:
device = "cpu"
kwargs["batch_size"] = 1
kwargs["device"] = device
+
+ if kwargs.get("ncpu", None):
+ torch.set_num_threads(kwargs.get("ncpu"))
# build tokenizer
tokenizer = kwargs.get("tokenizer", None)
@@ -169,17 +174,18 @@
else:
return self.generate_with_vad(input, input_len=input_len, **cfg)
- def generate(self, input, input_len=None, model=None, kwargs=None, **cfg):
+ def generate(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
kwargs = self.kwargs if kwargs is None else kwargs
kwargs.update(cfg)
model = self.model if model is None else model
data_type = kwargs.get("data_type", "sound")
batch_size = kwargs.get("batch_size", 1)
- # if kwargs.get("device", "cpu") == "cpu":
- # batch_size = 1
+ if kwargs.get("device", "cpu") == "cpu":
+ batch_size = 1
- key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type)
+ key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type, key=key)
speed_stats = {}
asr_result_list = []
@@ -193,7 +199,7 @@
key_batch = key_list[beg_idx:end_idx]
batch = {"data_in": data_batch, "key": key_batch}
if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
- batch["data_batch"] = data_batch[0]
+ batch["data_in"] = data_batch[0]
batch["data_lengths"] = input_len
time1 = time.perf_counter()
@@ -348,6 +354,74 @@
f"time_speech_total_all_samples: {time_speech_total_all_samples: 0.3f}, "
f"time_escape_total_all_samples: {time_escape_total_all_samples:0.3f}")
return results_ret_list
+
+
+class AutoFrontend:
+    def __init__(self, **kwargs):
+        assert "model" in kwargs
+        if "model_conf" not in kwargs:
+            logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
+            kwargs = download_model(**kwargs)
+
+        # build frontend
+        frontend = kwargs.get("frontend", None)
+        if frontend is not None:
+            frontend_class = tables.frontend_classes.get(frontend.lower())
+            frontend = frontend_class(**kwargs["frontend_conf"])
+
+        self.frontend = frontend
+        self.kwargs = kwargs
+
+    def __call__(self, input, input_len=None, kwargs=None, **cfg):
+        kwargs = self.kwargs if kwargs is None else kwargs
+        kwargs.update(cfg)
+
+        key_list, data_list = build_iter_for_infer(input, input_len=input_len)
+        batch_size = kwargs.get("batch_size", 1)
+        device = kwargs.get("device", "cpu")
+        if device == "cpu":
+            batch_size = 1
+
+        meta_data = {}
+
+        result_list = []
+        num_samples = len(data_list)
+        pbar = tqdm(colour="blue", total=num_samples + 1, dynamic_ncols=True)
+
+        time0 = time.perf_counter()
+        for beg_idx in range(0, num_samples, batch_size):
+            end_idx = min(num_samples, beg_idx + batch_size)
+            data_batch = data_list[beg_idx:end_idx]
+            key_batch = key_list[beg_idx:end_idx]
+
+            # extract fbank feats
+            time1 = time.perf_counter()
+            audio_sample_list = load_audio(data_batch, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+            time2 = time.perf_counter()
+            meta_data["load_data"] = f"{time2 - time1:0.3f}"
+            speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
+                                                   frontend=self.frontend)
+            time3 = time.perf_counter()
+            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+            # batch audio duration in seconds: frames * frame_shift (ms) * lfr_n / 1000
+            meta_data["batch_data_time"] = speech_lengths.sum().item() * self.frontend.frame_shift * self.frontend.lfr_n / 1000
+
+            # Tensor.to() is not in-place; rebind the moved tensors
+            speech = speech.to(device=device)
+            speech_lengths = speech_lengths.to(device=device)
+            batch = {"input": speech, "input_len": speech_lengths, "key": key_batch}
+            result_list.append(batch)
+
+            pbar.update(1)
+            description = f"{meta_data}, "
+            pbar.set_description(description)
+
+        time_end = time.perf_counter()
+        pbar.set_description(f"time elapsed total: {time_end - time0:0.3f}")
+
+        return result_list
+
+
if __name__ == '__main__':
main_hydra()
\ No newline at end of file
diff --git a/funasr/models/paraformer/model.py b/funasr/models/paraformer/model.py
index c546585..4ce2c32 100644
--- a/funasr/models/paraformer/model.py
+++ b/funasr/models/paraformer/model.py
@@ -495,6 +495,8 @@
results = []
b, n, d = decoder_out.size()
+ if isinstance(key[0], (list, tuple)):
+ key = key[0]
for i in range(b):
x = encoder_out[i, :encoder_out_lens[i], :]
am_scores = decoder_out[i, :pre_token_length[i], :]
@@ -535,6 +537,7 @@
text = tokenizer.tokens2text(token)
text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
+
result_i = {"key": key[i], "text": text_postprocessed}
--
Gitblit v1.9.1