From 5451578ffb278490a7b0e757e941de71687856b2 Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Tue, 25 Apr 2023 14:43:30 +0800
Subject: [PATCH] Merge pull request #413 from alibaba-damo-academy/dev_sx
---
egs_modelscope/tp/TEMPLATE/README.md | 102 +++++++++++++++++++++++++
egs_modelscope/tp/TEMPLATE/infer.py | 1
egs_modelscope/vad/TEMPLATE/README.md | 4
egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/infer.py | 32 ++++++--
egs_modelscope/tp/TEMPLATE/infer.sh | 75 ++++++++++++++++++
egs_modelscope/tp/TEMPLATE/utils | 1
funasr/bin/tp_inference.py | 19 ++++
7 files changed, 223 insertions(+), 11 deletions(-)
diff --git a/egs_modelscope/tp/TEMPLATE/README.md b/egs_modelscope/tp/TEMPLATE/README.md
new file mode 100644
index 0000000..f511f58
--- /dev/null
+++ b/egs_modelscope/tp/TEMPLATE/README.md
@@ -0,0 +1,102 @@
+# TIMESTAMP PREDICTION
+
+## Inference
+
+### Quick start
+#### [Use the TP-Aligner Model](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary)
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+    task=Tasks.speech_timestamp,
+    model='damo/speech_timestamp_prediction-v1-16k-offline',
+    output_dir=None)
+
+rec_result = inference_pipeline(
+    audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav',
+    text_in='一 个 东 太 平 洋 国 家 为 什 么 跑 到 西 太 平 洋 来 了 呢')
+print(rec_result)
+```
+
+The timestamp pipeline can also be run after an ASR pipeline to form a complete recognition-plus-timestamp workflow; see the [demo](https://github.com/alibaba-damo-academy/FunASR/discussions/246) and the sketch below.
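+
+A minimal sketch of that composition, assuming the Paraformer model name listed in the model zoo and that the ASR result exposes the recognized text under `rec_result['text']` (check the linked demo for the exact fields):
+
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+wav = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav'
+
+# 1. Recognize the text first.
+asr_pipeline = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch')
+text = asr_pipeline(audio_in=wav)['text']
+
+# 2. The TP model expects blank-separated tokens; for a pure-Chinese result,
+#    joining the characters with blanks is a simple (assumed) way to get them.
+text_in = ' '.join(text.replace(' ', ''))
+
+# 3. Feed the same audio plus the recognized text to the timestamp pipeline.
+tp_pipeline = pipeline(
+    task=Tasks.speech_timestamp,
+    model='damo/speech_timestamp_prediction-v1-16k-offline')
+print(tp_pipeline(audio_in=wav, text_in=text_in))
+```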
+
+
+
+#### API-reference
+##### Define pipeline
+- `task`: `Tasks.speech_timestamp`
+- `model`: model name in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope), or model path in local disk
+- `ngpu`: `1` (Default), decode on GPU; if `ngpu` is set to `0`, decode on CPU
+- `ncpu`: `1` (Default), the number of threads used for intra-op parallelism on CPU
+- `output_dir`: `None` (Default), the directory where results are saved, if set
+- `batch_size`: `1` (Default), batch size when decoding (a construction sketch follows this list)
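+
+A construction sketch using these parameters; the values below are illustrative (CPU decoding with four threads, results written to disk):
+
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+    task=Tasks.speech_timestamp,
+    model='damo/speech_timestamp_prediction-v1-16k-offline',
+    ngpu=0,                   # decode on CPU
+    ncpu=4,                   # intra-op CPU threads
+    output_dir='./results',   # tp_sync/tp_time are written here
+    batch_size=1)
+```
+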
+##### Infer pipeline
+- `audio_in`: the input speech to predict, which could be:
+  - wav_path, `e.g.`: asr_example.wav (a local wav or a url),
+  - wav.scp, a kaldi-style wav list (`wav_id wav_path`), `e.g.`:
+    ```text
+    asr_example1 ./audios/asr_example1.wav
+    asr_example2 ./audios/asr_example2.wav
+    ```
+    When the input is a `wav.scp` file, `output_dir` must be set so the results can be saved
+- `text_in`: the input text to predict, split by blanks, which could be:
+  - text string, `e.g.`: `今 天 天 气 怎 么 样`
+  - text.scp, a kaldi-style text file (`wav_id transcription`), `e.g.`:
+    ```text
+    asr_example1 今 天 天 气 怎 么 样
+    asr_example2 欢 迎 体 验 达 摩 院 语 音 识 别 模 型
+    ```
+- `audio_fs`: the audio sampling rate, only set when `audio_in` is pcm audio
+- `output_dir`: `None` (Default), the output path of results, if set, containing the following files (a parsing sketch follows the examples):
+  - output_dir/timestamp_prediction/tp_sync, timestamps in seconds including silence periods, one line per utterance in the format `wav_id# token1 start_time end_time;`, `e.g.`:
+    ```text
+    test_wav1# <sil> 0.000 0.500;温 0.500 0.680;州 0.680 0.840;化 0.840 1.040;工 1.040 1.280;仓 1.280 1.520;<sil> 1.520 1.680;库 1.680 1.920;<sil> 1.920 2.160;起 2.160 2.380;火 2.380 2.580;殃 2.580 2.760;及 2.760 2.920;附 2.920 3.100;近 3.100 3.340;<sil> 3.340 3.400;河 3.400 3.640;<sil> 3.640 3.700;流 3.700 3.940;<sil> 3.940 4.240;大 4.240 4.400;量 4.400 4.520;死 4.520 4.680;鱼 4.680 4.920;<sil> 4.920 4.940;漂 4.940 5.120;浮 5.120 5.300;河 5.300 5.500;面 5.500 5.900;<sil> 5.900 6.240;
+    ```
+  - output_dir/timestamp_prediction/tp_time, a timestamp list in milliseconds with the same length as the input text, silences removed, in the format `wav_id# [[start_time, end_time], ...]`, `e.g.`:
+    ```text
+    test_wav1# [[500, 680], [680, 840], [840, 1040], [1040, 1280], [1280, 1520], [1680, 1920], [2160, 2380], [2380, 2580], [2580, 2760], [2760, 2920], [2920, 3100], [3100, 3340], [3400, 3640], [3700, 3940], [4240, 4400], [4400, 4520], [4520, 4680], [4680, 4920], [4940, 5120], [5120, 5300], [5300, 5500], [5500, 5900]]
+    ```
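+
+A sketch of batch inference over `wav.scp`/`text.scp` and of reading the resulting `tp_time` file back; the parsing relies only on the line format documented above (the data paths are assumptions):
+
+```python
+import ast
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+    task=Tasks.speech_timestamp,
+    model='damo/speech_timestamp_prediction-v1-16k-offline',
+    output_dir='./results')  # required for scp inputs
+
+inference_pipeline(audio_in='./data/test/wav.scp', text_in='./data/test/text.scp')
+
+# Each tp_time line looks like: "wav_id# [[start_ms, end_ms], ...]"
+with open('./results/timestamp_prediction/tp_time') as f:
+    for line in f:
+        wav_id, ts = line.strip().split('#', 1)
+        timestamps = ast.literal_eval(ts.strip())  # list of [start_ms, end_ms] pairs
+        print(wav_id, timestamps[:3])
+```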
+
+### Inference with multi-threaded CPUs or multiple GPUs
+FunASR also offers the recipe [egs_modelscope/tp/TEMPLATE/infer.sh](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/tp/TEMPLATE/infer.sh) to decode with multi-threaded CPUs or multiple GPUs.
+
+- Setting parameters in `infer.sh`
+ - `model`: model name in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope), or model path in local disk
+ - `data_dir`: the dataset dir **must** include `wav.scp` and `text.scp`
+ - `output_dir`: output dir of the recognition results
+ - `batch_size`: `64` (Default), batch size of inference on gpu
+ - `gpu_inference`: `true` (Default), whether to perform gpu decoding, set false for CPU inference
+ - `gpuid_list`: `0,1` (Default), which gpu_ids are used to infer
+ - `njob`: only used for CPU inference (`gpu_inference`=`false`), `64` (Default), the number of jobs for CPU decoding
+ - `checkpoint_dir`: only used for infer finetuned models, the path dir of finetuned models
+ - `checkpoint_name`: only used for infer finetuned models, `valid.cer_ctc.ave.pb` (Default), which checkpoint is used to infer
+
+- Decode with multi GPUs:
+```shell
+bash infer.sh \
+    --model "damo/speech_timestamp_prediction-v1-16k-offline" \
+    --data_dir "./data/test" \
+    --output_dir "./results" \
+    --batch_size 64 \
+    --gpu_inference true \
+    --gpuid_list "0,1"
+```
+- Decode with multi-thread CPUs:
+```shell
+bash infer.sh \
+    --model "damo/speech_timestamp_prediction-v1-16k-offline" \
+    --data_dir "./data/test" \
+    --output_dir "./results" \
+    --gpu_inference false \
+    --njob 64
+```
+
+## Finetune with pipeline
+
+### Quick start
+
+### Finetune with your data
+
+## Inference with your finetuned model
+
diff --git a/egs_modelscope/tp/TEMPLATE/infer.py b/egs_modelscope/tp/TEMPLATE/infer.py
new file mode 120000
index 0000000..df5dff2
--- /dev/null
+++ b/egs_modelscope/tp/TEMPLATE/infer.py
@@ -0,0 +1 @@
+../speech_timestamp_prediction-v1-16k-offline/infer.py
\ No newline at end of file
diff --git a/egs_modelscope/tp/TEMPLATE/infer.sh b/egs_modelscope/tp/TEMPLATE/infer.sh
new file mode 100644
index 0000000..2a923bb
--- /dev/null
+++ b/egs_modelscope/tp/TEMPLATE/infer.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_timestamp_prediction-v1-16k-offline"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=1
+gpu_inference=true # whether to perform gpu decoding
+gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+njob=4 # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
+
+. utils/parse_options.sh || exit 1;
+
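+# For GPU decoding, launch one job per id in gpuid_list; for CPU decoding,
+# launch ${njob} jobs and hand every job the dummy device id "-1".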
+if [ "${gpu_inference}" == "true" ]; then
+    nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+    nj=$njob
+    batch_size=1
+    gpuid_list=""
+    for JOB in $(seq ${nj}); do
+        gpuid_list=$gpuid_list"-1,"
+    done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+split_texts=""
+for JOB in $(seq ${nj}); do
+    split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+    split_texts="$split_texts $output_dir/split/text.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+perl utils/split_scp.pl ${data_dir}/text.scp ${split_texts}
+
+if [ -n "${checkpoint_dir}" ]; then
+    python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+    model=${checkpoint_dir}/${model}
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+    echo "Decoding ..."
+    gpuid_list_array=(${gpuid_list//,/ })
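+    # Job number JOB uses gpuid_list_array[JOB-1] (every entry is "-1" for CPU runs).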
+    for JOB in $(seq ${nj}); do
+        {
+            id=$((JOB-1))
+            gpuid=${gpuid_list_array[$id]}
+            mkdir -p ${output_dir}/output.$JOB
+            python infer.py \
+                --model ${model} \
+                --audio_in ${output_dir}/split/wav.$JOB.scp \
+                --text_in ${output_dir}/split/text.$JOB.scp \
+                --output_dir ${output_dir}/output.$JOB \
+                --batch_size ${batch_size} \
+                --gpuid ${gpuid}
+        } &
+    done
+    wait
+
+    mkdir -p ${output_dir}/timestamp_prediction
+    for f in tp_sync tp_time; do
+        if [ -f "${output_dir}/output.1/timestamp_prediction/${f}" ]; then
+            for i in $(seq "${nj}"); do
+                cat "${output_dir}/output.${i}/timestamp_prediction/${f}"
+            done | sort -k1 > "${output_dir}/timestamp_prediction/${f}"
+        fi
+    done
+fi
+
diff --git a/egs_modelscope/tp/TEMPLATE/utils b/egs_modelscope/tp/TEMPLATE/utils
new file mode 120000
index 0000000..04cf1f8
--- /dev/null
+++ b/egs_modelscope/tp/TEMPLATE/utils
@@ -0,0 +1 @@
+../../vad/TEMPLATE/utils
\ No newline at end of file
diff --git a/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/infer.py b/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/infer.py
index ff42e68..6a7e496 100644
--- a/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/infer.py
+++ b/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/infer.py
@@ -1,12 +1,28 @@
+import os
+import argparse
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
-inference_pipline = pipeline(
- task=Tasks.speech_timestamp,
- model='damo/speech_timestamp_prediction-v1-16k-offline',
- output_dir='./tmp')
+
+def modelscope_infer(args):
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
+    inference_pipeline = pipeline(
+        task=Tasks.speech_timestamp,
+        model=args.model,
+        output_dir=args.output_dir,
+        batch_size=args.batch_size,
+    )
+    if args.output_dir is not None:
+        inference_pipeline(audio_in=args.audio_in, text_in=args.text_in)
+    else:
+        print(inference_pipeline(audio_in=args.audio_in, text_in=args.text_in))
-rec_result = inference_pipline(
-    audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav',
-    text_in='一 个 东 太 平 洋 国 家 为 什 么 跑 到 西 太 平 洋 来 了 呢')
-print(rec_result)
\ No newline at end of file
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default="damo/speech_timestamp_prediction-v1-16k-offline")
+    parser.add_argument('--audio_in', type=str, default="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav")
+    parser.add_argument('--text_in', type=str, default="一 个 东 太 平 洋 国 家 为 什 么 跑 到 西 太 平 洋 来 了 呢")
+    parser.add_argument('--output_dir', type=str, default="./results/")
+    parser.add_argument('--batch_size', type=int, default=1)
+    parser.add_argument('--gpuid', type=str, default="0")
+    args = parser.parse_args()
+    modelscope_infer(args)
diff --git a/egs_modelscope/vad/TEMPLATE/README.md b/egs_modelscope/vad/TEMPLATE/README.md
index aef7dc8..6f746d5 100644
--- a/egs_modelscope/vad/TEMPLATE/README.md
+++ b/egs_modelscope/vad/TEMPLATE/README.md
@@ -1,7 +1,7 @@
# Voice Activity Detection
> **Note**:
-> The modelscope pipeline supports all the models in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope) to inference and finetine. Here we take the model of FSMN-VAD as example to demonstrate the usage.
+> The modelscope pipeline supports all the models in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope) to inference and finetune. Here we take the model of FSMN-VAD as example to demonstrate the usage.
## Inference
@@ -57,7 +57,7 @@
- pcm_path, `e.g.`: asr_example.pcm,
- audio bytes stream, `e.g.`: bytes data from a microphone
- audio sample point, `e.g.`: `audio, rate = soundfile.read("asr_example_zh.wav")`, the dtype is numpy.ndarray or torch.Tensor
- - wav.scp, kaldi style wav list (`wav_id \t wav_path``), `e.g.`:
+ - wav.scp, kaldi style wav list (`wav_id \t wav_path`), `e.g.`:
```text
asr_example1 ./audios/asr_example1.wav
asr_example2 ./audios/asr_example2.wav
diff --git a/funasr/bin/tp_inference.py b/funasr/bin/tp_inference.py
index df029fd..6e513c5 100644
--- a/funasr/bin/tp_inference.py
+++ b/funasr/bin/tp_inference.py
@@ -222,6 +222,13 @@
split_with_space=split_with_space,
seg_dict_file=seg_dict_file,
)
+
+ if output_dir is not None:
+ writer = DatadirWriter(output_dir)
+ tp_writer = writer["timestamp_prediction"]
+ # ibest_writer["token_list"][""] = " ".join(speech2text.asr_train_args.token_list)
+ else:
+ tp_writer = None
def _forward(
data_path_and_name_and_type,
@@ -230,7 +237,14 @@
fs: dict = None,
param_dict: dict = None,
**kwargs
- ):
+ ):
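+ # Prefer the per-call output dir (output_dir_v2) over the pipeline-level output_dir.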
+ output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+ writer = None
+ if output_path is not None:
+ writer = DatadirWriter(output_path)
+ tp_writer = writer["timestamp_prediction"]
+ else:
+ tp_writer = None
# 3. Build data-iterator
if data_path_and_name_and_type is None and raw_inputs is not None:
if isinstance(raw_inputs, torch.Tensor):
@@ -268,6 +282,9 @@
ts_str, ts_list = ts_prediction_lfr6_standard(us_alphas[batch_id], us_cif_peak[batch_id], token, force_time_shift=-3.0)
logging.warning(ts_str)
item = {'key': key, 'value': ts_str, 'timestamp':ts_list}
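+ # The '#' suffix on the key makes output lines read "wav_id# ...", matching the tp README.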
+ if tp_writer is not None:
+ tp_writer["tp_sync"][key+'#'] = ts_str
+ tp_writer["tp_time"][key+'#'] = str(ts_list)
tp_result_list.append(item)
return tp_result_list
--
Gitblit v1.9.1