Changed files:

- docs/modescope_pipeline/vad_pipeline.md
- egs_modelscope/vad/TEMPLATE/README.md
- egs_modelscope/vad/TEMPLATE/infer.py
- egs_modelscope/vad/TEMPLATE/infer.sh
- egs_modelscope/vad/TEMPLATE/utils
docs/modescope_pipeline/vad_pipeline.md

New file (a one-line symlink to the relocated README) @@ -0,0 +1 @@

```
+../../egs_modelscope/vad/TEMPLATE/README.md
```
File was renamed from docs/modescope_pipeline/vad_pipeline.md

````diff
@@ -71,7 +71,7 @@ - Setting parameters in `infer.sh`
     - `model`: model name in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope), or model path in local disk
-    - `data_dir`: the dataset dir needs to include `wav.scp`. If `${data_dir}/text` also exists, CER will be computed
+    - `data_dir`: the dataset dir needs to include `wav.scp`
     - `output_dir`: output dir of the recognition results
     - `batch_size`: `64` (Default), batch size of inference on gpu
     - `gpu_inference`: `true` (Default), whether to perform gpu decoding, set `false` for CPU inference
@@ -99,7 +99,6 @@
     --gpu_inference false \
     --njob 64
 ```
-## Finetune with pipeline
````
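As a minimal sketch of the parameters documented above, a GPU decoding run might look like the following; the utterance IDs and wav paths in `wav.scp` are hypothetical placeholders, and only `wav.scp` is required in `data_dir` now that no CER is computed:

```bash
# Hypothetical wav.scp: one "utterance-id wav-path" pair per line.
mkdir -p ./data/test
cat > ./data/test/wav.scp <<EOF
utt1 /path/to/audio/utt1.wav
utt2 /path/to/audio/utt2.wav
EOF

# GPU decoding on two cards, using the defaults documented above.
bash infer.sh \
    --model "damo/speech_fsmn_vad_zh-cn-16k-common" \
    --data_dir "./data/test" \
    --output_dir "./results" \
    --batch_size 64 \
    --gpu_inference true \
    --gpuid_list "0,1"
```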
egs_modelscope/vad/TEMPLATE/infer.py

New file @@ -0,0 +1,25 @@

```python
import os
import argparse

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks


def modelscope_infer(args):
    # Pin this process to the requested GPU before the pipeline is built.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
    inference_pipeline = pipeline(
        task=Tasks.voice_activity_detection,
        model=args.model,
        output_dir=args.output_dir,
        batch_size=args.batch_size,
    )
    inference_pipeline(audio_in=args.audio_in)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Default to the FSMN VAD model used by infer.sh, matching the VAD task.
    parser.add_argument('--model', type=str, default="damo/speech_fsmn_vad_zh-cn-16k-common")
    parser.add_argument('--audio_in', type=str, default="./data/test/wav.scp")
    parser.add_argument('--output_dir', type=str, default="./results/")
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--gpuid', type=str, default="0")
    args = parser.parse_args()
    modelscope_infer(args)
```
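For a single split, `infer.py` can also be run directly without the parallel shell wrapper; this invocation just spells out the defaults above (the model is downloaded from ModelScope on first use):

```bash
# Run VAD on one wav.scp on GPU 0, writing results to ./results/.
python infer.py \
    --model "damo/speech_fsmn_vad_zh-cn-16k-common" \
    --audio_in "./data/test/wav.scp" \
    --output_dir "./results/" \
    --batch_size 64 \
    --gpuid 0
```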
egs_modelscope/vad/TEMPLATE/infer.sh

New file @@ -0,0 +1,71 @@

```bash
#!/usr/bin/env bash

set -e
set -u
set -o pipefail

stage=1
stop_stage=2
model="damo/speech_fsmn_vad_zh-cn-16k-common"
data_dir="./data/test"
output_dir="./results"
batch_size=64
gpu_inference=true    # whether to perform gpu decoding
gpuid_list="0,1"      # set gpus, e.g., gpuid_list="0,1"
njob=64               # the number of jobs for CPU decoding; used when gpu_inference=false
checkpoint_dir=
checkpoint_name="valid.cer_ctc.ave.pb"

. utils/parse_options.sh || exit 1;

if [ "${gpu_inference}" == "true" ]; then
    # One job per GPU in gpuid_list.
    nj=$(echo $gpuid_list | awk -F "," '{print NF}')
else
    # CPU decoding: njob parallel workers, each pinned to gpuid -1.
    nj=$njob
    batch_size=1
    gpuid_list=""
    for JOB in $(seq ${nj}); do
        gpuid_list=$gpuid_list"-1,"
    done
fi

# Split wav.scp into nj shards, one per decoding job.
mkdir -p $output_dir/split
split_scps=""
for JOB in $(seq ${nj}); do
    split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
done
perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}

# Only remap the model path when a finetuned checkpoint dir is given.
if [ -n "${checkpoint_dir}" ]; then
    python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
    model=${checkpoint_dir}/${model}
fi

if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
    echo "Decoding ..."
    gpuid_list_array=(${gpuid_list//,/ })
    for JOB in $(seq ${nj}); do
        {
            id=$((JOB-1))
            gpuid=${gpuid_list_array[$id]}
            mkdir -p ${output_dir}/output.$JOB
            python infer.py \
                --model ${model} \
                --audio_in ${output_dir}/split/wav.$JOB.scp \
                --output_dir ${output_dir}/output.$JOB \
                --batch_size ${batch_size} \
                --gpuid ${gpuid}
        } &
    done
    wait

    # Merge per-job outputs, sorted by utterance ID.
    mkdir -p ${output_dir}/1best_recog
    for f in token score text; do
        if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
            for i in $(seq "${nj}"); do
                cat "${output_dir}/output.${i}/1best_recog/${f}"
            done | sort -k1 > "${output_dir}/1best_recog/${f}"
        fi
    done
fi
```
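On CPU-only machines the same script can be driven with `gpu_inference` off. A sketch (the job count here is an arbitrary example): the script then forces `batch_size=1`, pins every job to gpuid `-1`, and merges whichever of the `token`/`score`/`text` outputs the pipeline produced:

```bash
# CPU decoding with 8 parallel jobs; batch_size is forced to 1 internally.
bash infer.sh --gpu_inference false --njob 8

# Per-job results land in ./results/output.N/1best_recog/; the merged,
# ID-sorted files end up in ./results/1best_recog/.
ls ./results/1best_recog/
```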
egs_modelscope/vad/TEMPLATE/utils

New file (a one-line symlink to the shared utilities, e.g., `parse_options.sh` and `split_scp.pl`) @@ -0,0 +1 @@

```
+../../../egs/aishell/transformer/utils
```