From 7ca311f847a077cfea702762037ea17ce92a39cf Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期五, 21 四月 2023 00:56:05 +0800
Subject: [PATCH] vad docs

---
 docs/modescope_pipeline/vad_pipeline.md |    1 
 egs_modelscope/vad/TEMPLATE/utils       |    1 
 egs_modelscope/vad/TEMPLATE/README.md   |    3 -
 egs_modelscope/vad/TEMPLATE/infer.sh    |   71 +++++++++++++++++++++++++++++++++++
 egs_modelscope/vad/TEMPLATE/infer.py    |   25 ++++++++++++
 5 files changed, 99 insertions(+), 2 deletions(-)

diff --git a/docs/modescope_pipeline/vad_pipeline.md b/docs/modescope_pipeline/vad_pipeline.md
new file mode 120000
index 0000000..30ea6fc
--- /dev/null
+++ b/docs/modescope_pipeline/vad_pipeline.md
@@ -0,0 +1 @@
+../../egs_modelscope/vad/TEMPLATE/README.md
\ No newline at end of file
diff --git a/docs/modescope_pipeline/vad_pipeline.md b/egs_modelscope/vad/TEMPLATE/README.md
similarity index 96%
rename from docs/modescope_pipeline/vad_pipeline.md
rename to egs_modelscope/vad/TEMPLATE/README.md
index ca8a5ee..84601b0 100644
--- a/docs/modescope_pipeline/vad_pipeline.md
+++ b/egs_modelscope/vad/TEMPLATE/README.md
@@ -71,7 +71,7 @@
 
 - Setting parameters in `infer.sh`
     - `model`: model name in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope), or model path in local disk
-    - `data_dir`: the dataset dir needs to include `wav.scp`. If `${data_dir}/text` is also exists, CER will be computed
+    - `data_dir`: the dataset dir needs to include `wav.scp`
     - `output_dir`: output dir of the recognition results
     - `batch_size`: `64` (Default), batch size of inference on gpu
     - `gpu_inference`: `true` (Default), whether to perform gpu decoding, set false for CPU inference
@@ -99,7 +99,6 @@
     --gpu_inference false \
     --njob 64
 ```
-
 
 ## Finetune with pipeline
 
diff --git a/egs_modelscope/vad/TEMPLATE/infer.py b/egs_modelscope/vad/TEMPLATE/infer.py
new file mode 100644
index 0000000..3d9ee55
--- /dev/null
+++ b/egs_modelscope/vad/TEMPLATE/infer.py
@@ -0,0 +1,25 @@
+import os
+import shutil
+import argparse
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+def modelscope_infer(args):
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
+    inference_pipeline = pipeline(
+        task=Tasks.voice_activity_detection,
+        model=args.model,
+        output_dir=args.output_dir,
+        batch_size=args.batch_size,
+    )
+    inference_pipeline(audio_in=args.audio_in)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default="damo/speech_fsmn_vad_zh-cn-16k-common")
+    parser.add_argument('--audio_in', type=str, default="./data/test/wav.scp")
+    parser.add_argument('--output_dir', type=str, default="./results/")
+    parser.add_argument('--batch_size', type=int, default=64)
+    parser.add_argument('--gpuid', type=str, default="0")
+    args = parser.parse_args()
+    modelscope_infer(args)
\ No newline at end of file
diff --git a/egs_modelscope/vad/TEMPLATE/infer.sh b/egs_modelscope/vad/TEMPLATE/infer.sh
new file mode 100644
index 0000000..261b5e6
--- /dev/null
+++ b/egs_modelscope/vad/TEMPLATE/infer.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_fsmn_vad_zh-cn-16k-common"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=64
+gpu_inference=true    # whether to perform gpu decoding
+gpuid_list="0,1"    # set gpus, e.g., gpuid_list="0,1"
+njob=64    # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
+
+. utils/parse_options.sh || exit 1;
+
+if [ "${gpu_inference}" == "true" ]; then
+    nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+    nj=$njob
+    batch_size=1
+    gpuid_list=""
+    for JOB in $(seq ${nj}); do
+        gpuid_list=$gpuid_list"-1,"
+    done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+    split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ -n "${checkpoint_dir}" ]; then
+  python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+  model=${checkpoint_dir}/${model}
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+    echo "Decoding ..."
+    gpuid_list_array=(${gpuid_list//,/ })
+    for JOB in $(seq ${nj}); do
+        {
+        id=$((JOB-1))
+        gpuid=${gpuid_list_array[$id]}
+        mkdir -p ${output_dir}/output.$JOB
+        python infer.py \
+            --model ${model} \
+            --audio_in ${output_dir}/split/wav.$JOB.scp \
+            --output_dir ${output_dir}/output.$JOB \
+            --batch_size ${batch_size} \
+            --gpuid ${gpuid}
+        }&
+    done
+    wait
+
+    mkdir -p ${output_dir}/1best_recog
+    for f in token score text; do
+        if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+          for i in $(seq "${nj}"); do
+              cat "${output_dir}/output.${i}/1best_recog/${f}"
+          done | sort -k1 >"${output_dir}/1best_recog/${f}"
+        fi
+    done
+fi
+
diff --git a/egs_modelscope/vad/TEMPLATE/utils b/egs_modelscope/vad/TEMPLATE/utils
new file mode 120000
index 0000000..dc7d417
--- /dev/null
+++ b/egs_modelscope/vad/TEMPLATE/utils
@@ -0,0 +1 @@
+../../../egs/aishell/transformer/utils
\ No newline at end of file

--
Gitblit v1.9.1