From cfe22850e2a62071fca0fbdc15bb6e95ca555490 Mon Sep 17 00:00:00 2001
From: yufan-aslp <379840315@qq.com>
Date: Tue, 25 Apr 2023 15:42:03 +0800
Subject: [PATCH] update mfcca infer.sh

---
 /dev/null                                                                          |   53 ----------
 egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.py  |  111 +++------------------
 egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.sh  |   70 ++++++++++++++
 egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/README.md |    1 
 egs_modelscope/asr/TEMPLATE/README.md                                              |   18 +++
 egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/utils     |    1 
 6 files changed, 107 insertions(+), 147 deletions(-)

diff --git a/egs_modelscope/asr/TEMPLATE/README.md b/egs_modelscope/asr/TEMPLATE/README.md
index c645033..94b47ec 100644
--- a/egs_modelscope/asr/TEMPLATE/README.md
+++ b/egs_modelscope/asr/TEMPLATE/README.md
@@ -58,6 +58,22 @@
 #### [RNN-T-online model]()
 Undo
 
+#### [MFCCA Model](https://www.modelscope.cn/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/summary)
+For more model details, please refer to the [docs](https://www.modelscope.cn/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/summary).
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model='NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950',
+    model_revision='v3.0.0'
+)
+
+rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
+print(rec_result)
+```
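+
+For batch decoding of a Kaldi-style `wav.scp` list, the recipe directory added in this update also provides an `infer.sh` wrapper. A minimal sketch (paths and GPU ids are illustrative):
+```shell
+cd egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950
+# data_dir must contain wav.scp; stage 2 additionally expects text for CER scoring.
+# One decoding job is launched per GPU listed in gpuid_list.
+bash infer.sh --data_dir ./data/test --output_dir ./results --gpuid_list "0,1"
+```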
+
 #### API-reference
 ##### Define pipeline
 - `task`: `Tasks.auto_speech_recognition`
@@ -210,4 +226,4 @@
     --njob 64 \
     --checkpoint_dir "./checkpoint" \
     --checkpoint_name "valid.cer_ctc.ave.pb"
-```
\ No newline at end of file
+```
diff --git a/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/README.md b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/README.md
deleted file mode 100644
index 16aeada..0000000
--- a/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# ModelScope Model
-
-## How to finetune and infer using a pretrained Paraformer-large Model
-
-### Finetune
-
-- Modify finetune training related parameters in `finetune.py`
-    - <strong>output_dir:</strong> # result dir
-    - <strong>data_dir:</strong> # the dataset dir needs to include files: `train/wav.scp`, `train/text`; `validation/wav.scp`, `validation/text`
-    - <strong>dataset_type:</strong> # for dataset larger than 1000 hours, set as `large`, otherwise set as `small`
-    - <strong>batch_bins:</strong> # batch size. For dataset_type is `small`, `batch_bins` indicates the feature frames. For dataset_type is `large`, `batch_bins` indicates the duration in ms
-    - <strong>max_epoch:</strong> # number of training epoch
-    - <strong>lr:</strong> # learning rate
-
-- Then you can run the pipeline to finetune with:
-```python
-    python finetune.py
-```
-
-### Inference
-
-Or you can use the finetuned model for inference directly.
-
-- Setting parameters in `infer.py`
-    - <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`. If `test/text` is also exists, CER will be computed
-    - <strong>output_dir:</strong> # result dir
-    - <strong>ngpu:</strong> # the number of GPUs for decoding
-    - <strong>njob:</strong> # the number of jobs for each GPU
-
-- Then you can run the pipeline to infer with:
-```python
-    python infer.py
-```
-
-- Results
-
-The decoding results can be found in `$output_dir/1best_recog/text.sp.cer` and `$output_dir/1best_recog/text.nosp.cer`, which includes recognition results with or without separating character (src) of each sample and the CER metric of the whole test set.
-
-### Inference using local finetuned model
-
-- Modify inference related parameters in `infer_after_finetune.py`
-    - <strong>output_dir:</strong> # result dir
-    - <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`. If `test/text` is also exists, CER will be computed
-    - <strong>decoding_model_name:</strong> # set the checkpoint name for decoding, e.g., `valid.cer_ctc.ave.pb`
-
-- Then you can run the pipeline to finetune with:
-```python
-    python infer_after_finetune.py
-```
-
-- Results
-
-The decoding results can be found in `$output_dir/1best_recog/text.sp.cer` and `$output_dir/1best_recog/text.nosp.cer`, which includes recognition results with or without separating character (src) of each sample and the CER metric of the whole test set.
diff --git a/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/README.md b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/README.md
new file mode 120000
index 0000000..bb55ab5
--- /dev/null
+++ b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/README.md
@@ -0,0 +1 @@
+../../TEMPLATE/README.md
\ No newline at end of file
diff --git a/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.py b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.py
index 8abadd7..12ec2ac 100755
--- a/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.py
+++ b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.py
@@ -1,102 +1,27 @@
 import os
 import shutil
-from multiprocessing import Pool
-
+import argparse
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 
-from funasr.utils.compute_wer import compute_wer
-
-def modelscope_infer_core(output_dir, split_dir, njob, idx):
-    output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
-    gpu_id = (int(idx) - 1) // njob
-    if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
-        gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
-        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
-    else:
-        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+def modelscope_infer(args):
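+    # pin this decoding job to the GPU given by --gpuid (infer.sh passes a different id to each parallel job)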
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
-	    model='NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950',
-        model_revision='v3.0.0',
-        output_dir=output_dir_job,
-        batch_size=1,
+        model=args.model,
+        model_revision=args.model_revision,
+        output_dir=args.output_dir,
+        batch_size=args.batch_size,
     )
-    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in)
-
-
-def modelscope_infer(params):
-    # prepare for multi-GPU decoding
-    ngpu = params["ngpu"]
-    njob = params["njob"]
-    output_dir = params["output_dir"]
-    if os.path.exists(output_dir):
-        shutil.rmtree(output_dir)
-    os.mkdir(output_dir)
-    split_dir = os.path.join(output_dir, "split")
-    os.mkdir(split_dir)
-    nj = ngpu * njob
-    wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
-    with open(wav_scp_file) as f:
-        lines = f.readlines()
-        num_lines = len(lines)
-        num_job_lines = num_lines // nj
-    start = 0
-    for i in range(nj):
-        end = start + num_job_lines
-        file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
-        with open(file, "w") as f:
-            if i == nj - 1:
-                f.writelines(lines[start:])
-            else:
-                f.writelines(lines[start:end])
-        start = end
-    p = Pool(nj)
-    for i in range(nj):
-        p.apply_async(modelscope_infer_core,
-                      args=(output_dir, split_dir, njob, str(i + 1)))
-    p.close()
-    p.join()
-
-    # combine decoding results
-    best_recog_path = os.path.join(output_dir, "1best_recog")
-    os.mkdir(best_recog_path)
-    files = ["text", "token", "score"]
-    for file in files:
-        with open(os.path.join(best_recog_path, file), "w") as f:
-            for i in range(nj):
-                job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
-                with open(job_file) as f_job:
-                    lines = f_job.readlines()
-                f.writelines(lines)
-
-    # If text exists, compute CER
-    text_in = os.path.join(params["data_dir"], "text")
-    if os.path.exists(text_in):
-        text_proc_file = os.path.join(best_recog_path, "token")
-        text_proc_file2 = os.path.join(best_recog_path, "token_nosep")
-        with open(text_proc_file, 'r') as hyp_reader:
-                with open(text_proc_file2, 'w') as hyp_writer:
-                    for line in hyp_reader:
-                        new_context = line.strip().replace("src","").replace("  "," ").replace("  "," ").strip()
-                        hyp_writer.write(new_context+'\n')
-        text_in2 = os.path.join(best_recog_path, "ref_text_nosep")
-        with open(text_in, 'r') as ref_reader:
-            with open(text_in2, 'w') as ref_writer:
-                for line in ref_reader:
-                    new_context = line.strip().replace("src","").replace("  "," ").replace("  "," ").strip()
-                    ref_writer.write(new_context+'\n')
-
-
-        compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.sp.cer"))
-        compute_wer(text_in2, text_proc_file2, os.path.join(best_recog_path, "text.nosp.cer"))
-
+    inference_pipeline(audio_in=args.audio_in)
 
 if __name__ == "__main__":
-    params = {}
-    params["data_dir"] = "./example_data/validation"
-    params["output_dir"] = "./output_dir"
-    params["ngpu"] = 1
-    params["njob"] = 1
-    modelscope_infer(params)
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default="NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950")
+    parser.add_argument('--model_revision', type=str, default="v3.0.0")
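+    # audio_in accepts a Kaldi-style wav.scp list (as used by infer.sh) or a single audio file/URL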
+    parser.add_argument('--audio_in', type=str, default="./data/test/wav.scp")
+    parser.add_argument('--output_dir', type=str, default="./results/")
+    parser.add_argument('--batch_size', type=int, default=1)
+    parser.add_argument('--gpuid', type=str, default="0")
+    args = parser.parse_args()
+    modelscope_infer(args)
diff --git a/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.sh b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.sh
new file mode 100755
index 0000000..51a4968
--- /dev/null
+++ b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/infer.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=3
+model="NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950"
+data_dir="./data/test"
+output_dir="./results_pl_gpu"
+batch_size=1
+gpu_inference=true    # whether to perform GPU decoding
+gpuid_list="0,1"    # GPUs used for decoding, one job per GPU, e.g., gpuid_list="0,1"
+njob=4    # number of parallel jobs for CPU decoding (used only when gpu_inference=false)
+
+. utils/parse_options.sh || exit 1;
+
+if [ "${gpu_inference}" == "true" ]; then
+    nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+    nj=$njob
+    batch_size=1
+    gpuid_list=""
+    for JOB in $(seq ${nj}); do
+        gpuid_list=$gpuid_list"-1,"
+    done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+    split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+    echo "Decoding ..."
+    gpuid_list_array=(${gpuid_list//,/ })
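+    # launch one decoding job per wav.scp split; run.pl substitutes JOB with 1..nj and each job gets its own GPU id (-1 means CPU)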
+    ./utils/run.pl JOB=1:${nj} ${output_dir}/log/infer.JOB.log \
+    python infer.py \
+       --model ${model} \
+       --audio_in ${output_dir}/split/wav.JOB.scp \
+       --output_dir ${output_dir}/output.JOB \
+       --batch_size ${batch_size} \
+       --gpuid ${gpuid_list_array[JOB-1]}
+
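+    # merge the per-job 1best_recog outputs into a single directory, sorted by utterance id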
+    mkdir -p ${output_dir}/1best_recog
+    for f in token score text; do
+        if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+            for i in $(seq "${nj}"); do
+                cat "${output_dir}/output.${i}/1best_recog/${f}"
+            done | sort -k1 > "${output_dir}/1best_recog/${f}"
+        fi
+    done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+    echo "Computing WER ..."
+    cp ${output_dir}/1best_recog/token ${output_dir}/1best_recog/text.proc
+    cp ${data_dir}/text ${output_dir}/1best_recog/text.ref
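+    # strip the separator token "src" and collapse repeated spaces to also score CER without separators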
+    sed -e 's/src//g' ${output_dir}/1best_recog/text.proc | sed -e 's/ \+/ /g' > ${output_dir}/1best_recog/text_nosp.proc
+    sed -e 's/src//g' ${output_dir}/1best_recog/text.ref | sed -e 's/ \+/ /g' > ${output_dir}/1best_recog/text_nosp.ref
+
+    python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.sp.cer
+    tail -n 3 ${output_dir}/1best_recog/text.sp.cer
+    python utils/compute_wer.py ${output_dir}/1best_recog/text_nosp.ref ${output_dir}/1best_recog/text_nosp.proc ${output_dir}/1best_recog/text.nosp.cer
+    tail -n 3 ${output_dir}/1best_recog/text.nosp.cer
+fi
+
diff --git a/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/utils b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/utils
new file mode 120000
index 0000000..2ac163f
--- /dev/null
+++ b/egs_modelscope/asr/mfcca/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/utils
@@ -0,0 +1 @@
+../../../../egs/aishell/transformer/utils
\ No newline at end of file

--
Gitblit v1.9.1