From 100ea0304b956e55a9c2fe284b1ee1a26bdf2b7c Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期四, 20 四月 2023 23:49:15 +0800
Subject: [PATCH] docs

---
 egs_modelscope/asr/TEMPLATE/README.md               |   75 +++++++++++++++---------
 egs_modelscope/asr/TEMPLATE/infer.sh                |    7 ++
 egs/aishell/transformer/utils/prepare_checkpoint.py |   21 +++---
 docs/docker.md                                      |   13 +++-
 docs/modescope_pipeline/quick_start.md              |   16 ++--
 5 files changed, 82 insertions(+), 50 deletions(-)

diff --git a/docs/docker.md b/docs/docker.md
index 90616fd..95a75f2 100644
--- a/docs/docker.md
+++ b/docs/docker.md
@@ -1,6 +1,6 @@
 # Docker
 
-## Install 
+## Install Docker
 
 ### Ubuntu
 ```shell
@@ -16,6 +16,11 @@
 ### CentOS
 ```shell
 curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
+```
+
+### MacOS
+```shell
+brew install --cask --appdir=/Applications docker
 ```
 
 ### Windows
@@ -40,18 +45,20 @@
 sudo docker pull <image-name>:<tag>
 ```
 
-### Check Downloaded Image 
+### Check Image 
 ```shell
 sudo docker images
 ```
 
 ## Run Docker
 ```shell
-sudo docker run -it <image-name>:<tag> bash
+sudo docker run -itd --name funasr <image-name>:<tag> bash
+sudo docker exec -it funasr bash
 ```
 
 ## Stop Docker
 ```shell
+exit
 sudo docker ps
 sudo docker stop <container-id>
 ```
diff --git a/docs/modescope_pipeline/quick_start.md b/docs/modescope_pipeline/quick_start.md
index b1614f5..50dac36 100644
--- a/docs/modescope_pipeline/quick_start.md
+++ b/docs/modescope_pipeline/quick_start.md
@@ -3,7 +3,7 @@
 ## Inference with pipeline
 
 ### Speech Recognition
-#### Paraformer model
+#### Paraformer Model
 ```python
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
@@ -18,7 +18,7 @@
 ```
 
 ### Voice Activity Detection
-#### FSMN-VAD
+#### FSMN-VAD Model
 ```python
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
@@ -37,7 +37,7 @@
 ```
 
 ### Punctuation Restoration
-#### CT_Transformer
+#### CT_Transformer Model
 ```python
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
@@ -52,7 +52,7 @@
 ```
 
 ### Timestamp Prediction
-#### TP-Aligner
+#### TP-Aligner Model
 ```python
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
@@ -68,7 +68,7 @@
 ```
 
 ### Speaker Verification
-#### X-vector
+#### X-vector Model
 ```python
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
@@ -87,8 +87,8 @@
 print(rec_result["scores"][0])
 ```
 
-### Speaker diarization
-#### SOND
+### Speaker Diarization
+#### SOND Model
 ```python
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
@@ -154,7 +154,7 @@
 
 ## Finetune with pipeline
 ### Speech Recognition
-#### Paraformer model
+#### Paraformer Model
 
 finetune.py
 ```python
diff --git a/egs_modelscope/asr/TEMPLATE/infer_after_finetune.py b/egs/aishell/transformer/utils/prepare_checkpoint.py
similarity index 75%
rename from egs_modelscope/asr/TEMPLATE/infer_after_finetune.py
rename to egs/aishell/transformer/utils/prepare_checkpoint.py
index 2d311dd..01763d4 100644
--- a/egs_modelscope/asr/TEMPLATE/infer_after_finetune.py
+++ b/egs/aishell/transformer/utils/prepare_checkpoint.py
@@ -1,12 +1,9 @@
-import json
 import os
 import shutil
 
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 from modelscope.hub.snapshot_download import snapshot_download
-
-from funasr.utils.compute_wer import compute_wer
 
 def modelscope_infer_after_finetune(params):
     # prepare for decoding
@@ -39,10 +36,14 @@
 
 
 if __name__ == '__main__':
-    params = {}
-    params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-    params["output_dir"] = "./checkpoint"
-    params["data_dir"] = "./data/test"
-    params["decoding_model_name"] = "valid.acc.ave_10best.pb"
-    params["batch_size"] = 64
-    modelscope_infer_after_finetune(params)
\ No newline at end of file
+    import sys
+    
+    model = sys.argv[1]
+    checkpoint_dir = sys.argv[2]
+    checkpoint_name = sys.argv[3]
+    
+    try:
+        pretrained_model_path = snapshot_download(model, cache_dir=checkpoint_dir)
+    except Exception as e:
+        raise RuntimeError("Please download the pretrained model from ModelScope first.") from e
+    shutil.copy(os.path.join(checkpoint_dir, checkpoint_name), os.path.join(pretrained_model_path, "model.pb"))
diff --git a/egs_modelscope/asr/TEMPLATE/README.md b/egs_modelscope/asr/TEMPLATE/README.md
index 8b6b24d..a5d7d6e 100644
--- a/egs_modelscope/asr/TEMPLATE/README.md
+++ b/egs_modelscope/asr/TEMPLATE/README.md
@@ -1,12 +1,12 @@
 # Speech Recognition
 
 > **Note**: 
-> The modelscope pipeline supports all the models in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope) to inference and finetine. Here we take model of Paraformer and Paraformer-online as example to demonstrate the usage.
+> The modelscope pipeline supports all the models in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope) to inference and finetune. Here we take a typical model as an example to demonstrate the usage.
 
 ## Inference
 
 ### Quick start
-#### [Paraformer model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)
+#### [Paraformer Model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)
 ```python
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
@@ -19,7 +19,7 @@
 rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
 print(rec_result)
 ```
-#### [Paraformer-online model](https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online/summary)
+#### [Paraformer-online Model](https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online/summary)
 ```python
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
@@ -41,7 +41,7 @@
 ```
 Full code of demo, please ref to [demo](https://github.com/alibaba-damo-academy/FunASR/discussions/241)
 
-#### [UniASR model](https://www.modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/summary)
+#### [UniASR Model](https://www.modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/summary)
 There are three decoding mode for UniASR model(`fast`、`normal`、`offline`), for more model detailes, please refer to [docs](https://www.modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/summary)
 ```python
 decoding_model = "fast" # "fast"、"normal"、"offline"
@@ -59,21 +59,21 @@
 Undo
 
 #### API-reference
-##### define pipeline
+##### Define pipeline
 - `task`: `Tasks.auto_speech_recognition`
 - `model`: model name in [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_models.html#pretrained-models-on-modelscope), or model path in local disk
 - `ngpu`: 1 (Defalut), decoding on GPU. If ngpu=0, decoding on CPU
 - `ncpu`: 1 (Defalut), sets the number of threads used for intraop parallelism on CPU 
 - `output_dir`: None (Defalut), the output path of results if set
 - `batch_size`: 1 (Defalut), batch size when decoding
-##### infer pipeline
+##### Infer pipeline
 - `audio_in`: the input to decode, which could be: 
   - wav_path, `e.g.`: asr_example.wav,
   - pcm_path, `e.g.`: asr_example.pcm, 
   - audio bytes stream, `e.g.`: bytes data from a microphone
   - audio sample point，`e.g.`: `audio, rate = soundfile.read("asr_example_zh.wav")`, the dtype is numpy.ndarray or torch.Tensor
   - wav.scp, kaldi style wav list (`wav_id \t wav_path``), `e.g.`: 
-  ```cat wav.scp
+  ```text
   asr_example1  ./audios/asr_example1.wav
   asr_example2  ./audios/asr_example2.wav
   ```
@@ -85,13 +85,15 @@
 FunASR also offer recipes [infer.sh](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/infer.sh) to decode with multi-thread CPUs, or multi GPUs.
 
 - Setting parameters in `infer.sh`
-    - <strong>model:</strong> # model name on ModelScope
-    - <strong>data_dir:</strong> # the dataset dir needs to include `${data_dir}/wav.scp`. If `${data_dir}/text` is also exists, CER will be computed
-    - <strong>output_dir:</strong> # result dir
-    - <strong>batch_size:</strong> # batchsize of inference
-    - <strong>gpu_inference:</strong> # whether to perform gpu decoding, set false for cpu decoding
-    - <strong>gpuid_list:</strong> # set gpus, e.g., gpuid_list="0,1"
-    - <strong>njob:</strong> # the number of jobs for CPU decoding, if `gpu_inference`=false, use CPU decoding, please set `njob`
+    - `model`: model name on ModelScope
+    - `data_dir`: the dataset dir needs to include `${data_dir}/wav.scp`. If `${data_dir}/text` also exists, CER will be computed
+    - `output_dir`: result dir
+    - `batch_size`: batchsize of inference
+    - `gpu_inference`: whether to perform gpu decoding, set false for cpu decoding
+    - `gpuid_list`: set gpus, e.g., `gpuid_list`="0,1"
+    - `njob`: the number of jobs for CPU decoding, if `gpu_inference`=false, use CPU decoding, please set `njob`
+    - `checkpoint_dir`: only used for infer finetuned models, the path dir of finetuned models
+    - `checkpoint_name`: only used for infer finetuned models, `valid.cer_ctc.ave.pb` (Default), which checkpoint is used to infer
 
 - Decode with multi GPUs:
 ```shell
@@ -167,12 +169,12 @@
 ### Finetune with your data
 
 - Modify finetune training related parameters in [finetune.py](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/finetune.py)
-    - <strong>output_dir:</strong> # result dir
-    - <strong>data_dir:</strong> # the dataset dir needs to include files: `train/wav.scp`, `train/text`; `validation/wav.scp`, `validation/text`
-    - <strong>dataset_type:</strong> # for dataset larger than 1000 hours, set as `large`, otherwise set as `small`
-    - <strong>batch_bins:</strong> # batch size. For dataset_type is `small`, `batch_bins` indicates the feature frames. For dataset_type is `large`, `batch_bins` indicates the duration in ms
-    - <strong>max_epoch:</strong> # number of training epoch
-    - <strong>lr:</strong> # learning rate
+    - `output_dir`: result dir
+    - `data_dir`: the dataset dir needs to include files: `train/wav.scp`, `train/text`; `validation/wav.scp`, `validation/text`
+    - `dataset_type`: for dataset larger than 1000 hours, set as `large`, otherwise set as `small`
+    - `batch_bins`: batch size. For dataset_type is `small`, `batch_bins` indicates the feature frames. For dataset_type is `large`, `batch_bins` indicates the duration in ms
+    - `max_epoch`: number of training epoch
+    - `lr`: learning rate
 
 - Then you can run the pipeline to finetune with:
 ```shell
@@ -183,14 +185,29 @@
 CUDA_VISIBLE_DEVICES=1,2 python -m torch.distributed.launch --nproc_per_node 2 finetune.py > log.txt 2>&1
 ```
 ## Inference with your finetuned model
-- Modify inference related parameters in [infer_after_finetune.py](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/infer_after_finetune.py)
-    - <strong>modelscope_model_name: </strong> # model name on ModelScope
-    - <strong>output_dir:</strong> # result dir
-    - <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`. If `test/text` is also exists, CER will be computed
-    - <strong>decoding_model_name:</strong> # set the checkpoint name for decoding, e.g., `valid.cer_ctc.ave.pb`
-    - <strong>batch_size:</strong> # batchsize of inference  
 
-- Then you can run the pipeline to finetune with:
-```python
-    python infer_after_finetune.py
+- Setting parameters in [infer.sh](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/infer.sh) is the same as in [docs](https://github.com/alibaba-damo-academy/FunASR/tree/main/egs_modelscope/asr/TEMPLATE#inference-with-multi-thread-cpus-or-multi-gpus)
+
+- Decode with multi GPUs:
+```shell
+    bash infer.sh \
+    --model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+    --data_dir "./data/test" \
+    --output_dir "./results" \
+    --batch_size 64 \
+    --gpu_inference true \
+    --gpuid_list "0,1" \
+    --checkpoint_dir "./checkpoint" \
+    --checkpoint_name "valid.cer_ctc.ave.pb"
 ```
+- Decode with multi-thread CPUs:
+```shell
+    bash infer.sh \
+    --model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+    --data_dir "./data/test" \
+    --output_dir "./results" \
+    --gpu_inference false \
+    --njob 64 \
+    --checkpoint_dir "./checkpoint" \
+    --checkpoint_name "valid.cer_ctc.ave.pb"
+```
\ No newline at end of file
diff --git a/egs_modelscope/asr/TEMPLATE/infer.sh b/egs_modelscope/asr/TEMPLATE/infer.sh
index b8b011c..0d54abc 100644
--- a/egs_modelscope/asr/TEMPLATE/infer.sh
+++ b/egs_modelscope/asr/TEMPLATE/infer.sh
@@ -13,6 +13,8 @@
 gpu_inference=true    # whether to perform gpu decoding
 gpuid_list="0,1"    # set gpus, e.g., gpuid_list="0,1"
 njob=4    # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
 
 . utils/parse_options.sh || exit 1;
 
@@ -34,6 +36,11 @@
 done
 perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
 
+if [ -n "${checkpoint_dir}" ]; then
+  python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+  model=${checkpoint_dir}/${model}
+fi
+
 if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
     echo "Decoding ..."
     gpuid_list_array=(${gpuid_list//,/ })

--
Gitblit v1.9.1