From a323aa9385ebaa5b9b8e290e1bb0cc37c9e3c241 Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: 星期二, 18 四月 2023 14:43:55 +0800
Subject: [PATCH] docs

---
 docs/recipe/vad_recipe.md                    |    0 
 docs/runtime/grpc_cpp.md                     |    0 
 docs/modescope_pipeline/vad_pipeline.md      |   14 +++
 docs/index.rst                               |   34 +++++---
 docs/runtime/onnxruntime_python.md           |    0 
 docs/runtime/libtorch_python.md              |    0 
 docs/modescope_pipeline/lm_pipeline.md       |   14 +++
 docs/modescope_pipeline/asr_pipeline.md      |   14 +++
 docs/recipe/asr_recipe.md                    |    0 
 docs/recipe/sv_recipe.md                     |    0 
 docs/runtime/onnxruntime_cpp.md              |    1 
 /dev/null                                    |    1 
 docs/runtime/websocket_python.md             |    1 
 docs/recipe/punc_recipe.md                   |    0 
 docs/recipe/lm_recipe.md                     |    0 
 docs/runtime/export.md                       |    1 
 docs/modelscope_models.md                    |    2 
 docs/runtime/grpc_python.md                  |    1 
 docs/modescope_pipeline/modelscope_usages.md |    0 
 docs/modescope_pipeline/sv_pipeline.md       |   14 +++
 docs/modescope_pipeline/punc_pipeline.md     |   14 +++
 docs/huggingface_models.md                   |   94 +++++++++++++++++++++++
 docs/modescope_pipeline/quick_start.md       |    6 +
 docs/modescope_pipeline/tp_pipeline.md       |   14 +++
 24 files changed, 210 insertions(+), 15 deletions(-)

diff --git a/docs/export.md b/docs/export.md
deleted file mode 120000
index 592b12d..0000000
--- a/docs/export.md
+++ /dev/null
@@ -1 +0,0 @@
-../funasr/export/README.md
\ No newline at end of file
diff --git a/docs/grpc_python.md b/docs/grpc_python.md
deleted file mode 120000
index 792fccd..0000000
--- a/docs/grpc_python.md
+++ /dev/null
@@ -1 +0,0 @@
-../funasr/runtime/python/grpc/Readme.md
\ No newline at end of file
diff --git a/docs/huggingface_models.md b/docs/huggingface_models.md
new file mode 100644
index 0000000..61754eb
--- /dev/null
+++ b/docs/huggingface_models.md
@@ -0,0 +1,94 @@
+# Pretrained Models on Huggingface
+
+## Model License
+-  Apache License 2.0
+
+## Model Zoo
+Here we provided several pretrained models on different datasets. The details of models and datasets can be found on [ModelScope](https://www.modelscope.cn/models?page=1&tasks=auto-speech-recognition).
+
+### Speech Recognition Models
+#### Paraformer Models
+
+|                                                                     Model Name                                                                     | Language |          Training Data           | Vocab Size | Parameter | Offline/Online | Notes                                                                                                                           |
+|:--------------------------------------------------------------------------------------------------------------------------------------------------:|:--------:|:--------------------------------:|:----------:|:---------:|:--------------:|:--------------------------------------------------------------------------------------------------------------------------------|
+|        [Paraformer-large](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)        | CN & EN  | Alibaba Speech Data (60000hours) |    8404    |   220M    |    Offline     | Duration of input wav <= 20s                                                                                                    |
+| [Paraformer-large-long](https://www.modelscope.cn/models/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary) | CN & EN  | Alibaba Speech Data (60000hours) |    8404    |   220M    |    Offline     | Which could deal with arbitrary length input wav                                                                                |
+| [paraformer-large-contextual](https://www.modelscope.cn/models/damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/summary) | CN & EN  | Alibaba Speech Data (60000hours) |    8404    |   220M    |    Offline     | Which supports the hotword customization based on the incentive enhancement, and improves the recall and precision of hotwords. |
+|              [Paraformer](https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary)              | CN & EN  | Alibaba Speech Data (50000hours) |    8358    |    68M    |    Offline     | Duration of input wav <= 20s                                                                                                    |
+|          [Paraformer-online](https://modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8358-tensorflow1/summary)           | CN & EN  | Alibaba Speech Data (50000hours) |    8404    |    68M    |     Online     | Which could deal with streaming input                                                                                           |
+|       [Paraformer-tiny](https://www.modelscope.cn/models/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/summary)       |    CN    |  Alibaba Speech Data (200hours)  |    544     |   5.2M    |    Offline     | Lightweight Paraformer model which supports Mandarin command words recognition                                                  |
+|                   [Paraformer-aishell](https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-aishell1-pytorch/summary)                   |    CN    |        AISHELL (178hours)        |    4234    |    43M    |    Offline     |                                                                                                                                 |
+|       [ParaformerBert-aishell](https://modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary)       |    CN    |        AISHELL (178hours)        |    4234    |    43M    |    Offline     |                                                                                                                                 |
+|        [Paraformer-aishell2](https://www.modelscope.cn/models/damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary)         |    CN    |      AISHELL-2 (1000hours)       |    5212    |    64M    |    Offline     |                                                                                                                                 |
+|    [ParaformerBert-aishell2](https://www.modelscope.cn/models/damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary)     |    CN    |      AISHELL-2 (1000hours)       |    5212    |    64M    |    Offline     |                                                                                                                                 |
+
+
+#### UniASR Models
+
+|                                                               Model Name                                                               | Language |          Training Data           | Vocab Size | Parameter | Offline/Online | Notes                                                                                                                           |
+|:--------------------------------------------------------------------------------------------------------------------------------------:|:--------:|:--------------------------------:|:----------:|:---------:|:--------------:|:--------------------------------------------------------------------------------------------------------------------------------|
+|       [UniASR](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/summary)        | CN & EN  | Alibaba Speech Data (60000hours) |    8358    |   100M    |     Online     | UniASR streaming offline unifying models                                                                                                    |
+| [UniASR-large](https://modelscope.cn/models/damo/speech_UniASR-large_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/summary) | CN & EN  | Alibaba Speech Data (60000hours) |    8358    |   220M    |    Offline     | UniASR streaming offline unifying models                                                                                                    |
+|           [UniASR Burmese](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-my-16k-common-vocab696-pytorch/summary)           | Burmese  |  Alibaba Speech Data (? hours)   |    696     |    95M    |     Online     | UniASR streaming offline unifying models                                                                                                    |
+|           [UniASR Hebrew](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/summary)           |  Hebrew  |  Alibaba Speech Data (? hours)   |    1085    |    95M    |     Online     | UniASR streaming offline unifying models                                                                                                    |
+|       [UniASR Urdu](https://modelscope.cn/models/damo/speech_UniASR_asr_2pass-ur-16k-common-vocab877-pytorch/summary)                  |   Urdu   |  Alibaba Speech Data (? hours)   |    877     |    95M    |     Online     | UniASR streaming offline unifying models                                                                                                    |
+
+#### Conformer Models
+
+|                                                       Model Name                                                       | Language |     Training Data     | Vocab Size | Parameter | Offline/Online | Notes                                                                                                                           |
+|:----------------------------------------------------------------------------------------------------------------------:|:--------:|:---------------------:|:----------:|:---------:|:--------------:|:--------------------------------------------------------------------------------------------------------------------------------|
+| [Conformer](https://modelscope.cn/models/damo/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/summary)   |   CN     |  AISHELL (178hours)   |    4234    |    44M    |    Offline     | Duration of input wav <= 20s                                                                                                    |
+| [Conformer](https://www.modelscope.cn/models/damo/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/summary)   |   CN     | AISHELL-2 (1000hours) |    5212    |    44M    |    Offline     | Duration of input wav <= 20s                                                                                                    |
+
+
+#### RNN-T Models
+
+### Multi-talker Speech Recognition Models
+
+#### MFCCA Models
+
+|                                                  Model Name                                                   | Language |               Training Data                | Vocab Size | Parameter | Offline/Online | Notes                                                                                                                           |
+|:-------------------------------------------------------------------------------------------------------------:|:--------:|:------------------------------------------:|:----------:|:---------:|:--------------:|:--------------------------------------------------------------------------------------------------------------------------------|
+| [MFCCA](https://www.modelscope.cn/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/summary)    |   CN     | AliMeeting, AISHELL-4, Simudata (917hours)   |     4950   |    45M    |    Offline     | Duration of input wav <= 20s, channel of input wav <= 8 channel |
+
+
+
+### Voice Activity Detection Models
+
+|                                           Model Name                                           |        Training Data         | Parameters | Sampling Rate | Notes |
+|:----------------------------------------------------------------------------------------------:|:----------------------------:|:----------:|:-------------:|:------|
+| [FSMN-VAD](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) | Alibaba Speech Data (5000hours) |    0.4M    |     16000     |       |
+|   [FSMN-VAD](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-8k-common/summary)        | Alibaba Speech Data (5000hours) |    0.4M    |     8000      |       |
+
+### Punctuation Restoration Models
+
+|                                                         Model Name                                                         |        Training Data         | Parameters | Vocab Size| Offline/Online | Notes |
+|:--------------------------------------------------------------------------------------------------------------------------:|:----------------------------:|:----------:|:----------:|:--------------:|:------|
+|      [CT-Transformer](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/summary)      | Alibaba Text Data |    70M     |    272727     |    Offline     |   offline punctuation model    |
+| [CT-Transformer](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727/summary)      | Alibaba Text Data |    70M     |    272727     |     Online     |  online punctuation model     |
+
+### Language Models
+
+|                                                       Model Name                                                       |        Training Data         | Parameters | Vocab Size | Notes |
+|:----------------------------------------------------------------------------------------------------------------------:|:----------------------------:|:----------:|:----------:|:------|
+| [Transformer](https://www.modelscope.cn/models/damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/summary)      | Alibaba Speech Data (?hours) |    57M     |    8404    |       |
+
+### Speaker Verification Models
+
+|                                                  Model Name                                                   |   Training Data   | Parameters | Number Speaker | Notes |
+|:-------------------------------------------------------------------------------------------------------------:|:-----------------:|:----------:|:----------:|:------|
+| [Xvector](https://www.modelscope.cn/models/damo/speech_xvector_sv-zh-cn-cnceleb-16k-spk3465-pytorch/summary) | CNCeleb (1,200 hours)  |   17.5M    |    3465    |    Xvector, speaker verification, Chinese   |
+| [Xvector](https://www.modelscope.cn/models/damo/speech_xvector_sv-en-us-callhome-8k-spk6135-pytorch/summary) | CallHome (60 hours) |    61M     |    6135    |   Xvector, speaker verification, English    |
+
+### Speaker diarization Models
+
+|                                                    Model Name                                                    |    Training Data    | Parameters | Notes |
+|:----------------------------------------------------------------------------------------------------------------:|:-------------------:|:----------:|:------|
+| [SOND](https://www.modelscope.cn/models/damo/speech_diarization_sond-zh-cn-alimeeting-16k-n16k4-pytorch/summary) | AliMeeting (120 hours) |   40.5M    |    Speaker diarization, profiles and records, Chinese |
+| [SOND](https://www.modelscope.cn/models/damo/speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch/summary)    |  CallHome (60 hours)  |     12M     |    Speaker diarization, profiles and records, English   |
+
+### Timestamp Prediction Models
+
+|                                                    Model Name                                     |  Language  |    Training Data    | Parameters | Notes |
+|:--------------------------------------------------------------------------------------------------:|:--------------:|:-------------------:|:----------:|:------|
+| [TP-Aligner](https://modelscope.cn/models/damo/speech_timestamp_prediction-v1-16k-offline/summary) | CN | Alibaba Speech Data (50000hours) |   37.8M    |    Timestamp prediction, Mandarin, middle size |
diff --git a/docs/index.rst b/docs/index.rst
index ae75c3f..e6fe51d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -19,10 +19,10 @@
    :maxdepth: 1
    :caption: Recipe
 
-   ./asr_recipe.md
-   ./sv_recipe.md
-   ./punc_recipe.md
-   ./vad_recipe.md
+   ./recipe/asr_recipe.md
+   ./recipe/sv_recipe.md
+   ./recipe/punc_recipe.md
+   ./recipe/vad_recipe.md
 
 .. toctree::
    :maxdepth: 1
@@ -34,22 +34,30 @@
    :maxdepth: 1
    :caption: Runtime
 
-   ./export.md
-   ./onnxruntime_python.md
-   ./onnxruntime_cpp.md
-   ./libtorch_python.md
-   ./grpc_python.md
-   ./grpc_cpp.md
-   ./websocket_python.md
+   ./runtime/export.md
+   ./runtime/onnxruntime_python.md
+   ./runtime/onnxruntime_cpp.md
+   ./runtime/libtorch_python.md
+   ./runtime/grpc_python.md
+   ./runtime/grpc_cpp.md
+   ./runtime/websocket_python.md
 
+.. toctree::
+   :maxdepth: 1
+   :caption: Model Zoo
 
+   ./modelscope_models.md
+   ./huggingface_models.md
 
 .. toctree::
    :maxdepth: 1
    :caption: ModelScope pipeline
 
-   ./modelscope_models.md
-   ./modelscope_usages.md
+   ./modescope_pipeline/quick_start.md
+   ./modescope_pipeline/asr_pipeline.md
+   ./modescope_pipeline/vad_pipeline.md
+   ./modescope_pipeline/punc_pipeline.md
+   ./modescope_pipeline/sv_pipeline.md
 
 .. toctree::
    :maxdepth: 1
diff --git a/docs/modelscope_models.md b/docs/modelscope_models.md
index 6c88b69..b35d625 100644
--- a/docs/modelscope_models.md
+++ b/docs/modelscope_models.md
@@ -1,4 +1,4 @@
-# Pretrained models
+# Pretrained Models on ModelScope
 
 ## Model License
 -  Apache License 2.0
diff --git a/docs/modescope_pipeline/asr_pipeline.md b/docs/modescope_pipeline/asr_pipeline.md
new file mode 100644
index 0000000..cb81871
--- /dev/null
+++ b/docs/modescope_pipeline/asr_pipeline.md
@@ -0,0 +1,14 @@
+# Speech Recognition
+
+## Inference with pipeline
+### Quick start
+#### Inference with your data
+#### Inference with multi-threads on CPU
+#### Inference with multi GPU
+
+## Finetune with pipeline
+### Quick start
+### Finetune with your data
+
+## Inference with your finetuned model
+
diff --git a/docs/modescope_pipeline/lm_pipeline.md b/docs/modescope_pipeline/lm_pipeline.md
new file mode 100644
index 0000000..cb81871
--- /dev/null
+++ b/docs/modescope_pipeline/lm_pipeline.md
@@ -0,0 +1,14 @@
+# Language Model
+
+## Inference with pipeline
+### Quick start
+#### Inference with your data
+#### Inference with multi-threads on CPU
+#### Inference with multi GPU
+
+## Finetune with pipeline
+### Quick start
+### Finetune with your data
+
+## Inference with your finetuned model
+
diff --git a/docs/modelscope_usages.md b/docs/modescope_pipeline/modelscope_usages.md
similarity index 100%
rename from docs/modelscope_usages.md
rename to docs/modescope_pipeline/modelscope_usages.md
diff --git a/docs/modescope_pipeline/punc_pipeline.md b/docs/modescope_pipeline/punc_pipeline.md
new file mode 100644
index 0000000..cb81871
--- /dev/null
+++ b/docs/modescope_pipeline/punc_pipeline.md
@@ -0,0 +1,14 @@
+# Punctuation Restoration
+
+## Inference with pipeline
+### Quick start
+#### Inference with your data
+#### Inference with multi-threads on CPU
+#### Inference with multi GPU
+
+## Finetune with pipeline
+### Quick start
+### Finetune with your data
+
+## Inference with your finetuned model
+
diff --git a/docs/modescope_pipeline/quick_start.md b/docs/modescope_pipeline/quick_start.md
new file mode 100644
index 0000000..a585cf6
--- /dev/null
+++ b/docs/modescope_pipeline/quick_start.md
@@ -0,0 +1,6 @@
+# Quick Start
+
+## Inference with pipeline
+
+
+## Finetune with pipeline
diff --git a/docs/modescope_pipeline/sv_pipeline.md b/docs/modescope_pipeline/sv_pipeline.md
new file mode 100644
index 0000000..cb81871
--- /dev/null
+++ b/docs/modescope_pipeline/sv_pipeline.md
@@ -0,0 +1,14 @@
+# Speaker Verification
+
+## Inference with pipeline
+### Quick start
+#### Inference with your data
+#### Inference with multi-threads on CPU
+#### Inference with multi GPU
+
+## Finetune with pipeline
+### Quick start
+### Finetune with your data
+
+## Inference with your finetuned model
+
diff --git a/docs/modescope_pipeline/tp_pipeline.md b/docs/modescope_pipeline/tp_pipeline.md
new file mode 100644
index 0000000..cb81871
--- /dev/null
+++ b/docs/modescope_pipeline/tp_pipeline.md
@@ -0,0 +1,14 @@
+# Timestamp Prediction
+
+## Inference with pipeline
+### Quick start
+#### Inference with your data
+#### Inference with multi-threads on CPU
+#### Inference with multi GPU
+
+## Finetune with pipeline
+### Quick start
+### Finetune with your data
+
+## Inference with your finetuned model
+
diff --git a/docs/modescope_pipeline/vad_pipeline.md b/docs/modescope_pipeline/vad_pipeline.md
new file mode 100644
index 0000000..cb81871
--- /dev/null
+++ b/docs/modescope_pipeline/vad_pipeline.md
@@ -0,0 +1,14 @@
+# Voice Activity Detection
+
+## Inference with pipeline
+### Quick start
+#### Inference with your data
+#### Inference with multi-threads on CPU
+#### Inference with multi GPU
+
+## Finetune with pipeline
+### Quick start
+### Finetune with your data
+
+## Inference with your finetuned model
+
diff --git a/docs/onnxruntime_cpp.md b/docs/onnxruntime_cpp.md
deleted file mode 120000
index 1bb4529..0000000
--- a/docs/onnxruntime_cpp.md
+++ /dev/null
@@ -1 +0,0 @@
-../funasr/runtime/onnxruntime/readme.md
\ No newline at end of file
diff --git a/docs/asr_recipe.md b/docs/recipe/asr_recipe.md
similarity index 100%
rename from docs/asr_recipe.md
rename to docs/recipe/asr_recipe.md
diff --git a/docs/asr_recipe.md b/docs/recipe/lm_recipe.md
similarity index 100%
copy from docs/asr_recipe.md
copy to docs/recipe/lm_recipe.md
diff --git a/docs/punc_recipe.md b/docs/recipe/punc_recipe.md
similarity index 100%
rename from docs/punc_recipe.md
rename to docs/recipe/punc_recipe.md
diff --git a/docs/sv_recipe.md b/docs/recipe/sv_recipe.md
similarity index 100%
rename from docs/sv_recipe.md
rename to docs/recipe/sv_recipe.md
diff --git a/docs/vad_recipe.md b/docs/recipe/vad_recipe.md
similarity index 100%
rename from docs/vad_recipe.md
rename to docs/recipe/vad_recipe.md
diff --git a/docs/runtime/export.md b/docs/runtime/export.md
new file mode 120000
index 0000000..91f8b98
--- /dev/null
+++ b/docs/runtime/export.md
@@ -0,0 +1 @@
+../../funasr/export/README.md
\ No newline at end of file
diff --git a/docs/grpc_cpp.md b/docs/runtime/grpc_cpp.md
similarity index 100%
rename from docs/grpc_cpp.md
rename to docs/runtime/grpc_cpp.md
diff --git a/docs/runtime/grpc_python.md b/docs/runtime/grpc_python.md
new file mode 120000
index 0000000..ee8d6ea
--- /dev/null
+++ b/docs/runtime/grpc_python.md
@@ -0,0 +1 @@
+../../funasr/runtime/python/grpc/Readme.md
\ No newline at end of file
diff --git a/docs/libtorch_python.md b/docs/runtime/libtorch_python.md
similarity index 100%
rename from docs/libtorch_python.md
rename to docs/runtime/libtorch_python.md
diff --git a/docs/runtime/onnxruntime_cpp.md b/docs/runtime/onnxruntime_cpp.md
new file mode 120000
index 0000000..3661d18
--- /dev/null
+++ b/docs/runtime/onnxruntime_cpp.md
@@ -0,0 +1 @@
+../../funasr/runtime/onnxruntime/readme.md
\ No newline at end of file
diff --git a/docs/onnxruntime_python.md b/docs/runtime/onnxruntime_python.md
similarity index 100%
rename from docs/onnxruntime_python.md
rename to docs/runtime/onnxruntime_python.md
diff --git a/docs/runtime/websocket_python.md b/docs/runtime/websocket_python.md
new file mode 120000
index 0000000..0fabb85
--- /dev/null
+++ b/docs/runtime/websocket_python.md
@@ -0,0 +1 @@
+../../funasr/runtime/python/websocket/README.md
\ No newline at end of file
diff --git a/docs/websocket_python.md b/docs/websocket_python.md
deleted file mode 120000
index bcf239c..0000000
--- a/docs/websocket_python.md
+++ /dev/null
@@ -1 +0,0 @@
-../funasr/runtime/python/websocket/README.md
\ No newline at end of file

--
Gitblit v1.9.1