From 89ab5d5a3ba72c1e22e7b1141ab07a1b00724a0e Mon Sep 17 00:00:00 2001
From: 游雁 <zhifu.gzf@alibaba-inc.com>
Date: Wed, 26 Jul 2023 21:07:20 +0800
Subject: [PATCH] docs zh
---
docs/egs_modelscope | 1
docs/index.rst | 2
egs_modelscope/asr/TEMPLATE/README.md | 10 +
egs_modelscope/asr/TEMPLATE/README_zh.md | 288 ++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 296 insertions(+), 5 deletions(-)
diff --git a/docs/egs_modelscope b/docs/egs_modelscope
new file mode 120000
index 0000000..842ca25
--- /dev/null
+++ b/docs/egs_modelscope
@@ -0,0 +1 @@
+../egs_modelscope
\ No newline at end of file
diff --git a/docs/index.rst b/docs/index.rst
index 63fb681..e2aa87d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -45,7 +45,7 @@
:caption: ModelScope Egs
./modelscope_pipeline/quick_start.md
- ./modelscope_pipeline/asr_pipeline.md
+ ./egs_modelscope/asr/TEMPLATE/README.md
./modelscope_pipeline/vad_pipeline.md
./modelscope_pipeline/punc_pipeline.md
./modelscope_pipeline/tp_pipeline.md
diff --git a/egs_modelscope/asr/TEMPLATE/README.md b/egs_modelscope/asr/TEMPLATE/README.md
index cf0ba84..a8cb486 100644
--- a/egs_modelscope/asr/TEMPLATE/README.md
+++ b/egs_modelscope/asr/TEMPLATE/README.md
@@ -1,3 +1,5 @@
+([简体中文](./README_zh.md)|English)
+
# Speech Recognition
> **Note**:
@@ -230,10 +232,10 @@
- `batch_bins`: batch size. For dataset_type is `small`, `batch_bins` indicates the feature frames. For dataset_type is `large`, `batch_bins` indicates the duration in ms
- `max_epoch`: number of training epoch
- `lr`: learning rate
- - `init_param`: init model path, load modelscope model initialization by default. For example: ["checkpoint/20epoch.pb"]
- - `freeze_param`: Freeze model parameters. For example: ["encoder"]
- - `ignore_init_mismatch`: Ignore size mismatch when loading pre-trained model
- - `use_lora`: Fine-tuning model use lora, more detail please refer to [LORA](https://arxiv.org/pdf/2106.09685.pdf)
+ - `init_param`: `[]` (default), initial model path; loads the modelscope model for initialization by default. For example: ["checkpoint/20epoch.pb"]
+ - `freeze_param`: `[]` (default), freeze the given model parameters. For example: ["encoder"]
+ - `ignore_init_mismatch`: `True` (default), ignore size mismatches when loading the pre-trained model
+ - `use_lora`: `False` (default), finetune the model with LoRA; for more details, please refer to [LoRA](https://arxiv.org/pdf/2106.09685.pdf)
- Training data formats:
```sh
diff --git a/egs_modelscope/asr/TEMPLATE/README_zh.md b/egs_modelscope/asr/TEMPLATE/README_zh.md
new file mode 100644
index 0000000..81e0271
--- /dev/null
+++ b/egs_modelscope/asr/TEMPLATE/README_zh.md
@@ -0,0 +1,288 @@
+(简体中文|[English](./README.md))
+
+# Speech Recognition
+
+> **Note**:
+> The pipeline supports inference and finetuning with all the models in the [modelscope model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope). Here we take typical models as examples to demonstrate the usage.
+
+## Inference
+
+### Quick start
+#### [Paraformer Model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+ task=Tasks.auto_speech_recognition,
+ model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
+)
+
+rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
+print(rec_result)
+```
+#### [Paraformer-online Model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online/summary)
+##### Real-time inference
+```python
+inference_pipeline = pipeline(
+ task=Tasks.auto_speech_recognition,
+ model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
+ model_revision='v1.0.6',
+ update_model=False,
+ mode='paraformer_streaming'
+ )
+import soundfile
+speech, sample_rate = soundfile.read("example/asr_example.wav")
+
+chunk_size = [5, 10, 5]  # [5, 10, 5] -> 600ms, [8, 8, 4] -> 480ms
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size}
+chunk_stride = chunk_size[1] * 960  # 600ms or 480ms of samples at 16 kHz
+# first chunk, 600ms
+speech_chunk = speech[0:chunk_stride]
+rec_result = inference_pipeline(audio_in=speech_chunk, param_dict=param_dict)
+print(rec_result)
+# next chunk, 600ms
+speech_chunk = speech[chunk_stride:chunk_stride+chunk_stride]
+rec_result = inference_pipeline(audio_in=speech_chunk, param_dict=param_dict)
+print(rec_result)
+```
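+
+The snippet above decodes only the first two chunks to show the calling convention. Below is a minimal sketch (our illustration, not the linked demo) of a loop over the whole waveform; it reuses `speech` and `inference_pipeline` from above and sets `is_final` on the last chunk so the cached streaming state is flushed:
+```python
+chunk_size = [5, 10, 5]
+chunk_stride = chunk_size[1] * 960
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size}
+num_chunks = (len(speech) - 1) // chunk_stride + 1
+for i in range(num_chunks):
+    speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
+    param_dict["is_final"] = (i == num_chunks - 1)  # flag the final chunk
+    rec_result = inference_pipeline(audio_in=speech_chunk, param_dict=param_dict)
+    print(rec_result)
+```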
+
+##### Simulated real-time inference
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+ task=Tasks.auto_speech_recognition,
+ model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
+ model_revision='v1.0.6',
+ update_model=False,
+ mode="paraformer_fake_streaming"
+)
+audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
+rec_result = inference_pipeline(audio_in=audio_in)
+print(rec_result)
+```
+For the complete demo code, please refer to the [demo](https://github.com/alibaba-damo-academy/FunASR/discussions/241).
+
+#### [UniASR Model](https://www.modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/summary)
+The UniASR model has three decoding modes (fast, normal, offline); for more model details, please refer to the [docs](https://www.modelscope.cn/models/damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/summary).
+```python
+decoding_model = "fast"  # choose from "fast", "normal", "offline"
+inference_pipeline = pipeline(
+ task=Tasks.auto_speech_recognition,
+ model='damo/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825',
+ param_dict={"decoding_model": decoding_model})
+
+rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
+print(rec_result)
+```
+The fast and normal decoding modes perform simulated (fake) streaming decoding and can be used to evaluate recognition accuracy.
+For the complete demo code, please refer to the [demo](https://github.com/alibaba-damo-academy/FunASR/discussions/151).
+
+#### [RNN-T-online Model]()
+To be added.
+
+#### [MFCCA Model](https://www.modelscope.cn/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/summary)
+
+For more model details, please refer to the [docs](https://www.modelscope.cn/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/summary).
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+ task=Tasks.auto_speech_recognition,
+ model='NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950',
+ model_revision='v3.0.0'
+)
+
+rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
+print(rec_result)
+```
+
+### API reference
+#### pipeline definition
+- `task`: `Tasks.auto_speech_recognition`
+- `model`: model name in the [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope), or a model path on local disk
+- `ngpu`: `1` (default), use GPU for inference; if ngpu=0, inference runs on CPU
+- `ncpu`: `1` (default), the number of threads used for intra-op parallelism on CPU
+- `output_dir`: `None` (default), if set, the output path of the results
+- `batch_size`: `1` (default), batch size during decoding
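+
+For example, a minimal sketch that combines these parameters for CPU-only decoding with results written to disk (the parameter values here are illustrative assumptions):
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
+    ngpu=0,                   # 0 -> run inference on CPU
+    ncpu=4,                   # threads for CPU intra-op parallelism
+    output_dir="./results",   # save recognition results to disk
+    batch_size=1,
+)
+```
+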
+#### pipeline inference
+- `audio_in`: the input to decode, which could be:
+  - a wav file path, e.g.: asr_example.wav
+  - a pcm file path, e.g.: asr_example.pcm
+  - an audio byte stream, e.g.: byte data from a microphone
+  - audio samples, e.g.: `audio, rate = soundfile.read("asr_example_zh.wav")`, with dtype numpy.ndarray or torch.Tensor
+  - wav.scp, a kaldi-style wav list (`wav_id \t wav_path`), e.g.:
+  ```text
+  asr_example1  ./audios/asr_example1.wav
+  asr_example2  ./audios/asr_example2.wav
+  ```
+  In the case of `wav.scp` input, `output_dir` must be set to save the output results
+- `audio_fs`: the audio sampling rate, only set when audio_in is a pcm audio
+- `output_dir`: `None` (default), if set, the output path of the results
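+
+As a minimal sketch, decoding the kaldi-style list above (assuming it is saved as ./data/wav.scp; per the note above, `output_dir` must be set for this input type):
+```python
+rec_result = inference_pipeline(audio_in="./data/wav.scp", output_dir="./results")
+```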
+
+### Inference with multi-threaded CPUs or multiple GPUs
+FunASR also provides the [egs_modelscope/asr/TEMPLATE/infer.sh](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/infer.sh) script for decoding with multi-threaded CPUs or multiple GPUs.
+
+#### `infer.sh` settings
+- `model`: model name in the [modelscope model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope), or a model path on local disk
+- `data_dir`: the dataset dir must include the `wav.scp` file; if `${data_dir}/text` also exists, the CER will be computed
+- `output_dir`: output dir of the recognition results
+- `batch_size`: `64` (default), batch size for inference on GPU
+- `gpu_inference`: `true` (default), whether to perform GPU decoding; set to `false` for CPU inference
+- `gpuid_list`: `0,1` (default), the GPU IDs used for inference
+- `njob`: only used for CPU inference (`gpu_inference=false`); `64` (default), the number of jobs for CPU decoding
+- `checkpoint_dir`: only used for inference with a finetuned model; the path dir of the finetuned model
+- `checkpoint_name`: only used for inference with a finetuned model; `valid.cer_ctc.ave.pb` (default), the checkpoint used for inference
+- `decoding_mode`: `normal` (default), the decoding mode of UniASR models (`fast`, `normal`, `offline`)
+- `hotword_txt`: `None` (default), the hotword file for contextual (hotword) models (the hotword file name ends with .txt)
+
+#### Decode with multiple GPUs:
+```shell
+ bash infer.sh \
+ --model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+ --data_dir "./data/test" \
+ --output_dir "./results" \
+ --batch_size 64 \
+ --gpu_inference true \
+ --gpuid_list "0,1"
+```
+#### Decode with multi-threaded CPUs:
+```shell
+ bash infer.sh \
+ --model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+ --data_dir "./data/test" \
+ --output_dir "./results" \
+ --gpu_inference false \
+ --njob 64
+```
+
+#### Inference results
+The decoding results can be found in `$output_dir/1best_recog/text.cer`, which contains the recognition result of each sample and the CER metric of the whole test set.
+If you decode the SpeechIO test sets, you can use textnorm with `stage=3`; `DETAILS.txt` and `RESULTS.txt` record the results and the CER after text normalization.
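+
+For example, with the `--output_dir "./results"` setting from the commands above, the metrics can be inspected directly (a sketch; the file layout follows the description above):
+```shell
+cat ./results/1best_recog/text.cer
+```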
+
+## Finetune with pipeline
+
+### Quick start
+[finetune.py](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/finetune.py)
+```python
+import os
+
+from modelscope.metainfo import Trainers
+from modelscope.trainers import build_trainer
+
+from funasr.datasets.ms_dataset import MsDataset
+from funasr.utils.modelscope_param import modelscope_args
+
+
+def modelscope_finetune(params):
+    if not os.path.exists(params.output_dir):
+        os.makedirs(params.output_dir, exist_ok=True)
+    # dataset split ["train", "validation"]
+    ds_dict = MsDataset.load(params.data_path)
+    kwargs = dict(
+        model=params.model,
+        data_dir=ds_dict,
+        dataset_type=params.dataset_type,
+        work_dir=params.output_dir,
+        batch_bins=params.batch_bins,
+        max_epoch=params.max_epoch,
+        lr=params.lr,
+        mate_params=params.param_dict)
+    trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
+    trainer.train()
+
+
+if __name__ == '__main__':
+    params = modelscope_args(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+    params.output_dir = "./checkpoint"                  # path to save the finetuned model
+    params.data_path = "speech_asr_aishell1_trainsets"  # dataset path
+    params.dataset_type = "small"                       # use small for small datasets; if the data exceeds 1000 hours, use large
+    params.batch_bins = 2000                            # batch size; in fbank feature frames if dataset_type="small", in milliseconds if dataset_type="large"
+    params.max_epoch = 20                               # maximum number of training epochs
+    params.lr = 0.00005                                 # learning rate
+    init_param = []                                     # initial model path; loads the modelscope model by default, e.g.: ["checkpoint/20epoch.pb"]
+    freeze_param = []                                   # model parameters to freeze, e.g.: ["encoder"]
+    ignore_init_mismatch = True                         # whether to ignore parameter size mismatches during initialization
+    use_lora = False                                    # whether to finetune the model with LoRA
+    params.param_dict = {"init_param": init_param, "freeze_param": freeze_param, "ignore_init_mismatch": ignore_init_mismatch}
+    if use_lora:
+        enable_lora = True
+        lora_bias = "all"
+        lora_params = {"lora_list": ['q', 'v'], "lora_rank": 8, "lora_alpha": 16, "lora_dropout": 0.1}
+        lora_config = {"enable_lora": enable_lora, "lora_bias": lora_bias, "lora_params": lora_params}
+        params.param_dict.update(lora_config)
+
+    modelscope_finetune(params)
+```
+
+```shell
+python finetune.py &> log.txt &
+```
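+
+Since the command above runs in the background, you can follow the training progress in the log file (log.txt, as named in the command above):
+```shell
+tail -f log.txt
+```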
+
+### Finetune with your private data
+
+- Modify the finetune training related parameters in [finetune.py](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/finetune.py)
+  - `output_dir`: result dir of the finetuned model
+  - `data_dir`: the dataset dir must include the files `train/wav.scp`, `train/text`, `validation/wav.scp`, and `validation/text`
+  - `dataset_type`: for datasets larger than 1000 hours, set to `large`; otherwise set to `small`
+  - `batch_bins`: batch size; for dataset_type `small`, `batch_bins` indicates the number of feature frames; for dataset_type `large`, `batch_bins` indicates the duration in ms
+  - `max_epoch`: maximum number of training epochs
+  - `lr`: learning rate
+  - `init_param`: `[]` (default), initial model path; loads the modelscope model for initialization by default. For example: ["checkpoint/20epoch.pb"]
+  - `freeze_param`: `[]` (default), freeze the given model parameters. For example: ["encoder"]
+  - `ignore_init_mismatch`: `True` (default), ignore size mismatches when loading the pre-trained model
+  - `use_lora`: `False` (default), finetune the model with LoRA; for more details, please refer to [LoRA](https://arxiv.org/pdf/2106.09685.pdf)
+
+- Training data formats:
+```sh
+cat ./example_data/text
+BAC009S0002W0122 而 对 楼 市 成 交 抑 制 作 用 最 大 的 限 购
+BAC009S0002W0123 也 成 为 地 方 政 府 的 眼 中 钉
+english_example_1 hello world
+english_example_2 go swim 去 游 泳
+
+cat ./example_data/wav.scp
+BAC009S0002W0122 /mnt/data/wav/train/S0002/BAC009S0002W0122.wav
+BAC009S0002W0123 /mnt/data/wav/train/S0002/BAC009S0002W0123.wav
+english_example_1 /mnt/data/wav/train/S0002/english_example_1.wav
+english_example_2 /mnt/data/wav/train/S0002/english_example_2.wav
+```
+
+- Then you can run the pipeline for finetuning with the following command:
+```shell
+python finetune.py
+```
+If you want to finetune with multiple GPUs, you can use the following command:
+```shell
+CUDA_VISIBLE_DEVICES=1,2 python -m torch.distributed.launch --nproc_per_node 2 finetune.py > log.txt 2>&1
+```
+## Inference with your finetuned model
+
+The parameter settings of [egs_modelscope/asr/TEMPLATE/infer.sh](https://github.com/alibaba-damo-academy/FunASR/blob/main/egs_modelscope/asr/TEMPLATE/infer.sh) are the same as those of `infer.sh` above.
+
+- Decode with multiple GPUs:
+```shell
+ bash infer.sh \
+ --model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+ --data_dir "./data/test" \
+ --output_dir "./results" \
+ --batch_size 64 \
+ --gpu_inference true \
+ --gpuid_list "0,1" \
+ --checkpoint_dir "./checkpoint" \
+ --checkpoint_name "valid.cer_ctc.ave.pb"
+```
+- Decode with multi-threaded CPUs:
+```shell
+ bash infer.sh \
+ --model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
+ --data_dir "./data/test" \
+ --output_dir "./results" \
+ --gpu_inference false \
+ --njob 64 \
+ --checkpoint_dir "./checkpoint" \
+ --checkpoint_name "valid.cer_ctc.ave.pb"
+```
--
Gitblit v1.9.1