From e4bf138dd8ac587e26a68b373229aac4ceb2abec Mon Sep 17 00:00:00 2001
From: zhifu gao <zhifu.gzf@alibaba-inc.com>
Date: Wed, 22 Mar 2023 15:19:54 +0800
Subject: [PATCH] Merge pull request #281 from alibaba-damo-academy/dev_lzr
---
/dev/null | 48 ---------
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh | 95 +++++++++++++++++++
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md | 20 ++--
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md | 11 +
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.py | 108 +++------------------
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/utils | 1
6 files changed, 129 insertions(+), 154 deletions(-)
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/README.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/README.md
deleted file mode 100644
index 1587d3d..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# ModelScope Model
-
-## How to finetune and infer using a pretrained Paraformer-large Model
-
-### Finetune
-
-- Modify finetune training related parameters in `finetune.py`
- - <strong>output_dir:</strong> # result dir
- - <strong>data_dir:</strong> # the dataset dir needs to include files: train/wav.scp, train/text; validation/wav.scp, validation/text.
- - <strong>batch_bins:</strong> # batch size
- - <strong>max_epoch:</strong> # number of training epoch
- - <strong>lr:</strong> # learning rate
-
-- Then you can run the pipeline to finetune with:
-```python
- python finetune.py
-```
-
-### Inference
-
-Or you can use the finetuned model for inference directly.
-
-- Setting parameters in `infer.py`
- - <strong>data_dir:</strong> # the dataset dir
- - <strong>output_dir:</strong> # result dir
-
-- Then you can run the pipeline to infer with:
-```python
- python infer.py
-```
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/RESULTS.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/RESULTS.md
deleted file mode 100644
index 5eeae37..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/RESULTS.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Paraformer-Large
-- Model link: <https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/summary>
-- Model size: 220M
-
-# Environments
-- date: `Fri Feb 10 13:34:24 CST 2023`
-- python version: `3.7.12`
-- FunASR version: `0.1.6`
-- pytorch version: `pytorch 1.7.0`
-- Git hash: ``
-- Commit date: ``
-
-# Benchmark Results
-
-## AISHELL-1
-- Decode config:
- - Decode without CTC
- - Decode without LM
-
-| testset CER(%) | base model|finetune model |
-|:--------------:|:---------:|:-------------:|
-| dev | 1.75 |1.62 |
-| test | 1.95 |1.78 |
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/finetune.py b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/finetune.py
deleted file mode 100644
index 5817f0e..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/finetune.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-
-from modelscope.metainfo import Trainers
-from modelscope.trainers import build_trainer
-
-from funasr.datasets.ms_dataset import MsDataset
-from funasr.utils.modelscope_param import modelscope_args
-
-
-def modelscope_finetune(params):
- if not os.path.exists(params.output_dir):
- os.makedirs(params.output_dir, exist_ok=True)
- # dataset split ["train", "validation"]
- ds_dict = MsDataset.load(params.data_path)
- kwargs = dict(
- model=params.model,
- data_dir=ds_dict,
- dataset_type=params.dataset_type,
- work_dir=params.output_dir,
- batch_bins=params.batch_bins,
- max_epoch=params.max_epoch,
- lr=params.lr)
- trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
- trainer.train()
-
-
-if __name__ == '__main__':
- params = modelscope_args(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch", data_path="./data")
- params.output_dir = "./checkpoint"        # model save path
- params.data_path = "./example_data/"      # data path
- params.dataset_type = "small"             # use "small" for small datasets; for more than 1000 hours of data, use "large"
- params.batch_bins = 2000                  # batch size; with dataset_type="small" the unit is fbank feature frames, with dataset_type="large" the unit is milliseconds
- params.max_epoch = 50                     # maximum number of training epochs
- params.lr = 0.00005                       # learning rate
-
- modelscope_finetune(params)
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer.py
deleted file mode 100644
index 2fceb48..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import os
-import shutil
-from multiprocessing import Pool
-
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-
-from funasr.utils.compute_wer import compute_wer
-
-
-def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
- output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
- if ngpu > 0:
- use_gpu = 1
- gpu_id = int(idx) - 1
- else:
- use_gpu = 0
- gpu_id = -1
- if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
- gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
- else:
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
- inference_pipline = pipeline(
- task=Tasks.auto_speech_recognition,
- model=model,
- output_dir=output_dir_job,
- batch_size=batch_size,
- ngpu=use_gpu,
- )
- audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
- inference_pipline(audio_in=audio_in)
-
-
-def modelscope_infer(params):
- # prepare for multi-GPU decoding
- ngpu = params["ngpu"]
- njob = params["njob"]
- batch_size = params["batch_size"]
- output_dir = params["output_dir"]
- model = params["model"]
- if os.path.exists(output_dir):
- shutil.rmtree(output_dir)
- os.mkdir(output_dir)
- split_dir = os.path.join(output_dir, "split")
- os.mkdir(split_dir)
- if ngpu > 0:
- nj = ngpu
- elif ngpu == 0:
- nj = njob
- wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
- with open(wav_scp_file) as f:
- lines = f.readlines()
- num_lines = len(lines)
- num_job_lines = num_lines // nj
- start = 0
- for i in range(nj):
- end = start + num_job_lines
- file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
- with open(file, "w") as f:
- if i == nj - 1:
- f.writelines(lines[start:])
- else:
- f.writelines(lines[start:end])
- start = end
-
- p = Pool(nj)
- for i in range(nj):
- p.apply_async(modelscope_infer_core,
- args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
- p.close()
- p.join()
-
- # combine decoding results
- best_recog_path = os.path.join(output_dir, "1best_recog")
- os.mkdir(best_recog_path)
- files = ["text", "token", "score"]
- for file in files:
- with open(os.path.join(best_recog_path, file), "w") as f:
- for i in range(nj):
- job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
- with open(job_file) as f_job:
- lines = f_job.readlines()
- f.writelines(lines)
-
- # If text exists, compute CER
- text_in = os.path.join(params["data_dir"], "text")
- if os.path.exists(text_in):
- text_proc_file = os.path.join(best_recog_path, "token")
- compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
-
-
-if __name__ == "__main__":
- params = {}
- params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch"
- params["data_dir"] = "./data/test"
- params["output_dir"] = "./results"
- params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
- params["njob"] = 1 # if ngpu = 0, will use cpu decoding
- params["batch_size"] = 64
- modelscope_infer(params)
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer_after_finetune.py b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer_after_finetune.py
deleted file mode 100644
index fafe565..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer_after_finetune.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import json
-import os
-import shutil
-
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-from modelscope.hub.snapshot_download import snapshot_download
-
-from funasr.utils.compute_wer import compute_wer
-
-def modelscope_infer_after_finetune(params):
- # prepare for decoding
-
- try:
- pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
- except BaseException:
- raise BaseException("Please download the pretrained model from ModelScope first.")
- shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
- decoding_path = os.path.join(params["output_dir"], "decode_results")
- if os.path.exists(decoding_path):
- shutil.rmtree(decoding_path)
- os.mkdir(decoding_path)
-
- # decoding
- inference_pipeline = pipeline(
- task=Tasks.auto_speech_recognition,
- model=pretrained_model_path,
- output_dir=decoding_path,
- batch_size=params["batch_size"]
- )
- audio_in = os.path.join(params["data_dir"], "wav.scp")
- inference_pipeline(audio_in=audio_in)
-
- # compute CER if GT text is set
- text_in = os.path.join(params["data_dir"], "text")
- if os.path.exists(text_in):
- text_proc_file = os.path.join(decoding_path, "1best_recog/token")
- compute_wer(text_in, text_proc_file, os.path.join(decoding_path, "text.cer"))
-
-
-if __name__ == '__main__':
- params = {}
- params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch"
- params["output_dir"] = "./checkpoint"
- params["data_dir"] = "./data/test"
- params["decoding_model_name"] = "valid.acc.ave_10best.pb"
- params["batch_size"] = 64
- modelscope_infer_after_finetune(params)
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/README.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/README.md
deleted file mode 100644
index 1587d3d..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# ModelScope Model
-
-## How to finetune and infer using a pretrained Paraformer-large Model
-
-### Finetune
-
-- Modify finetune training related parameters in `finetune.py`
- - <strong>output_dir:</strong> # result dir
- - <strong>data_dir:</strong> # the dataset dir needs to include files: train/wav.scp, train/text; validation/wav.scp, validation/text.
- - <strong>batch_bins:</strong> # batch size
- - <strong>max_epoch:</strong> # number of training epoch
- - <strong>lr:</strong> # learning rate
-
-- Then you can run the pipeline to finetune with:
-```python
- python finetune.py
-```
-
-### Inference
-
-Or you can use the finetuned model for inference directly.
-
-- Setting parameters in `infer.py`
- - <strong>data_dir:</strong> # the dataset dir
- - <strong>output_dir:</strong> # result dir
-
-- Then you can run the pipeline to infer with:
-```python
- python infer.py
-```
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/RESULTS.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/RESULTS.md
deleted file mode 100644
index 71d9fee..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/RESULTS.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Paraformer-Large
-- Model link: <https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/summary>
-- Model size: 220M
-
-# Environments
-- date: `Fri Feb 10 13:34:24 CST 2023`
-- python version: `3.7.12`
-- FunASR version: `0.1.6`
-- pytorch version: `pytorch 1.7.0`
-- Git hash: ``
-- Commit date: ``
-
-# Benchmark Results
-
-## AISHELL-2
-- Decode config:
- - Decode without CTC
- - Decode without LM
-
-| testset | base model|finetune model|
-|:------------:|:---------:|:------------:|
-| dev_ios | 2.80 |2.60 |
-| test_android | 3.13 |2.84 |
-| test_ios | 2.85 |2.82 |
-| test_mic | 3.06 |2.88 |
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/finetune.py b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/finetune.py
deleted file mode 100644
index c46d676..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/finetune.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-
-from modelscope.metainfo import Trainers
-from modelscope.trainers import build_trainer
-
-from funasr.datasets.ms_dataset import MsDataset
-from funasr.utils.modelscope_param import modelscope_args
-
-
-def modelscope_finetune(params):
- if not os.path.exists(params.output_dir):
- os.makedirs(params.output_dir, exist_ok=True)
- # dataset split ["train", "validation"]
- ds_dict = MsDataset.load(params.data_path)
- kwargs = dict(
- model=params.model,
- data_dir=ds_dict,
- dataset_type=params.dataset_type,
- work_dir=params.output_dir,
- batch_bins=params.batch_bins,
- max_epoch=params.max_epoch,
- lr=params.lr)
- trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
- trainer.train()
-
-
-if __name__ == '__main__':
- params = modelscope_args(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch", data_path="./data")
- params.output_dir = "./checkpoint"        # model save path
- params.data_path = "./example_data/"      # data path
- params.dataset_type = "small"             # use "small" for small datasets; for more than 1000 hours of data, use "large"
- params.batch_bins = 2000                  # batch size; with dataset_type="small" the unit is fbank feature frames, with dataset_type="large" the unit is milliseconds
- params.max_epoch = 50                     # maximum number of training epochs
- params.lr = 0.00005                       # learning rate
-
- modelscope_finetune(params)
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer.py
deleted file mode 100644
index d70af72..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import os
-import shutil
-from multiprocessing import Pool
-
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-
-from funasr.utils.compute_wer import compute_wer
-
-
-def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
- output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
- if ngpu > 0:
- use_gpu = 1
- gpu_id = int(idx) - 1
- else:
- use_gpu = 0
- gpu_id = -1
- if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
- gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
- else:
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
- inference_pipline = pipeline(
- task=Tasks.auto_speech_recognition,
- model=model,
- output_dir=output_dir_job,
- batch_size=batch_size,
- ngpu=use_gpu,
- )
- audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
- inference_pipline(audio_in=audio_in)
-
-
-def modelscope_infer(params):
- # prepare for multi-GPU decoding
- ngpu = params["ngpu"]
- njob = params["njob"]
- batch_size = params["batch_size"]
- output_dir = params["output_dir"]
- model = params["model"]
- if os.path.exists(output_dir):
- shutil.rmtree(output_dir)
- os.mkdir(output_dir)
- split_dir = os.path.join(output_dir, "split")
- os.mkdir(split_dir)
- if ngpu > 0:
- nj = ngpu
- elif ngpu == 0:
- nj = njob
- wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
- with open(wav_scp_file) as f:
- lines = f.readlines()
- num_lines = len(lines)
- num_job_lines = num_lines // nj
- start = 0
- for i in range(nj):
- end = start + num_job_lines
- file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
- with open(file, "w") as f:
- if i == nj - 1:
- f.writelines(lines[start:])
- else:
- f.writelines(lines[start:end])
- start = end
-
- p = Pool(nj)
- for i in range(nj):
- p.apply_async(modelscope_infer_core,
- args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
- p.close()
- p.join()
-
- # combine decoding results
- best_recog_path = os.path.join(output_dir, "1best_recog")
- os.mkdir(best_recog_path)
- files = ["text", "token", "score"]
- for file in files:
- with open(os.path.join(best_recog_path, file), "w") as f:
- for i in range(nj):
- job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
- with open(job_file) as f_job:
- lines = f_job.readlines()
- f.writelines(lines)
-
- # If text exists, compute CER
- text_in = os.path.join(params["data_dir"], "text")
- if os.path.exists(text_in):
- text_proc_file = os.path.join(best_recog_path, "token")
- compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
-
-
-if __name__ == "__main__":
- params = {}
- params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch"
- params["data_dir"] = "./data/test"
- params["output_dir"] = "./results"
- params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
- params["njob"] = 1 # if ngpu = 0, will use cpu decoding
- params["batch_size"] = 64
- modelscope_infer(params)
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer_after_finetune.py b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer_after_finetune.py
deleted file mode 100644
index 731cafe..0000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer_after_finetune.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import json
-import os
-import shutil
-
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-from modelscope.hub.snapshot_download import snapshot_download
-
-from funasr.utils.compute_wer import compute_wer
-
-def modelscope_infer_after_finetune(params):
- # prepare for decoding
-
- try:
- pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
- except BaseException:
- raise BaseException("Please download the pretrained model from ModelScope first.")
- shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
- decoding_path = os.path.join(params["output_dir"], "decode_results")
- if os.path.exists(decoding_path):
- shutil.rmtree(decoding_path)
- os.mkdir(decoding_path)
-
- # decoding
- inference_pipeline = pipeline(
- task=Tasks.auto_speech_recognition,
- model=pretrained_model_path,
- output_dir=decoding_path,
- batch_size=params["batch_size"]
- )
- audio_in = os.path.join(params["data_dir"], "wav.scp")
- inference_pipeline(audio_in=audio_in)
-
- # compute CER if GT text is set
- text_in = os.path.join(params["data_dir"], "text")
- if os.path.exists(text_in):
- text_proc_file = os.path.join(decoding_path, "1best_recog/token")
- compute_wer(text_in, text_proc_file, os.path.join(decoding_path, "text.cer"))
-
-
-if __name__ == '__main__':
- params = {}
- params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch"
- params["output_dir"] = "./checkpoint"
- params["data_dir"] = "./data/test"
- params["decoding_model_name"] = "valid.acc.ave_10best.pb"
- params["batch_size"] = 64
- modelscope_infer_after_finetune(params)
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
index a044361..79cc3c3 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
@@ -21,23 +21,26 @@
Or you can use the finetuned model for inference directly.
-- Setting parameters in `infer.py`
+- Setting parameters in `infer.sh`
- <strong>model:</strong> # model name on ModelScope
- <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`. If `test/text` also exists, the CER will be computed
- <strong>output_dir:</strong> # result dir
- - <strong>ngpu:</strong> # the number of GPUs for decoding, if `ngpu` > 0, use GPU decoding
- - <strong>njob:</strong> # the number of jobs for CPU decoding, if `ngpu` = 0, use CPU decoding, please set `njob`
- <strong>batch_size:</strong> # batchsize of inference
+ - <strong>gpu_inference:</strong> # whether to perform GPU decoding; set to false for CPU decoding
+ - <strong>gpuid_list:</strong> # the GPUs used for decoding, e.g., gpuid_list="0,1"
+ - <strong>njob:</strong> # the number of jobs for CPU decoding; required when `gpu_inference`=false
- Then you can run the pipeline to infer with:
-```python
+```sh
- python infer.py
+ sh infer.sh
```
- Results
The decoding results can be found in `$output_dir/1best_recog/text.cer`, which includes recognition results of each sample and the CER metric of the whole test set.
+If you decode the SpeechIO test sets, you can apply text normalization with `stage`=3; `DETAILS.txt` and `RESULTS.txt` then record the recognition results and CER after text normalization.
+
### Inference using local finetuned model
- Modify inference related parameters in `infer_after_finetune.py`
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md
index ec95be3..4e06daf 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/RESULTS.md
@@ -17,22 +17,22 @@
- Decode without CTC
- Decode without LM
-| testset | CER(%)|
-|:---------:|:-----:|
-| dev | 1.75 |
-| test | 1.95 |
+| testset CER(%) | Pretrained model | [Finetuned model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/summary) |
+|:--------------:|:----------------:|:----------------:|
+| dev | 1.75 |1.62 |
+| test | 1.95 |1.78 |
## AISHELL-2
- Decode config:
- Decode without CTC
- Decode without LM
-| testset | CER(%)|
-|:------------:|:-----:|
-| dev_ios | 2.80 |
-| test_android | 3.13 |
-| test_ios | 2.85 |
-| test_mic | 3.06 |
+| testset CER(%) | Pretrained model | [Finetuned model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/summary) |
+|:------------:|:----------------:|:----------------:|
+| dev_ios | 2.80 |2.60 |
+| test_android | 3.13 |2.84 |
+| test_ios | 2.85 |2.82 |
+| test_mic | 3.06 |2.88 |
## Wenetspeech
- Decode config:
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.py
index 795a1e7..1973191 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.py
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.py
@@ -1,101 +1,25 @@
import os
import shutil
-from multiprocessing import Pool
-
+import argparse
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
-from funasr.utils.compute_wer import compute_wer
-
-
-def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
- output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
- if ngpu > 0:
- use_gpu = 1
- gpu_id = int(idx) - 1
- else:
- use_gpu = 0
- gpu_id = -1
- if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
- gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
- else:
- os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
- inference_pipline = pipeline(
+def modelscope_infer(args):
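+ # Pin this worker to a single GPU; infer.sh passes a different gpuid to each parallel job.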
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
+ inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
- model=model,
- output_dir=output_dir_job,
- batch_size=batch_size,
- ngpu=use_gpu,
+ model=args.model,
+ output_dir=args.output_dir,
+ batch_size=args.batch_size,
)
- audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
- inference_pipline(audio_in=audio_in)
-
-
-def modelscope_infer(params):
- # prepare for multi-GPU decoding
- ngpu = params["ngpu"]
- njob = params["njob"]
- batch_size = params["batch_size"]
- output_dir = params["output_dir"]
- model = params["model"]
- if os.path.exists(output_dir):
- shutil.rmtree(output_dir)
- os.mkdir(output_dir)
- split_dir = os.path.join(output_dir, "split")
- os.mkdir(split_dir)
- if ngpu > 0:
- nj = ngpu
- elif ngpu == 0:
- nj = njob
- wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
- with open(wav_scp_file) as f:
- lines = f.readlines()
- num_lines = len(lines)
- num_job_lines = num_lines // nj
- start = 0
- for i in range(nj):
- end = start + num_job_lines
- file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
- with open(file, "w") as f:
- if i == nj - 1:
- f.writelines(lines[start:])
- else:
- f.writelines(lines[start:end])
- start = end
-
- p = Pool(nj)
- for i in range(nj):
- p.apply_async(modelscope_infer_core,
- args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
- p.close()
- p.join()
-
- # combine decoding results
- best_recog_path = os.path.join(output_dir, "1best_recog")
- os.mkdir(best_recog_path)
- files = ["text", "token", "score"]
- for file in files:
- with open(os.path.join(best_recog_path, file), "w") as f:
- for i in range(nj):
- job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
- with open(job_file) as f_job:
- lines = f_job.readlines()
- f.writelines(lines)
-
- # If text exists, compute CER
- text_in = os.path.join(params["data_dir"], "text")
- if os.path.exists(text_in):
- text_proc_file = os.path.join(best_recog_path, "token")
- compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
-
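+ # audio_in is typically a per-job wav.scp shard created by infer.sh.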
+ inference_pipeline(audio_in=args.audio_in)
if __name__ == "__main__":
- params = {}
- params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
- params["data_dir"] = "./data/test"
- params["output_dir"] = "./results"
- params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
- params["njob"] = 1 # if ngpu = 0, will use cpu decoding
- params["batch_size"] = 64
- modelscope_infer(params)
\ No newline at end of file
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--model', type=str, default="speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+ parser.add_argument('--audio_in', type=str, default="./data/test")
+ parser.add_argument('--output_dir', type=str, default="./results/")
+ parser.add_argument('--batch_size', type=int, default=64)
+ parser.add_argument('--gpuid', type=str, default="0")
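+ # infer.sh passes gpuid "-1" when gpu_inference=false, which hides all GPUs and forces CPU decoding.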
+ args = parser.parse_args()
+ modelscope_infer(args)
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh
new file mode 100644
index 0000000..ab64849
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
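+# stage 1: parallel decoding; stage 2: CER computation; stage 3 (optional): SpeechIO TIOBE text normalization and scoring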
+stage=1
+stop_stage=2
+model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=64
+gpu_inference=true # whether to perform GPU decoding; set to false for CPU decoding
+gpuid_list="0,1" # the GPUs used for decoding, e.g., gpuid_list="0,1"
+njob=4 # the number of jobs for CPU decoding, used when gpu_inference=false
+
+
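+# One decoding job per GPU when gpu_inference=true; otherwise njob CPU jobs with gpuid "-1" and batch_size forced to 1.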
+if ${gpu_inference}; then
+ nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+ nj=$njob
+ batch_size=1
+ gpuid_list=""
+ for JOB in $(seq ${nj}); do
+ gpuid_list=$gpuid_list"-1,"
+ done
+fi
+
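+# Split wav.scp into nj shards so that each decoding job processes its own subset.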
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+ split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+ echo "Decoding ..."
+ gpuid_list_array=(${gpuid_list//,/ })
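+ # Launch nj decoding jobs in the background, one per scp shard.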
+ for JOB in $(seq ${nj}); do
+ {
+ id=$((JOB-1))
+ gpuid=${gpuid_list_array[$id]}
+ mkdir -p ${output_dir}/output.$JOB
+ python infer.py \
+ --model ${model} \
+ --audio_in ${output_dir}/split/wav.$JOB.scp \
+ --output_dir ${output_dir}/output.$JOB \
+ --batch_size ${batch_size} \
+ --gpuid ${gpuid}
+ }&
+ done
+ wait
+
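+ # Merge the per-job outputs into a single 1best_recog directory, sorted by utterance id.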
+ mkdir -p ${output_dir}/1best_recog
+ for f in token score text; do
+ if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+ for i in $(seq "${nj}"); do
+ cat "${output_dir}/output.${i}/1best_recog/${f}"
+ done | sort -k1 >"${output_dir}/1best_recog/${f}"
+ fi
+ done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+ echo "Computing WER ..."
+ python utils/proce_text.py ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
+ python utils/proce_text.py ${data_dir}/text ${data_dir}/text.proc
+ python utils/compute_wer.py ${data_dir}/text.proc ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+ tail -n 3 ${output_dir}/1best_recog/text.cer
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
+ echo "SpeechIO TIOBE textnorm"
+ echo "$0 --> Normalizing REF text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${data_dir}/text \
+ ${data_dir}/ref.txt
+
+ echo "$0 --> Normalizing HYP text ..."
+ ./utils/textnorm_zh.py \
+ --has_key --to_upper \
+ ${output_dir}/1best_recog/text.proc \
+ ${output_dir}/1best_recog/rec.txt
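+ # Drop utterances whose normalized hypothesis is empty before scoring.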
+ grep -v $'\t$' ${output_dir}/1best_recog/rec.txt > ${output_dir}/1best_recog/rec_non_empty.txt
+
+ echo "$0 --> computing WER/CER and alignment ..."
+ ./utils/error_rate_zh \
+ --tokenizer char \
+ --ref ${data_dir}/ref.txt \
+ --hyp ${output_dir}/1best_recog/rec_non_empty.txt \
+ ${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
+ rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
+fi
+
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/utils b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/utils
new file mode 120000
index 0000000..2ac163f
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/utils
@@ -0,0 +1 @@
+../../../../egs/aishell/transformer/utils
\ No newline at end of file
--
Gitblit v1.9.1