zhifu gao
2023-03-16 216a0ab1e85b29a86501da688b04ef731d1cbb24
Merge pull request #247 from alibaba-damo-academy/dev_lzr

update egs_modelscope recipe
10 files changed, 272 changed lines in total:
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer.py (31 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer_after_finetune.py (27 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer.py (31 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer_after_finetune.py (27 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md (8 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.py (31 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer_after_finetune.py (27 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1/infer.py (31 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1/infer_after_finetune.py (27 changed lines)
egs_modelscope/asr_vad_punc/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer_after_finetune.py (32 changed lines)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer.py
@@ -8,9 +8,14 @@
from funasr.utils.compute_wer import compute_wer
-def modelscope_infer_core(output_dir, split_dir, njob, idx):
+def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
    output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
-    gpu_id = (int(idx) - 1) // njob
+    if ngpu > 0:
+        use_gpu = 1
+        gpu_id = int(idx) - 1
+    else:
+        use_gpu = 0
+        gpu_id = -1
    if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
        gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
@@ -18,9 +23,10 @@
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    inference_pipline = pipeline(
        task=Tasks.auto_speech_recognition,
        model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch",
        model=model,
        output_dir=output_dir_job,
-        batch_size=64
+        batch_size=batch_size,
+        ngpu=use_gpu,
    )
    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
    inference_pipline(audio_in=audio_in)
@@ -30,13 +36,18 @@
    # prepare for multi-GPU decoding
    ngpu = params["ngpu"]
    njob = params["njob"]
+    batch_size = params["batch_size"]
    output_dir = params["output_dir"]
+    model = params["model"]
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    split_dir = os.path.join(output_dir, "split")
    os.mkdir(split_dir)
-    nj = ngpu * njob
+    if ngpu > 0:
+        nj = ngpu
+    elif ngpu == 0:
+        nj = njob
    wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
    with open(wav_scp_file) as f:
        lines = f.readlines()
@@ -56,7 +67,7 @@
    p = Pool(nj)
    for i in range(nj):
        p.apply_async(modelscope_infer_core,
-                      args=(output_dir, split_dir, njob, str(i + 1)))
+                      args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
    p.close()
    p.join()
@@ -81,8 +92,10 @@
if __name__ == "__main__":
    params = {}
    params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch"
    params["data_dir"] = "./data/test"
    params["output_dir"] = "./results"
    params["ngpu"] = 1
    params["njob"] = 1
    modelscope_infer(params)
    params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
    params["njob"] = 1 # if ngpu = 0, will use cpu decoding
    params["batch_size"] = 64
    modelscope_infer(params)
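Across all four `infer.py` recipes in this PR, the fixed fan-out `nj = ngpu * njob` is replaced by a mode switch: with `ngpu > 0` there is one decoding job per GPU, with `ngpu = 0` there are `njob` CPU jobs pinned to `gpu_id = -1`. A minimal sketch of the new scheduling, not part of the commit (`plan_jobs` is a hypothetical helper):

```python
# Hypothetical helper mirroring the revised logic in modelscope_infer:
# ngpu > 0  -> nj = ngpu jobs, job i pinned to GPU i (use_gpu = 1)
# ngpu == 0 -> nj = njob CPU jobs, gpu_id fixed to -1 (use_gpu = 0)
def plan_jobs(ngpu: int, njob: int):
    if ngpu > 0:
        return [(1, gpu_id) for gpu_id in range(ngpu)]
    return [(0, -1) for _ in range(njob)]

print(plan_jobs(2, 8))  # [(1, 0), (1, 1)]           - two GPU jobs
print(plan_jobs(0, 4))  # [(0, -1), (0, -1), ...] x4 - four CPU jobs
```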
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/infer_after_finetune.py
@@ -4,23 +4,18 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
+from modelscope.hub.snapshot_download import snapshot_download
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_after_finetune(params):
    # prepare for decoding
-    pretrained_model_path = os.path.join(os.environ["HOME"], ".cache/modelscope/hub", params["modelscope_model_name"])
-    for file_name in params["required_files"]:
-        if file_name == "configuration.json":
-            with open(os.path.join(pretrained_model_path, file_name)) as f:
-                config_dict = json.load(f)
-                config_dict["model"]["am_model_name"] = params["decoding_model_name"]
-            with open(os.path.join(params["output_dir"], "configuration.json"), "w") as f:
-                json.dump(config_dict, f, indent=4, separators=(',', ': '))
-        else:
-            shutil.copy(os.path.join(pretrained_model_path, file_name),
-                        os.path.join(params["output_dir"], file_name))
+    try:
+        pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
+    except BaseException:
+        raise BaseException(f"Please download pretrain model from ModelScope firstly.")
+    shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
    decoding_path = os.path.join(params["output_dir"], "decode_results")
    if os.path.exists(decoding_path):
        shutil.rmtree(decoding_path)
@@ -29,9 +24,9 @@
    # decoding
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model=params["output_dir"],
        model=pretrained_model_path,
        output_dir=decoding_path,
-        batch_size=64
+        batch_size=params["batch_size"]
    )
    audio_in = os.path.join(params["data_dir"], "wav.scp")
    inference_pipeline(audio_in=audio_in)
@@ -46,8 +41,8 @@
if __name__ == '__main__':
    params = {}
    params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch"
    params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
    params["output_dir"] = "./checkpoint"
    params["data_dir"] = "./data/test"
    params["decoding_model_name"] = "valid.acc.ave_10best.pb"
-    modelscope_infer_after_finetune(params)
+    params["batch_size"] = 64
+    modelscope_infer_after_finetune(params)
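The same rewrite appears in every `infer_after_finetune.py`: instead of copying a hand-maintained `required_files` list out of the local cache, the script now downloads the full pretrained bundle via `snapshot_download` and drops the finetuned checkpoint in as `model.pb`. A condensed sketch of that flow under the same assumption as the diff, namely that `snapshot_download` returns the downloaded model directory (`prepare_finetuned_model` is a hypothetical wrapper):

```python
import os
import shutil
from modelscope.hub.snapshot_download import snapshot_download

def prepare_finetuned_model(model_name: str, output_dir: str, checkpoint_name: str) -> str:
    # Fetch (or reuse) the pretrained ModelScope bundle under output_dir.
    model_dir = snapshot_download(model_name, cache_dir=output_dir)
    # Overwrite the bundled acoustic model with the finetuned checkpoint.
    shutil.copy(os.path.join(output_dir, checkpoint_name),
                os.path.join(model_dir, "model.pb"))
    return model_dir  # passed to pipeline(..., model=model_dir)
```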
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer.py
@@ -8,9 +8,14 @@
from funasr.utils.compute_wer import compute_wer
-def modelscope_infer_core(output_dir, split_dir, njob, idx):
+def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
    output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
-    gpu_id = (int(idx) - 1) // njob
+    if ngpu > 0:
+        use_gpu = 1
+        gpu_id = int(idx) - 1
+    else:
+        use_gpu = 0
+        gpu_id = -1
    if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
        gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
@@ -18,9 +23,10 @@
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    inference_pipline = pipeline(
        task=Tasks.auto_speech_recognition,
        model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch",
        model=model,
        output_dir=output_dir_job,
-        batch_size=64
+        batch_size=batch_size,
+        ngpu=use_gpu,
    )
    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
    inference_pipline(audio_in=audio_in)
@@ -30,13 +36,18 @@
    # prepare for multi-GPU decoding
    ngpu = params["ngpu"]
    njob = params["njob"]
+    batch_size = params["batch_size"]
    output_dir = params["output_dir"]
+    model = params["model"]
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    split_dir = os.path.join(output_dir, "split")
    os.mkdir(split_dir)
-    nj = ngpu * njob
+    if ngpu > 0:
+        nj = ngpu
+    elif ngpu == 0:
+        nj = njob
    wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
    with open(wav_scp_file) as f:
        lines = f.readlines()
@@ -56,7 +67,7 @@
    p = Pool(nj)
    for i in range(nj):
        p.apply_async(modelscope_infer_core,
-                      args=(output_dir, split_dir, njob, str(i + 1)))
+                      args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
    p.close()
    p.join()
@@ -81,8 +92,10 @@
if __name__ == "__main__":
    params = {}
    params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch"
    params["data_dir"] = "./data/test"
    params["output_dir"] = "./results"
    params["ngpu"] = 1
    params["njob"] = 1
    modelscope_infer(params)
    params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
    params["njob"] = 1 # if ngpu = 0, will use cpu decoding
    params["batch_size"] = 64
    modelscope_infer(params)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/infer_after_finetune.py
@@ -4,23 +4,18 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
+from modelscope.hub.snapshot_download import snapshot_download
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_after_finetune(params):
    # prepare for decoding
-    pretrained_model_path = os.path.join(os.environ["HOME"], ".cache/modelscope/hub", params["modelscope_model_name"])
-    for file_name in params["required_files"]:
-        if file_name == "configuration.json":
-            with open(os.path.join(pretrained_model_path, file_name)) as f:
-                config_dict = json.load(f)
-                config_dict["model"]["am_model_name"] = params["decoding_model_name"]
-            with open(os.path.join(params["output_dir"], "configuration.json"), "w") as f:
-                json.dump(config_dict, f, indent=4, separators=(',', ': '))
-        else:
-            shutil.copy(os.path.join(pretrained_model_path, file_name),
-                        os.path.join(params["output_dir"], file_name))
+    try:
+        pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
+    except BaseException:
+        raise BaseException(f"Please download pretrain model from ModelScope firstly.")
+    shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
    decoding_path = os.path.join(params["output_dir"], "decode_results")
    if os.path.exists(decoding_path):
        shutil.rmtree(decoding_path)
@@ -29,9 +24,9 @@
    # decoding
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model=params["output_dir"],
        model=pretrained_model_path,
        output_dir=decoding_path,
-        batch_size=64
+        batch_size=params["batch_size"]
    )
    audio_in = os.path.join(params["data_dir"], "wav.scp")
    inference_pipeline(audio_in=audio_in)
@@ -46,8 +41,8 @@
if __name__ == '__main__':
    params = {}
    params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch"
    params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
    params["output_dir"] = "./checkpoint"
    params["data_dir"] = "./data/test"
    params["decoding_model_name"] = "valid.acc.ave_10best.pb"
-    modelscope_infer_after_finetune(params)
+    params["batch_size"] = 64
+    modelscope_infer_after_finetune(params)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/README.md
@@ -22,10 +22,12 @@
Or you can use the finetuned model for inference directly.
- Setting parameters in `infer.py`
    - <strong>model:</strong> # model name on ModelScope
    - <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`. If `test/text` is also exists, CER will be computed
    - <strong>output_dir:</strong> # result dir
-    - <strong>ngpu:</strong> # the number of GPUs for decoding
-    - <strong>njob:</strong> # the number of jobs for each GPU
+    - <strong>ngpu:</strong> # the number of GPUs for decoding, if `ngpu` > 0, use GPU decoding
+    - <strong>njob:</strong> # the number of jobs for CPU decoding, if `ngpu` = 0, use CPU decoding, please set `njob`
+    - <strong>batch_size:</strong> # batchsize of inference
- Then you can run the pipeline to infer with:
```python
@@ -39,9 +41,11 @@
### Inference using local finetuned model
- Modify inference related parameters in `infer_after_finetune.py`
    - <strong>modelscope_model_name: </strong> # model name on ModelScope
    - <strong>output_dir:</strong> # result dir
    - <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`. If `test/text` is also exists, CER will be computed
    - <strong>decoding_model_name:</strong> # set the checkpoint name for decoding, e.g., `valid.cer_ctc.ave.pb`
+    - <strong>batch_size:</strong> # batchsize of inference
- Then you can run the pipeline to finetune with:
```python
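# Taken together, the README changes above document the new ngpu/njob/batch_size
# contract. A CPU-only configuration under that contract might look like this
# (values illustrative, mirroring the updated infer.py defaults):
params = {}
params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
params["data_dir"] = "./data/test"   # must contain wav.scp; add text to also get CER
params["output_dir"] = "./results"
params["ngpu"] = 0                   # 0 -> CPU decoding
params["njob"] = 4                   # four parallel CPU jobs
params["batch_size"] = 32
modelscope_infer(params)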
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer.py
@@ -8,9 +8,14 @@
from funasr.utils.compute_wer import compute_wer
-def modelscope_infer_core(output_dir, split_dir, njob, idx):
+def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
    output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
-    gpu_id = (int(idx) - 1) // njob
+    if ngpu > 0:
+        use_gpu = 1
+        gpu_id = int(idx) - 1
+    else:
+        use_gpu = 0
+        gpu_id = -1
    if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
        gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
@@ -18,9 +23,10 @@
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    inference_pipline = pipeline(
        task=Tasks.auto_speech_recognition,
        model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
        model=model,
        output_dir=output_dir_job,
-        batch_size=64
+        batch_size=batch_size,
+        ngpu=use_gpu,
    )
    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
    inference_pipline(audio_in=audio_in)
@@ -30,13 +36,18 @@
    # prepare for multi-GPU decoding
    ngpu = params["ngpu"]
    njob = params["njob"]
+    batch_size = params["batch_size"]
    output_dir = params["output_dir"]
+    model = params["model"]
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    split_dir = os.path.join(output_dir, "split")
    os.mkdir(split_dir)
-    nj = ngpu * njob
+    if ngpu > 0:
+        nj = ngpu
+    elif ngpu == 0:
+        nj = njob
    wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
    with open(wav_scp_file) as f:
        lines = f.readlines()
@@ -56,7 +67,7 @@
    p = Pool(nj)
    for i in range(nj):
        p.apply_async(modelscope_infer_core,
-                      args=(output_dir, split_dir, njob, str(i + 1)))
+                      args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
    p.close()
    p.join()
@@ -81,8 +92,10 @@
if __name__ == "__main__":
    params = {}
    params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    params["data_dir"] = "./data/test"
    params["output_dir"] = "./results"
    params["ngpu"] = 1
    params["njob"] = 1
    modelscope_infer(params)
    params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
    params["njob"] = 1 # if ngpu = 0, will use cpu decoding
    params["batch_size"] = 64
    modelscope_infer(params)
egs_modelscope/asr/paraformer/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer_after_finetune.py
@@ -4,23 +4,18 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
+from modelscope.hub.snapshot_download import snapshot_download
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_after_finetune(params):
    # prepare for decoding
-    pretrained_model_path = os.path.join(os.environ["HOME"], ".cache/modelscope/hub", params["modelscope_model_name"])
-    for file_name in params["required_files"]:
-        if file_name == "configuration.json":
-            with open(os.path.join(pretrained_model_path, file_name)) as f:
-                config_dict = json.load(f)
-                config_dict["model"]["am_model_name"] = params["decoding_model_name"]
-            with open(os.path.join(params["output_dir"], "configuration.json"), "w") as f:
-                json.dump(config_dict, f, indent=4, separators=(',', ': '))
-        else:
-            shutil.copy(os.path.join(pretrained_model_path, file_name),
-                        os.path.join(params["output_dir"], file_name))
+    try:
+        pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
+    except BaseException:
+        raise BaseException(f"Please download pretrain model from ModelScope firstly.")
+    shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
    decoding_path = os.path.join(params["output_dir"], "decode_results")
    if os.path.exists(decoding_path):
        shutil.rmtree(decoding_path)
@@ -29,9 +24,9 @@
    # decoding
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model=params["output_dir"],
        model=pretrained_model_path,
        output_dir=decoding_path,
-        batch_size=64
+        batch_size=params["batch_size"]
    )
    audio_in = os.path.join(params["data_dir"], "wav.scp")
    inference_pipeline(audio_in=audio_in)
@@ -46,8 +41,8 @@
if __name__ == '__main__':
    params = {}
    params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
    params["output_dir"] = "./checkpoint"
    params["data_dir"] = "./data/test"
    params["decoding_model_name"] = "valid.acc.ave_10best.pb"
-    modelscope_infer_after_finetune(params)
+    params["batch_size"] = 64
+    modelscope_infer_after_finetune(params)
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1/infer.py
@@ -8,9 +8,14 @@
from funasr.utils.compute_wer import compute_wer
-def modelscope_infer_core(output_dir, split_dir, njob, idx):
+def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
    output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
-    gpu_id = (int(idx) - 1) // njob
+    if ngpu > 0:
+        use_gpu = 1
+        gpu_id = int(idx) - 1
+    else:
+        use_gpu = 0
+        gpu_id = -1
    if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
        gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
@@ -18,9 +23,10 @@
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    inference_pipline = pipeline(
        task=Tasks.auto_speech_recognition,
        model="damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1",
        model=model,
        output_dir=output_dir_job,
-        batch_size=64
+        batch_size=batch_size,
+        ngpu=use_gpu,
    )
    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
    inference_pipline(audio_in=audio_in)
@@ -30,13 +36,18 @@
    # prepare for multi-GPU decoding
    ngpu = params["ngpu"]
    njob = params["njob"]
+    batch_size = params["batch_size"]
    output_dir = params["output_dir"]
+    model = params["model"]
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    split_dir = os.path.join(output_dir, "split")
    os.mkdir(split_dir)
-    nj = ngpu * njob
+    if ngpu > 0:
+        nj = ngpu
+    elif ngpu == 0:
+        nj = njob
    wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
    with open(wav_scp_file) as f:
        lines = f.readlines()
@@ -56,7 +67,7 @@
    p = Pool(nj)
    for i in range(nj):
        p.apply_async(modelscope_infer_core,
-                      args=(output_dir, split_dir, njob, str(i + 1)))
+                      args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
    p.close()
    p.join()
@@ -81,8 +92,10 @@
if __name__ == "__main__":
    params = {}
    params["model"] = "damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1"
    params["data_dir"] = "./data/test"
    params["output_dir"] = "./results"
    params["ngpu"] = 1
    params["njob"] = 1
    modelscope_infer(params)
    params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
    params["njob"] = 1 # if ngpu = 0, will use cpu decoding
    params["batch_size"] = 64
    modelscope_infer(params)
egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1/infer_after_finetune.py
@@ -4,23 +4,18 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
+from modelscope.hub.snapshot_download import snapshot_download
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_after_finetune(params):
    # prepare for decoding
-    pretrained_model_path = os.path.join(os.environ["HOME"], ".cache/modelscope/hub", params["modelscope_model_name"])
-    for file_name in params["required_files"]:
-        if file_name == "configuration.json":
-            with open(os.path.join(pretrained_model_path, file_name)) as f:
-                config_dict = json.load(f)
-                config_dict["model"]["am_model_name"] = params["decoding_model_name"]
-            with open(os.path.join(params["output_dir"], "configuration.json"), "w") as f:
-                json.dump(config_dict, f, indent=4, separators=(',', ': '))
-        else:
-            shutil.copy(os.path.join(pretrained_model_path, file_name),
-                        os.path.join(params["output_dir"], file_name))
+    try:
+        pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
+    except BaseException:
+        raise BaseException(f"Please download pretrain model from ModelScope firstly.")
+    shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
    decoding_path = os.path.join(params["output_dir"], "decode_results")
    if os.path.exists(decoding_path):
        shutil.rmtree(decoding_path)
@@ -29,9 +24,9 @@
    # decoding
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model=params["output_dir"],
        model=pretrained_model_path,
        output_dir=decoding_path,
-        batch_size=64
+        batch_size=params["batch_size"]
    )
    audio_in = os.path.join(params["data_dir"], "wav.scp")
    inference_pipeline(audio_in=audio_in)
@@ -46,8 +41,8 @@
if __name__ == '__main__':
    params = {}
    params["modelscope_model_name"] = "damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1"
    params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
    params["output_dir"] = "./checkpoint"
    params["data_dir"] = "./data/test"
    params["decoding_model_name"] = "valid.acc.ave_10best.pb"
-    modelscope_infer_after_finetune(params)
+    params["batch_size"] = 64
+    modelscope_infer_after_finetune(params)
egs_modelscope/asr_vad_punc/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/infer_after_finetune.py
@@ -4,27 +4,17 @@
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
+from modelscope.hub.snapshot_download import snapshot_download
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_after_finetune(params):
    # prepare for decoding
-    if not os.path.exists(os.path.join(params["output_dir"], "punc")):
-        os.makedirs(os.path.join(params["output_dir"], "punc"))
-    if not os.path.exists(os.path.join(params["output_dir"], "vad")):
-        os.makedirs(os.path.join(params["output_dir"], "vad"))
-    pretrained_model_path = os.path.join(os.environ["HOME"], ".cache/modelscope/hub", params["modelscope_model_name"])
-    for file_name in params["required_files"]:
-        if file_name == "configuration.json":
-            with open(os.path.join(pretrained_model_path, file_name)) as f:
-                config_dict = json.load(f)
-                config_dict["model"]["am_model_name"] = params["decoding_model_name"]
-            with open(os.path.join(params["output_dir"], "configuration.json"), "w") as f:
-                json.dump(config_dict, f, indent=4, separators=(',', ': '))
-        else:
-            shutil.copy(os.path.join(pretrained_model_path, file_name),
-                        os.path.join(params["output_dir"], file_name))
+    try:
+        pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
+    except BaseException:
+        raise BaseException(f"Please download pretrain model from ModelScope firstly.")
+    shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
    decoding_path = os.path.join(params["output_dir"], "decode_results")
    if os.path.exists(decoding_path):
        shutil.rmtree(decoding_path)
@@ -33,16 +23,16 @@
    # decoding
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model=params["output_dir"],
        model=pretrained_model_path,
        output_dir=decoding_path,
-        batch_size=64
+        batch_size=params["batch_size"]
    )
    audio_in = os.path.join(params["data_dir"], "wav.scp")
    inference_pipeline(audio_in=audio_in)
    # computer CER if GT text is set
    text_in = os.path.join(params["data_dir"], "text")
-    if text_in is not None:
+    if os.path.exists(text_in):
        text_proc_file = os.path.join(decoding_path, "1best_recog/token")
        compute_wer(text_in, text_proc_file, os.path.join(decoding_path, "text.cer"))
@@ -50,8 +40,8 @@
if __name__ == '__main__':
    params = {}
    params["modelscope_model_name"] = "damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json", "punc/punc.pb", "punc/punc.yaml", "vad/vad.mvn", "vad/vad.pb", "vad/vad.yaml"]
    params["output_dir"] = "./checkpoint"
    params["data_dir"] = "./data/test"
    params["decoding_model_name"] = "valid.acc.ave_10best.pb"
-    modelscope_infer_after_finetune(params)
+    params["batch_size"] = 64
+    modelscope_infer_after_finetune(params)
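One behavioral fix hides in this last file: the old guard `if text_in is not None:` was always true, because `os.path.join` returns a string whether or not the file exists, so `compute_wer` ran even when no reference transcript was present. A minimal illustration of the difference (hypothetical path):

```python
import os

text_in = os.path.join("./data/test", "text")  # a path string; the file may not exist
print(text_in is not None)      # always True - the old check never skipped CER
print(os.path.exists(text_in))  # True only when the reference text file is present
```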