egs_modelscope/asr/data2vec/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch/infer.py
@@ -16,13 +16,13 @@
     os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
 else:
     os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-inference_pipline = pipeline(
+inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
     model="damo/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch",
     output_dir=output_dir_job,
 )
 audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-inference_pipline(audio_in=audio_in)
+inference_pipeline(audio_in=audio_in)


 def modelscope_infer(params):