| | |
| | | os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id]) |
| | | else: |
| | | os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id) |
| | | inference_pipeline = pipeline( |
| | | task=Tasks.auto_speech_recognition, |
| | | model="damo/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online", |
| | | output_dir=output_dir_job, |
| | | batch_size=1 |
| | | ) |
| | | audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx)) |
| | | inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"}) |
| | | inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"}) |
| | | |
| | | |
| | | def modelscope_infer(params): |