# Download model checkpoints from ModelScope.
# Requires: ${workspace} set by the caller to a writable working directory.
local_path_root="${workspace}/modelscope_models"
mkdir -p "${local_path_root}"

# Multilingual Whisper-large ASR model.
local_path="${local_path_root}/speech_whisper-large_asr_multilingual"
git clone https://www.modelscope.cn/iic/speech_whisper-large_asr_multilingual.git "${local_path}"

# Whisper-large-v2 — uncomment this section (and comment out the v3 section
# below) to use the v2 checkpoint instead:
#local_path=${local_path_root}/speech_whisper-large_asr_multilingual
#git clone https://www.modelscope.cn/iic/speech_whisper-large_asr_multilingual.git ${local_path}
#init_param="${local_path}/large-v2.pt"

# Whisper-large-v3 (active).
local_path="${local_path_root}/Whisper-large-v3"
git clone https://www.modelscope.cn/iic/Whisper-large-v3.git "${local_path}"
init_param="${local_path}/large-v3.pt"

device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"

config="config.yaml"
# FIX: removed a stale `init_param="${local_path}/large-v2.pt"` that was here.
# It overwrote the correct v3 checkpoint path set above, and `large-v2.pt`
# does not exist inside the Whisper-large-v3 clone that ${local_path} now
# points to — inference would have failed to locate the checkpoint.
# Run FunASR inference with the model cloned above.
# NOTE(review): this command continues beyond the visible chunk (trailing
# backslashes) — presumably it also passes ${config}, ${init_param} and
# ${device} defined earlier; confirm against the full file.
python -m funasr.bin.inference \
--config-path "${local_path}" \