
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

# Download the model weights from the ModelScope hub into a local directory
# so that "method 2" below can run inference from a local path.
local_path_root=./modelscope_models
mkdir -p "${local_path_root}"
local_path=${local_path_root}/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
# NOTE(review): the original cloned the *contextual* model
# (speech_paraformer-large-contextual_asr_nat-...) into this non-contextual
# path; the URL below now matches both the directory name and the hub model
# id used later in this script.
git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git "${local_path}"
# --- Inference configuration -------------------------------------------------
# Input audio to transcribe. A URL is used here; see readme.md for the other
# supported input types.
input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"

# Directory where recognition results are written.
output_dir="./outputs/debug"

# Model id on the ModelScope hub (method 1: inference straight from the hub).
model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
| | | |
# Method 2: run inference from the locally cloned model directory on CPU.
# NOTE: the original script ended the '+device="cpu"' line with a trailing
# '\', which pulled the following 'device=...' assignment into the command
# line as a bogus argument — and left the ${device} variable (used by the
# hub-based run below) unset. The command now ends cleanly and the
# assignment is a real shell variable.
python funasr/bin/inference.py \
    +model="${local_path}" \
    +input="${local_path}/example/asr_example.wav" \
    +output_dir="${output_dir}" \
    +device="cpu"

# Device for the hub-based run: "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu".
device="cuda:0"
| | | |
# Method 1: run inference straight from the ModelScope hub using the model id
# in ${model}. ('++key=value' is a hydra-style forced override — presumably
# add-or-override semantics; confirm against the funasr CLI docs.)
# NOTE: the original left a dangling '\' on the final line, which would
# silently swallow any line appended after it.
python -m funasr.bin.inference \
    ++model="${model}" \
    ++input="${input}" \
    ++output_dir="${output_dir}" \
    ++device="${device}"