# Method 1: export a model downloaded from the ModelScope hub.
from funasr import AutoModel

wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"

# NOTE(review): the original script first built the plain paraformer model
# ("iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
# model_revision="v2.0.4") and immediately rebound `model` to the contextual
# variant, so the first model was downloaded/loaded and then discarded.
# Only the contextual model is actually exported; the dead instantiation
# has been removed.
model = AutoModel(
    model="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404",
)

# Export to ONNX; quantize=False keeps full-precision weights.
res = model.export(input=wav_file, type="onnx", quantize=False)
print(res)

# Export to TorchScript as well. The original overwrote `res` before the
# single print, hiding the ONNX result — print each export result instead.
res = model.export(type="torchscripts", quantize=False)
print(res)
# res = model.export(type="bladedisc", input=f"{model.model_path}/example/asr_example.wav")
# Method 2: export a model from a local path.
from funasr import AutoModel

wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"

# assumes the model has already been downloaded to this local ModelScope
# cache path — TODO confirm the path exists on the target machine
model = AutoModel(
    model="/Users/zhifu/.cache/modelscope/hub/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
)

# Export to ONNX with full-precision weights; the input wav drives tracing.
res = model.export(input=wav_file, type="onnx", quantize=False)
print(res)