from funasr import AutoModel

# Method 1: load a model by ModelScope hub id (downloaded/cached automatically),
# then export it for deployment.
# Pick ONE model — instantiating both would trigger two downloads and the first
# result would be discarded. The contextual variant is the one used here.
# model = AutoModel(
#     model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
# )
model = AutoModel(
    model="iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404",
)

# Export to both runtimes. Keep each result in its own name so neither export's
# output path is silently overwritten before being reported.
onnx_res = model.export(type="onnx", quantize=False)
print(onnx_res)
torchscript_res = model.export(type="torchscript", quantize=False)
print(torchscript_res)
# res = model.export(type="bladedisc", input=f"{model.model_path}/example/asr_example.wav")
# Method 2: load the model from a local directory instead of the hub.
from funasr import AutoModel

# NOTE(review): machine-specific path (a user's ModelScope cache) — adjust to
# your own model directory before running.
local_model_path = (
    "/Users/zhifu/.cache/modelscope/hub/iic/"
    "speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
)
model = AutoModel(model=local_model_path)

# Export the locally loaded model to ONNX and report the result.
res = model.export(type="onnx", quantize=False)
print(res)