| | |
| | | device="cpu", |
| | | ) |
| | | |
# Export the model to both formats. Each export's return value is printed;
# previously the ONNX result was overwritten before it could be printed.
res = model.export(type="onnx", quantize=False)
print(res)
res = model.export(type="torchscripts", quantize=False)
print(res)
| | | |
| | | |
# method2: load the model for export from a local path.
# NOTE(review): removed the duplicated commented-out copies of this same code
# that previously cluttered the section.
from funasr import AutoModel

model = AutoModel(
    model="/Users/zhifu/.cache/modelscope/hub/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    device="cpu",
)

# Export to ONNX without post-training quantization and show the result
# (presumably the output file path(s) — confirm against FunASR's export API).
res = model.export(type="onnx", quantize=False)
print(res)