fix export error if using cpu device; fix model path (#2265)
Co-authored-by: siyuan.yang <siyuan.yang@sophgo.com>
```diff
 from funasr import AutoModel

 model = AutoModel(
-    model="/raid/t3cv/wangch/WORK_SAPCE/ASR/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+    model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
 )

 res = model.export(type="onnx", quantize=False, opset_version=13, device='cuda')  # fp32 onnx-gpu
```
|
| | |
```diff
     **kwargs,
 ):
+    device = kwargs.get("device", "cpu")
     dummy_input = model.export_dummy_inputs()
-    dummy_input = (dummy_input[0].to("cuda"), dummy_input[1].to("cuda"))
+    dummy_input = (dummy_input[0].to(device), dummy_input[1].to(device))

     verbose = kwargs.get("verbose", False)
```