# NOTE(review): this chunk is the interior of an export routine whose `def`
# line (and the `if` that pairs with the `else:` below) lie outside this
# view. Every line carries markdown-table pipe residue ("| | |") and the
# original indentation was lost in extraction — structure is inferred.
| | |
# Export runs on CPU with single-sample batches; both are written into
# kwargs so the downstream exporters pick them up.
| | | device = "cpu" |
| | | kwargs["batch_size"] = 1 |
| | | kwargs["device"] = device |
| | | |
# NOTE(review): duplicated thread setup — the guarded call below fetches
# kwargs.get("ncpu") with NO default, so when "ncpu" is absent the guard
# (default 4, truthy) passes but torch.set_num_threads(None) would raise
# TypeError. The unconditional call two lines down is the safe variant;
# this looks like a merge artifact — confirm which one is intended and
# delete the other.
| | | if kwargs.get("ncpu", 4): |
| | | torch.set_num_threads(kwargs.get("ncpu")) |
| | | |
| | | torch.set_num_threads(kwargs.get("ncpu", 4)) |
| | | |
| | | # build tokenizer |
# "tokenizer" arrives in kwargs as a registry name; it is replaced below
# by the constructed instance and written back into kwargs.
| | | tokenizer = kwargs.get("tokenizer", None) |
| | | if tokenizer is not None: |
# NOTE(review): tables.tokenizer_classes.get() returns None for an
# unregistered name, which would make the constructor calls below raise
# TypeError — presumably only registered names reach here; verify.
| | | tokenizer_class = tables.tokenizer_classes.get(tokenizer) |
# NOTE(review): the next line is dead/duplicated — it indexes
# kwargs["tokenizer_conf"] directly (KeyError when the key is absent) and
# its result is immediately overwritten by the safe .get(..., {}) form on
# the following two lines. Another apparent merge artifact — keep only
# the .get() variant.
| | | tokenizer = tokenizer_class(**kwargs["tokenizer_conf"]) |
| | | tokenizer_conf = kwargs.get("tokenizer_conf", {}) |
| | | tokenizer = tokenizer_class(**tokenizer_conf) |
| | | kwargs["tokenizer"] = tokenizer |
| | | |
# Expose the tokenizer vocabulary to the exporters when the instance has
# one; tokenizer may still be the original None here, in which case
# hasattr is False and token_list is set to None.
| | | kwargs["token_list"] = tokenizer.token_list if hasattr(tokenizer, "token_list") else None |
| | |
# ONNX export path — the `if` selecting between the two exporters is not
# visible in this chunk (presumably an export-format flag; confirm
# against the full function).
| | | export_dir = export_utils.export_onnx( |
| | | model=model, |
| | | data_in=data_list, |
| | | quantize=quantize, |
| | | fallback_num=fallback_num, |
| | | calib_num=calib_num, |
| | | opset_version=opset_version, |
| | | **kwargs) |
| | | else: |
# TorchScript export path; mirrors the ONNX call with identical
# arguments (opset_version is presumably ignored by this backend —
# TODO confirm in export_utils).
| | | export_dir = export_utils.export_torchscripts( |
| | | model=model, |
| | | data_in=data_list, |
| | | quantize=quantize, |
| | | fallback_num=fallback_num, |
| | | calib_num=calib_num, |
| | | opset_version=opset_version, |
| | | **kwargs) |
| | | |
# Directory containing the exported model artifacts, whichever backend ran.
| | | return export_dir |