diff --git a/examples/python/onnx_inference.py b/examples/python/onnx_inference.py
new file mode 100644
index 00000000..152fe37a
--- /dev/null
+++ b/examples/python/onnx_inference.py
@@ -0,0 +1,29 @@
+import sys
+import onnx
+import torch
+import numpy as np
+from pyinfinitensor.onnx import OnnxStub, backend
+
+if __name__ == '__main__':
+    args = sys.argv
+    if len(args) != 2:
+        print("Usage: python onnx_inference.py model_name.onnx")
+        sys.exit()
+    model_path = args[1]
+    # print(model_path)
+
+    onnx_model = onnx.load(model_path)
+    onnx_input = onnx_model.graph.input[0]
+    input_shape = [[d.dim_value for d in _input.type.tensor_type.shape.dim]
+                   for _input in onnx_model.graph.input]
+    # Assume that there is only one input tensor
+    input_shape = input_shape[0]
+    # print(input_shape)
+    input_data = np.random.random(input_shape).astype(np.float32)
+
+    model = OnnxStub(onnx_model, backend.cuda_runtime())
+    next(iter(model.inputs.values())).copyin_numpy(input_data)
+    model.run()
+    outputs = next(iter(model.outputs.values())).copyout_numpy()
+    outputs = torch.tensor(outputs)
+    print(outputs.shape)
diff --git a/examples/python/resnet_inference.py b/examples/python/resnet_inference.py
new file mode 100644
index 00000000..4c58c7a6
--- /dev/null
+++ b/examples/python/resnet_inference.py
@@ -0,0 +1,24 @@
+import sys
+import onnx
+import torch
+import numpy as np
+from pyinfinitensor.onnx import OnnxStub, backend
+import torchvision.models as models
+
+if __name__ == '__main__':
+    model_path = './resnet50.onnx'
+    tv_model = models.resnet50(weights=None)
+    input_shape = (1, 3, 224, 224)
+    param = torch.rand(input_shape)
+    torch.onnx.export(tv_model, param, model_path, verbose=False)
+
+    onnx_model = onnx.load(model_path)
+    model = OnnxStub(onnx_model, backend.cuda_runtime())
+    images = np.random.random(input_shape).astype(np.float32)
+    next(iter(model.inputs.values())).copyin_numpy(images)
+    model.run()
+    outputs = next(iter(model.outputs.values())).copyout_numpy()
+    outputs = torch.tensor(outputs)
+    outputs = torch.reshape(outputs, (1, 1000))
+    _, predicted = torch.max(outputs, 1)
+    print(predicted)
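
Usage note for the generic example above: the following is a minimal sketch of one way to produce a test model and invoke examples/python/onnx_inference.py. The model choice (resnet18) and the output file name model.onnx are illustrative assumptions, not part of this diff; the script only needs any single-input ONNX file and a CUDA-capable build of pyinfinitensor.

# Hypothetical driver for onnx_inference.py (assumes torch and torchvision
# are installed; the model and file name here are arbitrary examples).
import torch
import torchvision.models as models

tv_model = models.resnet18(weights=None)   # any exportable single-input model works
dummy_input = torch.rand(1, 3, 224, 224)
torch.onnx.export(tv_model, dummy_input, "model.onnx", verbose=False)

# Then run the example from the repository root:
#   python examples/python/onnx_inference.py model.onnx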