From 9ab78f13f73108239956db0e7f4c102fd1ae40a0 Mon Sep 17 00:00:00 2001
From: YdrMaster
Date: Thu, 23 Feb 2023 16:29:20 +0800
Subject: [PATCH] =?UTF-8?q?feat:=20=E5=AF=BC=E5=87=BA=20cuda=5Fruntime?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: YdrMaster
---
 pyinfinitensor/src/pyinfinitensor/onnx.py |  4 ++--
 src/ffi/ffi_infinitensor.cc               | 12 ++++++++++++
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/pyinfinitensor/src/pyinfinitensor/onnx.py b/pyinfinitensor/src/pyinfinitensor/onnx.py
index 207280ef..ef7d4de2 100644
--- a/pyinfinitensor/src/pyinfinitensor/onnx.py
+++ b/pyinfinitensor/src/pyinfinitensor/onnx.py
@@ -25,10 +25,10 @@ from onnx.shape_inference import infer_shapes
 from typing import Dict, List, Any, Tuple, Sequence, Union
 from functools import reduce
 
-runtime = backend.cpu_runtime()
+cpu_runtime = backend.cpu_runtime()
 
 
-def from_onnx(model: ModelProto) -> backend.GraphHandler:
+def from_onnx(model: ModelProto, runtime) -> backend.GraphHandler:
     model = infer_shapes(model)
     handler = backend.GraphHandler(runtime)
 
diff --git a/src/ffi/ffi_infinitensor.cc b/src/ffi/ffi_infinitensor.cc
index b791e66e..5d6085ea 100644
--- a/src/ffi/ffi_infinitensor.cc
+++ b/src/ffi/ffi_infinitensor.cc
@@ -6,6 +6,7 @@
 #include <pybind11/stl.h>
 
 #ifdef USE_CUDA
+#include "cuda/cuda_runtime.h"
 #include "cuda/operator_timer.h"
 #endif
 
@@ -94,6 +95,10 @@ static int tensor_dtype(Tensor t) {
     IT_ASSERT(false, "Unsupported data type");
 }
+#ifdef USE_CUDA
+static Ref<CudaRuntimeObj> cuda_runtime() { return make_ref<CudaRuntimeObj>(); }
+#endif
+
 static int concat_axis_of(Operator op) {
     IT_ASSERT(op->getOpType() == OpType::Concat);
     return dynamic_cast<const ConcatObj *>(op.get())->getDim();
 }
@@ -118,6 +123,9 @@ static Shape reshape_shape_of(Operator op) {
 void export_functions(py::module &m) {
 #define FUNCTION(NAME) def(#NAME, &NAME)
     m.def("cpu_runtime", &CpuRuntimeObj::getInstance)
+#ifdef USE_CUDA
+        .FUNCTION(cuda_runtime)
+#endif
         .FUNCTION(tensor_dtype)
         .FUNCTION(reshape_shape_of)
         .FUNCTION(concat_axis_of)
@@ -132,6 +140,10 @@ void init_graph_builder(py::module &m) {
     py::class_<RuntimeObj, std::shared_ptr<RuntimeObj>>(m, "Runtime");
     py::class_<CpuRuntimeObj, std::shared_ptr<CpuRuntimeObj>, RuntimeObj>(
         m, "CpuRuntime");
+#ifdef USE_CUDA
+    py::class_<CudaRuntimeObj, std::shared_ptr<CudaRuntimeObj>, RuntimeObj>(
+        m, "CudaRuntime");
+#endif
     py::class_<TensorObj, std::shared_ptr<TensorObj>>(m, "Tensor")
        .def("shape", &TensorObj::getDims, policy::move)
        .def("src", &TensorObj::getOutputOf, policy::move);
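
Usage sketch (not part of the diff above): a minimal Python example of how the
new export is meant to be called. It assumes a build with USE_CUDA, that the
compiled pybind11 module is importable as pyinfinitensor.backend (adjust to
however the backend module is exposed in your build), and "model.onnx" is only
a placeholder file name.

    # Sketch of the intended call pattern after this patch.
    import onnx
    from pyinfinitensor import backend        # assumed import path of the pybind11 module
    from pyinfinitensor.onnx import from_onnx

    model = onnx.load("model.onnx")           # placeholder path to any ONNX model

    # cpu_runtime() is always exported; cuda_runtime() exists only in CUDA builds.
    runtime = backend.cpu_runtime()
    if hasattr(backend, "cuda_runtime"):
        runtime = backend.cuda_runtime()

    # from_onnx() now takes the runtime explicitly instead of relying on the
    # old module-level "runtime" global, so one importer can target either device.
    handler = from_onnx(model, runtime)

Passing the runtime in as a parameter, rather than binding the importer to a
module-level CPU runtime, is what lets the same ONNX frontend drive either
backend without further code changes.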