diff --git a/include/core/graph_handler.h b/include/core/graph_handler.h
index 72f7a6a3..9936c637 100644
--- a/include/core/graph_handler.h
+++ b/include/core/graph_handler.h
@@ -89,18 +89,6 @@ class GraphHandlerObj {
 
     inline void data_malloc() { g->dataMalloc(); }
 
-    inline void copy_int32(Tensor tensor, std::vector<int32_t> list) {
-        tensor->copyin(list);
-    }
-
-    inline void copy_int64(Tensor tensor, std::vector<int64_t> list) {
-        tensor->copyin(list);
-    }
-
-    inline void copy_float(Tensor tensor, std::vector<float> list) {
-        tensor->copyin(list);
-    }
-
     inline void run() { g->getRuntime()->run(g); }
 };
 
diff --git a/include/core/tensor.h b/include/core/tensor.h
index af3ed3f6..ed95f5f5 100644
--- a/include/core/tensor.h
+++ b/include/core/tensor.h
@@ -57,20 +57,16 @@ class TensorObj : public TensorBaseObj {
         return ans;
     }
     // Copy the element at `pos`.
-    template <typename T> inline auto copyout(const vector<int> &pos) const {
+    template <typename T> inline auto copyOne(const vector<int> &pos) const {
         IT_ASSERT(DataType::get<T>() == dtype);
         auto offset = getOffset(pos);
         auto bytes = dtype.getSize();
         T ans;
-        runtime->copyBlobToCPU(&ans, getRawDataPtr<uint8_t *>() + offset * bytes,
-                               bytes);
+        runtime->copyBlobToCPU(
+            &ans, getRawDataPtr<uint8_t *>() + offset * bytes, bytes);
         return ans;
     }
 
-    inline auto copyoutFloat() const { return copyout<float>(); }
-    inline auto copyoutInt32() const { return copyout<int32_t>(); }
-    inline auto copyoutInt64() const { return copyout<int64_t>(); }
-
     void copyData(const TensorObj *src);
     void copyData(const Tensor &src) { copyData(src.get()); }
     void setData(
diff --git a/pyinfinitensor/src/pyinfinitensor/onnx.py b/pyinfinitensor/src/pyinfinitensor/onnx.py
index f85addb4..4ec4f365 100644
--- a/pyinfinitensor/src/pyinfinitensor/onnx.py
+++ b/pyinfinitensor/src/pyinfinitensor/onnx.py
@@ -101,7 +101,7 @@ class OnnxStub:
                 (alpha, beta, transA, transB) = (
                     attributes[name] for name in ["alpha", "beta", "transA", "transB"]
                 )
-                # TODO: these attributes are not supported
+                # FIXME: unsupported attributes `alpha` and `beta`
                 assert alpha == 1.0
                 assert beta == 1.0
                 tensors[node.output[0]] = self.handler.matmul(
@@ -265,7 +265,7 @@ class OnnxStub:
                     tensors.get(node.output[0]),
                 )
             elif node.op_type == "Flatten":
-                # TODO: the backend operator cannot flatten along an arbitrary axis
+                # FIXME: `axis` must be 1
                 axis = next(
                     (attr.i for attr in node.attribute if attr.name == "axis"), None
                 )
@@ -315,7 +315,7 @@ class OnnxStub:
                     next((attr.i for attr in node.attribute if attr.name == "axis")),
                 )
             elif node.op_type == "ReduceMean":
-                tensors[node.output[0]] = self.handler.reduceMean(
+                tensors[node.output[0]] = self.handler.reduce_mean(
                     tensors[node.input[0]],
                     tensors.get(node.output[0]),
                     tensors[node.input[1]] if len(node.input) > 1 else None,
@@ -351,11 +351,11 @@ class OnnxStub:
             else:
                 self.initializer[obj.fuid()] = tensor
                 if tensor.data_type == TensorProto.INT32:
-                    self.handler.copy_int32(obj, [int(i) for i in tensor.int32_data])
+                    obj.copyin_int32([int(i) for i in tensor.int32_data])
                 elif tensor.data_type == TensorProto.INT64:
-                    self.handler.copy_int64(obj, [int(i) for i in tensor.int64_data])
+                    obj.copyin_int64([int(i) for i in tensor.int64_data])
                 elif tensor.data_type == TensorProto.FLOAT:
-                    self.handler.copy_float(obj, [float(i) for i in tensor.float_data])
+                    obj.copyin_float([float(i) for i in tensor.float_data])
                 else:
                     assert False, "Unsupported Tensor Type: {}".format(tensor.data_type)
 
diff --git a/src/ffi/ffi_infinitensor.cc b/src/ffi/ffi_infinitensor.cc
index 390ecb2d..627be8bf 100644
--- a/src/ffi/ffi_infinitensor.cc
+++ b/src/ffi/ffi_infinitensor.cc
@@ -177,9 +177,12 @@ void init_graph_builder(py::module &m) {
     py::class_<TensorObj, std::shared_ptr<TensorObj>>(m, "Tensor")
         .def("fuid", &TensorObj::getFuid, policy::automatic)
         .def("shape", &TensorObj::getDims, policy::move)
-        .def("copyoutFloat", &TensorObj::copyoutFloat, policy::move)
-        .def("copyoutInt32", &TensorObj::copyoutInt32, policy::move)
-        .def("copyoutInt64", &TensorObj::copyoutInt64, policy::move)
+        .def("copyin_float", &TensorObj::copyin<float>, policy::move)
+        .def("copyin_int32", &TensorObj::copyin<int32_t>, policy::move)
+        .def("copyin_int64", &TensorObj::copyin<int64_t>, policy::move)
+        .def("copyout_float", &TensorObj::copyout<float>, policy::move)
+        .def("copyout_int32", &TensorObj::copyout<int32_t>, policy::move)
+        .def("copyout_int64", &TensorObj::copyout<int64_t>, policy::move)
         .def("has_target", &TensorObj::hasTarget, policy::automatic)
         .def("src", &TensorObj::getOutputOf, policy::move);
     py::class_<OperatorObj, std::shared_ptr<OperatorObj>>(m, "Operator")
@@ -212,15 +215,12 @@ void init_graph_builder(py::module &m) {
         .def("reshape", &Handler::reshape, policy::move)
         .def("concat", &Handler::concat, policy::move)
         .def("gather", &Handler::gather, policy::move)
-        .def("reduceMean", &Handler::reduceMean, policy::move)
+        .def("reduce_mean", &Handler::reduceMean, policy::move)
         .def("slice", &Handler::slice, policy::move)
         .def("pad", &Handler::pad, policy::move)
         .def("topo_sort", &Handler::topo_sort, policy::automatic)
         .def("operators", &Handler::operators, policy::move)
         .def("data_malloc", &Handler::data_malloc, policy::automatic)
-        .def("copy_int32", &Handler::copy_int32, policy::automatic)
-        .def("copy_int64", &Handler::copy_int64, policy::automatic)
-        .def("copy_float", &Handler::copy_float, policy::automatic)
         .def("run", &Handler::run, policy::automatic);
 }
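
Usage note (not part of the patch): this change moves host/device copies off
GraphHandler (`copy_int32`/`copy_int64`/`copy_float`) and onto the `Tensor`
binding itself, split into `copyin_*`/`copyout_*` pairs, and renames the
`reduceMean` binding to `reduce_mean` for a consistent snake_case Python
surface. A minimal sketch of the intended round trip, assuming `handler`,
`inp`, and `out` are a graph handler and two tensors obtained from an
already-built graph (only the method names shown in this diff are real; the
surrounding setup is hypothetical):

    from math import prod

    handler.data_malloc()            # allocate device buffers first
    n = prod(inp.shape())            # element count from the tensor's dims
    inp.copyin_float([0.0] * n)      # host -> device (was handler.copy_float)
    handler.run()                    # execute the graph
    result = out.copyout_float()     # device -> host (was out.copyoutFloat())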