From 315763a83a3684457ad2745e76c4c0eb207ef5be Mon Sep 17 00:00:00 2001
From: YdrMaster <ydrml@hotmail.com>
Date: Wed, 15 Feb 2023 11:41:06 +0800
Subject: [PATCH] =?UTF-8?q?feat:=20=E5=89=8D=E7=AB=AF=E6=94=AF=E6=8C=81=20?=
 =?UTF-8?q?pad=20=E5=8F=8A=E5=8D=95=E5=85=83=E6=B5=8B=E8=AF=95?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: YdrMaster <ydrml@hotmail.com>
---
 include/core/graph_handler.h              |  2 ++
 include/operators/pad.h                   |  4 ++--
 pyinfinitensor/src/pyinfinitensor/onnx.py |  7 +++++++
 pyinfinitensor/tests/test_onnx.py         | 22 +++++++++++++++++++---
 src/core/graph_handler.cc                 | 13 +++++++++++++
 src/ffi/ffi_infinitensor.cc               |  4 ++++
 src/operators/pad.cc                      |  9 ++++-----
 7 files changed, 51 insertions(+), 10 deletions(-)

diff --git a/include/core/graph_handler.h b/include/core/graph_handler.h
index 9ee2ed7e..18024f2a 100644
--- a/include/core/graph_handler.h
+++ b/include/core/graph_handler.h
@@ -70,6 +70,8 @@ class GraphHandlerObj {
     Tensor slice(Tensor input, Tensor output, const vector<int> &starts,
                  const vector<int> &ends, const optional<vector<int>> &axes,
                  const optional<vector<int>> &steps);
+    Tensor pad(Tensor input, Tensor output, const vector<int> &pads,
+               const optional<vector<int>> &axes);
 };
 
 } // namespace infini
diff --git a/include/operators/pad.h b/include/operators/pad.h
index 4df709d2..7a25d8bd 100644
--- a/include/operators/pad.h
+++ b/include/operators/pad.h
@@ -21,10 +21,10 @@ class PadObj : public OperatorObj {
      * @param pads Add padding elements at the begining and end of each axis.
      * Suppose that padding axes are [x1, x2, ...], then pads's format is
      * [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
-     * @param axis Pad for appointed axes. If axis is empty, pad for all axes.
+     * @param axes Pad for appointed axes. If axes is empty, pad for all axes.
      */
     PadObj(GraphObj *graph, Tensor input, Tensor output,
-           const vector<int> &pads, const optional<vector<int>> &axis);
+           const vector<int> &pads, const optional<vector<int>> &axes);
     OP_CLONE(PadObj);
 
     optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
diff --git a/pyinfinitensor/src/pyinfinitensor/onnx.py b/pyinfinitensor/src/pyinfinitensor/onnx.py
index 7f60bf95..c66995d2 100644
--- a/pyinfinitensor/src/pyinfinitensor/onnx.py
+++ b/pyinfinitensor/src/pyinfinitensor/onnx.py
@@ -202,6 +202,13 @@ def from_onnx(model: onnx.ModelProto):
                 _parse_data(data[node.input[3]]) if len(node.input) > 3 else None,
                 _parse_data(data[node.input[4]]) if len(node.input) > 4 else None,
             )
+        elif node.op_type == "Pad":
+            tensors[node.output[0]] = handler.pad(
+                tensors[node.input[0]],
+                tensors.get(node.output[0]),
+                _parse_data(data[node.input[1]]),
+                _parse_data(data[node.input[3]]) if len(node.input) > 3 else None,
+            )
         else:
             raise Exception('Unsupported operator "{}"'.format(node.op_type))
 
diff --git a/pyinfinitensor/tests/test_onnx.py b/pyinfinitensor/tests/test_onnx.py
index 5fb375a1..4f86cd8d 100644
--- a/pyinfinitensor/tests/test_onnx.py
+++ b/pyinfinitensor/tests/test_onnx.py
@@ -215,9 +215,7 @@ class TestStringMethods(unittest.TestCase):
         starts_data = make_tensor("starts", TensorProto.INT64, [4], [2, 10, 1, 5])
         ends = make_tensor_value_info("ends", TensorProto.INT64, [4])
         ends_data = make_tensor("ends", TensorProto.INT64, [4], [3, 10, 100, 100])
-        slice = make_node(
-            "Slice", ["data", "starts", "ends"], ["output"], name="gather"
-        )
+        slice = make_node("Slice", ["data", "starts", "ends"], ["output"], name="slice")
         make_and_import_model(
             make_graph(
                 [slice],
@@ -228,6 +226,24 @@ class TestStringMethods(unittest.TestCase):
             )
         )
 
+    def test_pad(self):
+        data = make_tensor_value_info("data", TensorProto.UINT32, [1, 64, 162, 162])
+        output = make_tensor_value_info("output", TensorProto.UINT32, [3, 84, 164, 172])
+        pads = make_tensor_value_info("pads", TensorProto.INT64, [8])
+        pads_data = make_tensor(
+            "pads", TensorProto.INT64, [8], [2, 10, 1, 5, 0, 10, 1, 5]
+        )
+        pad = make_node("Pad", ["data", "pads"], ["output"], name="pad")
+        make_and_import_model(
+            make_graph(
+                [pad],
+                "pad",
+                [data, pads],
+                [output],
+                [pads_data],
+            )
+        )
+
     # see <https://onnx.ai/onnx/intro/python.html#a-simple-example-a-linear-regression>
     def test_linear(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
diff --git a/src/core/graph_handler.cc b/src/core/graph_handler.cc
index ec6175e1..3b408ff4 100644
--- a/src/core/graph_handler.cc
+++ b/src/core/graph_handler.cc
@@ -4,6 +4,7 @@
 #include "operators/element_wise.h"
 #include "operators/gather.h"
 #include "operators/matmul.h"
+#include "operators/pad.h"
 #include "operators/pooling.h"
 #include "operators/reduce_mean.h"
 #include "operators/reshape.h"
@@ -180,6 +181,18 @@ Tensor GraphHandlerObj::slice(Tensor input, Tensor output,
     }
 }
 
+Tensor GraphHandlerObj::pad(Tensor input, Tensor output,
+                            const vector<int> &pads,
+                            const optional<vector<int>> &axes) {
+    if (output) {
+        g->addOpWithOutputs<PadObj>(std::move(input), output, pads, axes);
+        return output;
+    } else {
+        return g->addOp<PadObj>(std::move(input), output, pads, axes)
+            ->getOutput();
+    }
+}
+
 static DataType dtype_repr_convert(int dtype) {
     switch ((OnnxDType)dtype) {
     case OnnxDType::FLOAT:
diff --git a/src/ffi/ffi_infinitensor.cc b/src/ffi/ffi_infinitensor.cc
index ffe38b39..1e152c05 100644
--- a/src/ffi/ffi_infinitensor.cc
+++ b/src/ffi/ffi_infinitensor.cc
@@ -100,6 +100,10 @@ void init_graph_builder(py::module &m) {
                  Tensor, Tensor, const vector<int> &, const vector<int> &,
                  const optional<vector<int>> &, const optional<vector<int>> &>(
                  &Handler::slice),
+             policy::move)
+        .def("pad",
+             py::overload_cast<Tensor, Tensor, const vector<int> &,
+                               const optional<vector<int>> &>(&Handler::pad),
              policy::move);
 }
 
diff --git a/src/operators/pad.cc b/src/operators/pad.cc
index 7e914f8e..1624236e 100644
--- a/src/operators/pad.cc
+++ b/src/operators/pad.cc
@@ -2,19 +2,18 @@
 
 namespace infini {
 PadObj::PadObj(GraphObj *graph, Tensor input, Tensor output,
-               const vector<int> &_pads,
-               const optional<vector<int>> &axis)
+               const vector<int> &_pads, const optional<vector<int>> &axes)
     : OperatorObj(OpType::Pad, {input}, {output}) {
-    if (!axis)
+    if (!axes)
         pads = _pads;
     else {
-        auto nAxis = (*axis).size();
+        auto nAxis = (*axes).size();
         IT_ASSERT(_pads.size() == nAxis * 2);
         auto nDims = input->getDims().size();
         pads = vector<int>(nDims * 2, 0);
         for (size_t i = 0; i < nAxis; ++i) {
-            auto j = (*axis)[i];
+            auto j = (*axes)[i];
             pads[j] = _pads[i];
             pads[j + nDims] = _pads[i + nAxis];
         }