From 6a89946736f306181ecc221e0cf92c697a957b8e Mon Sep 17 00:00:00 2001
From: xgqdut2016
Date: Tue, 30 Apr 2024 15:04:12 +0800
Subject: [PATCH] modified format, replace layernorm with instancenorm

---
 include/core/graph_handler.h              |  2 +-
 include/operators/instance_norm.h         |  3 +--
 pyinfinitensor/src/pyinfinitensor/onnx.py |  5 ++---
 src/core/graph_handler.cc                 | 16 ++++++++--------
 src/ffi/ffi_infinitensor.cc               |  3 ++-
 src/kernels/ascend/instance_norm.cc       | 10 +++++-----
 src/operators/instance_norm.cc            | 10 ++++------
 .../ascend/test_ascend_instancenorm.cc    | 21 +++++++++------------
 8 files changed, 32 insertions(+), 38 deletions(-)

diff --git a/include/core/graph_handler.h b/include/core/graph_handler.h
index 2fa169d9..6412fe38 100644
--- a/include/core/graph_handler.h
+++ b/include/core/graph_handler.h
@@ -38,7 +38,7 @@ class GraphHandlerObj {
     Tensor layerNormalization(Tensor input, Tensor scale, Tensor output,
                               Tensor bias, float eps, int axis,
                               int stash_type);
     Tensor instanceNormalization(Tensor input, Tensor output, Tensor scale,
-                                Tensor bias, float eps);
+                                 Tensor bias, float eps);
     Tensor rmsNorm(Tensor input, Tensor weight, Tensor output);
     Tensor maxPool(Tensor input, Tensor output, int kh, int kw, int dh, int dw,
diff --git a/include/operators/instance_norm.h b/include/operators/instance_norm.h
index 265eae17..bb3729a0 100644
--- a/include/operators/instance_norm.h
+++ b/include/operators/instance_norm.h
@@ -7,12 +7,11 @@ class InstanceNormObj : public OperatorObj {
   public:
     InstanceNormObj(GraphObj *graph, Tensor input, Tensor output, Tensor scale,
-                    Tensor bias , float eps = 1e-5);
+                    Tensor bias, float eps = 1e-5);
     OP_CLONE(InstanceNormObj);
 
     optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     std::string toString() const override;
-
     int numInputs() const override { return inputs.size(); }
     int numOutputs() const override { return outputs.size(); }
     float getEps() const { return eps; }
 
diff --git a/pyinfinitensor/src/pyinfinitensor/onnx.py b/pyinfinitensor/src/pyinfinitensor/onnx.py
index 1f116c09..9ea61dc4 100644
--- a/pyinfinitensor/src/pyinfinitensor/onnx.py
+++ b/pyinfinitensor/src/pyinfinitensor/onnx.py
@@ -325,9 +325,9 @@ class OnnxStub:
                 )
             elif node.op_type == "InstanceNormalization":
                 (input, scale, bias) = (tensors[node.input[i]] for i in [0, 1, 2])
-                
+
                 output = tensors.get(node.output[0])
-                
+
                 tensors[node.output[0]] = self.handler.instanceNormalization(
                     input,
                     output,
@@ -337,7 +337,6 @@ class OnnxStub:
                         (attr.f for attr in node.attribute if attr.name == "epsilon"),
                         1e-5,
                     ),
-
                 )
             elif node.op_type == "RMSNorm":
                 tensors[node.output[0]] = self.handler.RMSNorm(
diff --git a/src/core/graph_handler.cc b/src/core/graph_handler.cc
index 32de9298..2b447ede 100644
--- a/src/core/graph_handler.cc
+++ b/src/core/graph_handler.cc
@@ -9,8 +9,8 @@
 #include "operators/element_wise.h"
 #include "operators/expand.h"
 #include "operators/gather.h"
-#include "operators/layer_norm.h"
 #include "operators/instance_norm.h"
+#include "operators/layer_norm.h"
 #include "operators/lrn.h"
 #include "operators/matmul.h"
 #include "operators/pad.h"
@@ -125,17 +125,17 @@ Tensor GraphHandlerObj::layerNormalization(Tensor input, Tensor scale,
             ->getOutput();
     }
 }
-Tensor GraphHandlerObj::instanceNormalization(Tensor input,
-                                              Tensor output, Tensor scale, Tensor bias,
-                                              float eps) {
+Tensor GraphHandlerObj::instanceNormalization(Tensor input, Tensor output,
+                                              Tensor scale, Tensor bias,
+                                              float eps) {
     if (output) {
-        g->addOpWithOutputs<InstanceNormObj>(std::move(input), output, std::move(scale),
-                                             std::move(bias), eps);
+        g->addOpWithOutputs<InstanceNormObj>(
+            std::move(input), output, std::move(scale), std::move(bias), eps);
         return output;
     } else {
         return g
-            ->addOp<InstanceNormObj>(std::move(input), output, std::move(scale),
-                                    std::move(bias), eps)
+            ->addOp<InstanceNormObj>(std::move(input), output, std::move(scale),
+                                     std::move(bias), eps)
             ->getOutput();
     }
 }
diff --git a/src/ffi/ffi_infinitensor.cc b/src/ffi/ffi_infinitensor.cc
index c3798b97..c9278926 100644
--- a/src/ffi/ffi_infinitensor.cc
+++ b/src/ffi/ffi_infinitensor.cc
@@ -529,7 +529,8 @@ void init_graph_builder(py::module &m) {
         .def("matmul", &Handler::matmul, policy::move)
         .def("batchNormalization", &Handler::batchNormalization, policy::move)
         .def("layerNormalization", &Handler::layerNormalization, policy::move)
-        .def("instanceNormalization", &Handler::instanceNormalization, policy::move)
+        .def("instanceNormalization", &Handler::instanceNormalization,
+             policy::move)
         .def("RMSNorm", &Handler::rmsNorm, policy::move)
         .def("maxPool", &Handler::maxPool, policy::move)
         .def("avgPool", &Handler::avgPool, policy::move)
diff --git a/src/kernels/ascend/instance_norm.cc b/src/kernels/ascend/instance_norm.cc
index c50dfe97..57ba02cd 100644
--- a/src/kernels/ascend/instance_norm.cc
+++ b/src/kernels/ascend/instance_norm.cc
@@ -27,13 +27,13 @@ class InstanceNormAclnn : public ASCENDKernelWithoutConfig {
         std::vector<int64_t> inputDim = castTo64(inputD);
         std::vector<int64_t> inputStride = castTo64(inputS);
 
-        std::vector<int64_t> weightDim = castTo64(weightD); 
+        std::vector<int64_t> weightDim = castTo64(weightD);
         std::vector<int64_t> weightStride = castTo64(weightS);
         std::vector<int64_t> outputDim = castTo64(outD);
         std::vector<int64_t> outputStride = castTo64(outS);
 
         auto axis = 3;
-        
+
         auto rank = static_cast<int>(inputDim.size());
         std::vector<int64_t> normalizedShape(rank - axis, 0);
         for (auto i = rank; i > axis; --i) {
@@ -86,7 +86,7 @@ class InstanceNormAclnn : public ASCENDKernelWithoutConfig {
         if (workspaceSize > 0) {
             workspaceAddr = context->getWorkspace(workspaceSize);
         }
-        auto tmp_err_msg = aclGetRecentErrMsg(); 
+        auto tmp_err_msg = aclGetRecentErrMsg();
         if (tmp_err_msg != NULL) {
             printf(" ERROR Message : %s \n ", tmp_err_msg);
         }
@@ -103,7 +103,7 @@ class InstanceNormAclnn : public ASCENDKernelWithoutConfig {
     }
 };
 
-REGISTER_KERNEL(Device::ASCEND, OpType::InstanceNormalization, InstanceNormAclnn,
-                "InstanceNorm_ASCEND");
+REGISTER_KERNEL(Device::ASCEND, OpType::InstanceNormalization,
+                InstanceNormAclnn, "InstanceNorm_ASCEND");
 
 }; // namespace infini
diff --git a/src/operators/instance_norm.cc b/src/operators/instance_norm.cc
index 0f286488..6761f835 100644
--- a/src/operators/instance_norm.cc
+++ b/src/operators/instance_norm.cc
@@ -2,14 +2,12 @@
 #include "utils/operator_utils.h"
 
 namespace infini {
-InstanceNormObj::InstanceNormObj(GraphObj *graph, Tensor input, Tensor output, Tensor scale,
-                                 Tensor bias,
-                                 float eps)
-    : OperatorObj(OpType::InstanceNormalization,
-                  TensorVec{input, scale, bias},
+InstanceNormObj::InstanceNormObj(GraphObj *graph, Tensor input, Tensor output,
+                                 Tensor scale, Tensor bias, float eps)
+    : OperatorObj(OpType::InstanceNormalization, TensorVec{input, scale, bias},
                   {output}),
       eps(eps) {
-    
+
     IT_ASSERT(checkValid(graph));
 }
diff --git a/test/kernels/ascend/test_ascend_instancenorm.cc b/test/kernels/ascend/test_ascend_instancenorm.cc
index 53b43ea4..18414e37 100644
--- a/test/kernels/ascend/test_ascend_instancenorm.cc
+++ b/test/kernels/ascend/test_ascend_instancenorm.cc
@@ -8,18 +8,17 @@
 
 namespace infini {
 
-void test_instancenormFp32(
-    const Shape &inputShape, const vector<float> &inputData,
-    const Shape &scaleShape, const vector<float> &scaleData, float eps,
-    const vector<float> &ExpectData,
-    const Shape &biasShape,
-    const vector<float> &biasData) {
+void test_instancenormFp32(const Shape &inputShape,
+                           const vector<float> &inputData,
+                           const Shape &scaleShape,
+                           const vector<float> &scaleData, float eps,
+                           const vector<float> &ExpectData,
+                           const Shape &biasShape,
+                           const vector<float> &biasData) {
     Runtime runtime = NativeCpuRuntimeObj::getInstance();
     Graph gCpu = make_ref<GraphObj>(runtime);
 
-
-
     auto bias = gCpu->addTensor(biasShape, DataType::Float32);
     auto input = gCpu->addTensor(inputShape, DataType::Float32);
     auto scale = gCpu->addTensor(scaleShape, DataType::Float32);
 
@@ -44,9 +43,8 @@ void test_instancenormFp32(
     scaleNpu->copyin(scaleData);
 
     ascendRuntime->run(gAscend);
-    auto oCpu =
-        gCpu->cloneTensor(op->getOutput()); // move Data from npu to cpu
-    oCpu->printData();                      //->printData
+    auto oCpu = gCpu->cloneTensor(op->getOutput()); // move Data from npu to cpu
+    oCpu->printData(); //->printData
     EXPECT_TRUE(oCpu->equalData(ExpectData));
 }
 
@@ -67,7 +65,6 @@ TEST(CUDA_InstancenormFp32, run) {
             -0.3674207, 0.0000000, 0.6123678, -0.3674207, 0.0000000, 0.6123678,
             -0.3674207, 0.0000000, 0.6123678, -0.3674207, 0.0000000, 0.6123678},
         Shape{3}, vector<float>{0, 0, 0});
-
     aclFinalize();
 }
 // python output
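
Note (not part of the patch): ONNX InstanceNormalization, which the handler,
Ascend kernel, and test above wire up, normalizes each (batch, channel) slice
of the input over its spatial dimensions and then applies the per-channel
scale and bias:

    y = scale * (x - mean) / sqrt(var + eps) + bias

Below is a minimal NumPy reference sketch of those semantics; the function
name and the sample shapes are illustrative, not from this repository. It can
be used to sanity-check expected values such as the ones in
test_ascend_instancenorm.cc (the default eps of 1e-5 matches the operator's
default in this patch):

    import numpy as np

    def instance_norm_ref(x, scale, bias, eps=1e-5):
        # x has layout (N, C, *spatial); scale and bias have shape (C,).
        axes = tuple(range(2, x.ndim))           # reduce over spatial axes
        mean = x.mean(axis=axes, keepdims=True)  # per-(N, C) mean
        var = x.var(axis=axes, keepdims=True)    # per-(N, C) variance
        shape = (1, -1) + (1,) * (x.ndim - 2)    # broadcast (C,) parameters
        return (scale.reshape(shape) * (x - mean) / np.sqrt(var + eps)
                + bias.reshape(shape))

    # Example: one image, 3 channels, a 2x3 spatial grid.
    x = np.arange(18, dtype=np.float32).reshape(1, 3, 2, 3)
    print(instance_norm_ref(x, np.ones(3, np.float32), np.zeros(3, np.float32)))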