modified format, replace layernorm with instancenorm

This commit is contained in:
xgqdut2016 2024-04-30 15:04:12 +08:00
parent 0fcaf001c4
commit 6a89946736
8 changed files with 32 additions and 38 deletions

View File

@ -12,7 +12,6 @@ class InstanceNormObj : public OperatorObj {
optional<vector<Shape>> inferShape(const TensorVec &inputs) override; optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
std::string toString() const override; std::string toString() const override;
int numInputs() const override { return inputs.size(); } int numInputs() const override { return inputs.size(); }
int numOutputs() const override { return outputs.size(); } int numOutputs() const override { return outputs.size(); }
float getEps() const { return eps; } float getEps() const { return eps; }

View File

@ -337,7 +337,6 @@ class OnnxStub:
(attr.f for attr in node.attribute if attr.name == "epsilon"), (attr.f for attr in node.attribute if attr.name == "epsilon"),
1e-5, 1e-5,
), ),
) )
elif node.op_type == "RMSNorm": elif node.op_type == "RMSNorm":
tensors[node.output[0]] = self.handler.RMSNorm( tensors[node.output[0]] = self.handler.RMSNorm(

View File

@ -9,8 +9,8 @@
#include "operators/element_wise.h" #include "operators/element_wise.h"
#include "operators/expand.h" #include "operators/expand.h"
#include "operators/gather.h" #include "operators/gather.h"
#include "operators/layer_norm.h"
#include "operators/instance_norm.h" #include "operators/instance_norm.h"
#include "operators/layer_norm.h"
#include "operators/lrn.h" #include "operators/lrn.h"
#include "operators/matmul.h" #include "operators/matmul.h"
#include "operators/pad.h" #include "operators/pad.h"
@ -125,12 +125,12 @@ Tensor GraphHandlerObj::layerNormalization(Tensor input, Tensor scale,
->getOutput(); ->getOutput();
} }
} }
Tensor GraphHandlerObj::instanceNormalization(Tensor input, Tensor GraphHandlerObj::instanceNormalization(Tensor input, Tensor output,
Tensor output, Tensor scale, Tensor bias, Tensor scale, Tensor bias,
float eps) { float eps) {
if (output) { if (output) {
g->addOpWithOutputs<InstanceNormObj>(std::move(input), output, std::move(scale), g->addOpWithOutputs<InstanceNormObj>(
std::move(bias), eps); std::move(input), output, std::move(scale), std::move(bias), eps);
return output; return output;
} else { } else {
return g return g

View File

@ -529,7 +529,8 @@ void init_graph_builder(py::module &m) {
.def("matmul", &Handler::matmul, policy::move) .def("matmul", &Handler::matmul, policy::move)
.def("batchNormalization", &Handler::batchNormalization, policy::move) .def("batchNormalization", &Handler::batchNormalization, policy::move)
.def("layerNormalization", &Handler::layerNormalization, policy::move) .def("layerNormalization", &Handler::layerNormalization, policy::move)
.def("instanceNormalization", &Handler::instanceNormalization, policy::move) .def("instanceNormalization", &Handler::instanceNormalization,
policy::move)
.def("RMSNorm", &Handler::rmsNorm, policy::move) .def("RMSNorm", &Handler::rmsNorm, policy::move)
.def("maxPool", &Handler::maxPool, policy::move) .def("maxPool", &Handler::maxPool, policy::move)
.def("avgPool", &Handler::avgPool, policy::move) .def("avgPool", &Handler::avgPool, policy::move)

View File

@ -103,7 +103,7 @@ class InstanceNormAclnn : public ASCENDKernelWithoutConfig {
} }
}; };
REGISTER_KERNEL(Device::ASCEND, OpType::InstanceNormalization, InstanceNormAclnn, REGISTER_KERNEL(Device::ASCEND, OpType::InstanceNormalization,
"InstanceNorm_ASCEND"); InstanceNormAclnn, "InstanceNorm_ASCEND");
}; // namespace infini }; // namespace infini

View File

@ -2,11 +2,9 @@
#include "utils/operator_utils.h" #include "utils/operator_utils.h"
namespace infini { namespace infini {
InstanceNormObj::InstanceNormObj(GraphObj *graph, Tensor input, Tensor output, Tensor scale, InstanceNormObj::InstanceNormObj(GraphObj *graph, Tensor input, Tensor output,
Tensor bias, Tensor scale, Tensor bias, float eps)
float eps) : OperatorObj(OpType::InstanceNormalization, TensorVec{input, scale, bias},
: OperatorObj(OpType::InstanceNormalization,
TensorVec{input, scale, bias},
{output}), {output}),
eps(eps) { eps(eps) {

View File

@ -8,9 +8,10 @@
namespace infini { namespace infini {
void test_instancenormFp32( void test_instancenormFp32(const Shape &inputShape,
const Shape &inputShape, const vector<float> &inputData, const vector<float> &inputData,
const Shape &scaleShape, const vector<float> &scaleData, float eps, const Shape &scaleShape,
const vector<float> &scaleData, float eps,
const vector<float> &ExpectData, const vector<float> &ExpectData,
const Shape &biasShape, const Shape &biasShape,
const vector<float> &biasData) { const vector<float> &biasData) {
@ -18,8 +19,6 @@ void test_instancenormFp32(
Runtime runtime = NativeCpuRuntimeObj::getInstance(); Runtime runtime = NativeCpuRuntimeObj::getInstance();
Graph gCpu = make_ref<GraphObj>(runtime); Graph gCpu = make_ref<GraphObj>(runtime);
auto bias = gCpu->addTensor(biasShape, DataType::Float32); auto bias = gCpu->addTensor(biasShape, DataType::Float32);
auto input = gCpu->addTensor(inputShape, DataType::Float32); auto input = gCpu->addTensor(inputShape, DataType::Float32);
auto scale = gCpu->addTensor(scaleShape, DataType::Float32); auto scale = gCpu->addTensor(scaleShape, DataType::Float32);
@ -44,8 +43,7 @@ void test_instancenormFp32(
scaleNpu->copyin(scaleData); scaleNpu->copyin(scaleData);
ascendRuntime->run(gAscend); ascendRuntime->run(gAscend);
auto oCpu = auto oCpu = gCpu->cloneTensor(op->getOutput()); // move Data from npu to cpu
gCpu->cloneTensor(op->getOutput()); // move Data from npu to cpu
oCpu->printData(); //->printData oCpu->printData(); //->printData
EXPECT_TRUE(oCpu->equalData(ExpectData)); EXPECT_TRUE(oCpu->equalData(ExpectData));
} }
@ -68,7 +66,6 @@ TEST(CUDA_InstancenormFp32, run) {
-0.3674207, 0.0000000, 0.6123678, -0.3674207, 0.0000000, 0.6123678}, -0.3674207, 0.0000000, 0.6123678, -0.3674207, 0.0000000, 0.6123678},
Shape{3}, vector<float>{0, 0, 0}); Shape{3}, vector<float>{0, 0, 0});
aclFinalize(); aclFinalize();
} // python output } // python output