Add: mutator abstract class

Liyan Zheng 2022-08-08 15:52:07 +08:00
parent efa966a3e2
commit 1205240218
8 changed files with 125 additions and 98 deletions

@@ -1,5 +1,6 @@
#pragma once
#include <cassert>
#include <functional>
#include <iostream>
#include <list>
#include <map>

include/core/mutator.h Normal file

@@ -0,0 +1,14 @@
#pragma once
#include "core/graph.h"

namespace infini {

class Mutator {
  public:
    Mutator(){};
    virtual ~Mutator(){};

    virtual vector<Graph> run(const Graph &in_graph) = 0;
};

} // namespace infini

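As a point of reference for the new interface, here is a minimal sketch of a concrete mutator; the subclass name and its no-op behavior are illustrative assumptions, not part of this commit:

    #include "core/mutator.h"

    namespace infini {

    // Hypothetical example subclass: returns the input graph unchanged as the
    // only candidate, i.e. it performs no actual mutation.
    class IdentityMutator : public Mutator {
      public:
        vector<Graph> run(const Graph &in_graph) override { return {in_graph}; }
    };

    } // namespace infini

A real mutator would presumably return several transformed Graph candidates derived from in_graph for a search engine to evaluate.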
@@ -145,57 +145,4 @@ class OperatorNode : public Object {
    virtual int numOutputs() const = 0;
};

class MatmulNode : public OperatorNode {
  public:
    struct MatmulArgs : public OpAttrs {
        int b, m, n, k;
        // PET assumes a row-major tensor layout. transA=false means the
        // default dims; true means A should be transposed before matmul.
        // This is the opposite of column-major BLAS.
        bool transA, transB;
        ActType act;
        MatmulArgs(int b, int m, int n, int k, bool transA, bool transB,
                   ActType act)
            : b(b), m(m), n(n), k(k), transA(transA), transB(transB), act(act) {
        }
        bool operator<(const OpAttrs &rhsGeneric) {
            auto rhs = dynamic_cast<const MatmulArgs &>(rhsGeneric);
            return std::tie(b, m, n, k, transA, transB, act) <
                   std::tie(rhs.b, rhs.m, rhs.n, rhs.k, rhs.transA, rhs.transB,
                            rhs.act);
        }
    };

  private:
    MatmulArgs args;

  public:
    MatmulNode(Tensor A, Tensor B, Tensor C, bool transA = false,
               bool transB = false, Tensor bias = nullptr,
               ActType act = ActType::None);
    std::string toString() const override;
    vector<Shape> computeShape() const override;
    int numInputs() const override { return 2; }
    int numOutputs() const override { return 1; }
    Tensor getBias() const { return inputs[2]; }
    void setAct(ActType act) { this->args.act = act; }
    ActType getAct() const { return args.act; }
    bool getTransA() const { return args.transA; }
    bool getTransB() const { return args.transB; }
    MatmulArgs getArgs() const { return args; }
    OpAttrs getOpAttrs() const override { return args; }

  private:
    // Q: whether to check the output? Since we can build an Op first and then
    // assure output.
    // Fix 1: make shape inference a static method.
    bool checkValid(const TensorVec &inputs) const;
};

} // namespace infini

@@ -0,0 +1,59 @@
#pragma once
#include "core/operator.h"

namespace infini {

class MatmulNode : public OperatorNode {
  public:
    struct MatmulArgs : public OpAttrs {
        int b, m, n, k;
        // PET assumes a row-major tensor layout. transA=false means the
        // default dims; true means A should be transposed before matmul.
        // This is the opposite of column-major BLAS.
        bool transA, transB;
        ActType act;
        MatmulArgs(int b, int m, int n, int k, bool transA, bool transB,
                   ActType act)
            : b(b), m(m), n(n), k(k), transA(transA), transB(transB), act(act) {
        }
        bool operator<(const OpAttrs &rhsGeneric) {
            auto rhs = dynamic_cast<const MatmulArgs &>(rhsGeneric);
            return std::tie(b, m, n, k, transA, transB, act) <
                   std::tie(rhs.b, rhs.m, rhs.n, rhs.k, rhs.transA, rhs.transB,
                            rhs.act);
        }
    };

  private:
    MatmulArgs args;

  public:
    MatmulNode(Tensor A, Tensor B, Tensor C, bool transA = false,
               bool transB = false, Tensor bias = nullptr,
               ActType act = ActType::None);
    std::string toString() const override;
    vector<Shape> computeShape() const override;
    int numInputs() const override { return 2; }
    int numOutputs() const override { return 1; }
    Tensor getBias() const { return inputs[2]; }
    void setAct(ActType act) { this->args.act = act; }
    ActType getAct() const { return args.act; }
    bool getTransA() const { return args.transA; }
    bool getTransB() const { return args.transB; }
    MatmulArgs getArgs() const { return args; }
    OpAttrs getOpAttrs() const override { return args; }

  private:
    // Q: whether to check the output? Since we can build an Op first and then
    // assure output.
    // Fix 1: make shape inference a static method. But OpAttrs are required.
    bool checkValid(const TensorVec &inputs) const;
};

} // namespace infini

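To make the ordering defined above concrete, here is a small sketch of comparing two MatmulArgs values through the std::tie-based operator<; the numbers are made up and this usage is an assumption for illustration, not code from this commit:

    #include "operators/matmul.h"

    void matmulArgsOrderingExample() {
        using namespace infini;
        using MatmulArgs = MatmulNode::MatmulArgs;
        // Identical except for k, so the lexicographic comparison is decided
        // by the fourth field.
        MatmulArgs lhs(1, 64, 64, 32, false, false, ActType::None);
        MatmulArgs rhs(1, 64, 64, 64, false, false, ActType::None);
        bool less = lhs < rhs; // true: 32 < 64 once b, m, n compare equal
        (void)less;
    }

Note that operator< dynamic_casts its argument to MatmulArgs, so comparing against an unrelated OpAttrs subclass would throw std::bad_cast.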
@@ -29,49 +29,4 @@ bool OperatorNode::isMemBoundOp() const {
           type == OpType::Transpose;
}

vector<Shape> MatmulNode::computeShape() const {
    Shape ret{args.b, args.m, args.n};
    return {ret};
}

MatmulNode::MatmulNode(Tensor A, Tensor B, Tensor C, bool transA, bool transB,
                       Tensor bias, ActType act)
    : OperatorNode(OpType::Matmul, {A, B, bias}, {C}),
      args(A->getDims()[0], transA ? A->getDims()[2] : A->getDims()[1],
           transB ? B->getDims()[1] : B->getDims()[2],
           transA ? A->getDims()[1] : A->getDims()[2], transA, transB, act) {
    IT_ASSERT(checkValid(inputs));
}

string MatmulNode::toString() const {
    std::ostringstream os;
    MatmulArgs args = getArgs();
    os << "Matmul([" << (args.transA ? "A^T" : "A") << ","
       << (args.transB ? "B^T" : "B") << ",act=" << (int)args.act
       << "],A=" << inputs[0]->getGuid() << ",B=" << inputs[1]->getGuid()
       << ",C=" << outputs[0]->getGuid() << ")";
    return os.str();
}

bool MatmulNode::checkValid(const TensorVec &inputs) const {
    auto A = inputs[0], B = inputs[1];
    // if (A->getType() == Tensor::Weight && B->getType() == Tensor::Weight)
    //     return false;
    IT_ASSERT(A->getDims().size() == 3 && B->getDims().size() == 3);
    IT_ASSERT(A->getDims()[0] == B->getDims()[0]);
    IT_ASSERT((args.transA ? A->getDims()[1] : A->getDims()[2]) ==
              (args.transB ? B->getDims()[2] : B->getDims()[1]));
    // if (A->getDims().size() != 3 || B->getDims().size() != 3) {
    //     return false;
    // }
    // if (A->getDims()[0] != B->getDims()[0]) {
    //     return false;
    // }
    // if ((args.transA ? A->getDims()[1] : A->getDims()[2]) !=
    //     (args.transB ? B->getDims()[2] : B->getDims()[1])) {
    //     return false;
    // }
    return true;
}
} // namespace infini

@@ -1,3 +1,4 @@
#include "operators/matmul.h"
#include "core/kernel.h"
namespace infini {

src/operators/matmul.cc Normal file

@@ -0,0 +1,49 @@
#include "operators/matmul.h"

namespace infini {

vector<Shape> MatmulNode::computeShape() const {
    Shape ret{args.b, args.m, args.n};
    return {ret};
}

MatmulNode::MatmulNode(Tensor A, Tensor B, Tensor C, bool transA, bool transB,
                       Tensor bias, ActType act)
    : OperatorNode(OpType::Matmul, {A, B, bias}, {C}),
      args(A->getDims()[0], transA ? A->getDims()[2] : A->getDims()[1],
           transB ? B->getDims()[1] : B->getDims()[2],
           transA ? A->getDims()[1] : A->getDims()[2], transA, transB, act) {
    IT_ASSERT(checkValid(inputs));
}

string MatmulNode::toString() const {
    std::ostringstream os;
    MatmulArgs args = getArgs();
    os << "Matmul([" << (args.transA ? "A^T" : "A") << ","
       << (args.transB ? "B^T" : "B") << ",act=" << (int)args.act
       << "],A=" << inputs[0]->getGuid() << ",B=" << inputs[1]->getGuid()
       << ",C=" << outputs[0]->getGuid() << ")";
    return os.str();
}

bool MatmulNode::checkValid(const TensorVec &inputs) const {
    auto A = inputs[0], B = inputs[1];
    // if (A->getType() == Tensor::Weight && B->getType() == Tensor::Weight)
    //     return false;
    IT_ASSERT(A->getDims().size() == 3 && B->getDims().size() == 3);
    IT_ASSERT(A->getDims()[0] == B->getDims()[0]);
    IT_ASSERT((args.transA ? A->getDims()[1] : A->getDims()[2]) ==
              (args.transB ? B->getDims()[2] : B->getDims()[1]));
    // if (A->getDims().size() != 3 || B->getDims().size() != 3) {
    //     return false;
    // }
    // if (A->getDims()[0] != B->getDims()[0]) {
    //     return false;
    // }
    // if ((args.transA ? A->getDims()[1] : A->getDims()[2]) !=
    //     (args.transB ? B->getDims()[2] : B->getDims()[1])) {
    //     return false;
    // }
    return true;
}

} // namespace infini

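A brief worked example of the constructor's dimension handling above; the concrete shapes are made-up values for illustration only:

    // Assume A has dims [2, 128, 64] and B has dims [2, 64, 256], with
    // transA = false and transB = false. The initializer list then derives
    //   b = A->getDims()[0]                 = 2
    //   m = transA ? A dims[2] : A dims[1]  = 128
    //   n = transB ? B dims[1] : B dims[2]  = 256
    //   k = transA ? A dims[1] : A dims[2]  = 64
    // so computeShape() returns {{2, 128, 256}}.
    //
    // With transA = true, A would instead be supplied as [2, 64, 128]; the two
    // ternaries swap which axis is read as m and which as k, yielding the same
    // (b, m, n, k). checkValid() then asserts that both inputs are 3-D, share
    // the batch dimension, and agree on the contracted dimension k.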
@@ -1,5 +1,6 @@
#include "core/graph.h"
#include "core/run_enigne.h"
#include "operators/matmul.h"
#include "test.h"
namespace infini {