forked from jiuyuan/InfiniTensor

fix: fix the reshape op; fix the infershape to no const

parent 973f8a9030
commit f000e211f7
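What the diff below does, in short: the `dims_t`/`shape_t` plumbing that the parent commit threaded through `GraphHandlerObj::reshape`, `ReshapeObj`, and the ONNX frontend is removed; `OperatorObj::inferShape` loses its `const` qualifier (propagated to every operator header) so shape inference may update operator state; and `GraphObj::shape_infer` drops its `Reshape` special case and debug output. The 0/-1 resolution the frontend used to perform follows ONNX `Reshape` semantics: `0` copies the matching input dimension and a single `-1` is inferred so the element count is preserved. A minimal runnable sketch of that rule, mirroring the deleted frontend code (the helper name `resolve_reshape` is ours, not the repository's):

```python
from functools import reduce

def resolve_reshape(input_dims, target):
    # ONNX Reshape: 0 copies the matching input dimension, a single -1
    # is inferred so that the total element count is preserved.
    size = reduce(lambda acc, x: acc * x, input_dims)
    out = [input_dims[i] if x == 0 else x for i, x in enumerate(target)]
    prod = reduce(lambda acc, x: acc * x, out, 1)
    if prod < 0:  # exactly one -1 remains: fill in the inferred dimension
        out[out.index(-1)] = size // -prod
    return out

# worked example: size 2*3*4*5 = 120, so [0, -1, 5] resolves to [2, 12, 5]
assert resolve_reshape([2, 3, 4, 5], [0, -1, 5]) == [2, 12, 5]
```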
@@ -53,7 +53,7 @@ class GraphObj : public Object {
     const TensorVec &getTensors() const { return tensors; }
     const OpVec &getOperators() const { return ops; }
     OpVec getComputeOps() const;
     Tensor getTensorWithUid(int fuid) const;
 
     /**
      * Sort the nodes in topological order.
@@ -58,7 +58,7 @@ class GraphHandlerObj {
     Tensor clip(Tensor x, Tensor y, std::optional<float> min,
                 std::optional<float> max);
     Tensor transpose(Tensor data, Tensor transposed, Shape perm);
-    Tensor reshape(Tensor data, Tensor reshaped, Shape shape, Shape shape_t);
+    Tensor reshape(Tensor data, Tensor reshaped, Shape shape);
    Tensor concat(TensorVec inputs, Tensor output, int dim);
     TensorVec split(Tensor input, std::optional<TensorVec> outputs, int axis,
                     int num_outputs);
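The handler-level `reshape` reverts to a single target-shape argument. Its use is unchanged from `test_frontend` further down in this diff (the `backend` import comes from the test module's header, which is not part of any hunk here):

```python
handler = backend.GraphHandler(backend.cpu_runtime())
x = handler.tensor([1, 2, 3], 12)  # 12 is the dtype code used throughout the tests
y = handler.tensor([3, 2, 1], 12)
handler.reshape(x, y, [3, 2, 1])   # single Shape argument, no shape_t
```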
@@ -73,7 +73,7 @@ class GraphHandlerObj {
     Tensor cast(Tensor input, Tensor output, int to);
     Tensor expand(Tensor input, Tensor output, Shape dims);
     Tensor where(Tensor inputX, Tensor inputY, Tensor condition, Tensor output);
-    // std::vector<int> getDims(Tensor x) { return x->getDims(); }
+    std::vector<int> getDims(Tensor x) { return x->getDims(); }
 
     //------ modifiers
 
@@ -81,9 +81,9 @@ class GraphHandlerObj {
 
     inline void optimize() { g->optimize(); }
 
     inline void shape_infer() { g->shape_infer(); }
 
     void change_shape(const vector<int> &shape, int tensorId);
     //------ runtime
 
     inline void data_malloc() { g->dataMalloc(); }
@@ -55,8 +55,7 @@ class OperatorObj : public Object {
 
   public:
     OperatorObj(OpType opType, TensorVec inputs, TensorVec outputs);
-    virtual optional<vector<Shape>>
-    inferShape(const TensorVec &inputs) const = 0;
+    virtual optional<vector<Shape>> inferShape(const TensorVec &inputs) = 0;
     virtual vector<DataType> inferDataType(const TensorVec &inputs) const;
     /**
      * @brief Constructs outputs (if requried) and check whether the operator is
@@ -105,7 +104,7 @@ class OperatorObj : public Object {
                        const TensorVec &newOutputs) const = 0;
 
   protected:
-    optional<vector<Shape>> inferShape() const;
+    optional<vector<Shape>> inferShape();
     vector<DataType> inferDataType() const;
 
   private:
@@ -25,11 +25,11 @@ class TensorObj : public TensorBaseObj {
     string toString() const override;
 
     size_t size() const { return _size; }
-    void setSize(size_t size) { _size = size;}
+    void setSize(size_t size) { _size = size; }
     size_t getBytes() const { return _size * dtype.getSize(); }
 
     Shape getDims() const { return shape; }
     void setShape(Shape shape_) { shape = shape_; }
     size_t getRank() const { return shape.size(); }
     Shape getStride() const;
     size_t getOffset(const vector<int> &ds) const;
@@ -35,7 +35,7 @@ class G2BMMObj : public OperatorObj {
     OP_CLONE(G2BMMObj);
 
     std::string toString() const override;
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     int numInputs() const override { return 2; }
     int numOutputs() const override { return 1; }
@@ -33,7 +33,7 @@ class GBMMObj : public OperatorObj {
     OP_CLONE(GBMMObj);
 
     std::string toString() const override;
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     int numInputs() const override { return 2; }
     int numOutputs() const override { return 1; }
@@ -7,7 +7,7 @@ class ActivationBackwardObj : public OperatorObj {
     ActivationBackwardObj(OpType type, GraphObj *graph, Tensor y, Tensor diff_y,
                           Tensor x, Tensor diff_x);
     OP_CLONE(ActivationBackwardObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 3; }
@@ -34,7 +34,7 @@ class BatchNormObj : public OperatorObj {
                  Tensor var, Tensor scale, Tensor bias, float momentum = 0.9,
                  float eps = 1e-5, bool trainingMode = false);
     OP_CLONE(BatchNormObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     std::string toString() const override;
 
     // output size will be 3 when training
@@ -22,7 +22,7 @@ class ConcatObj : public OperatorObj {
     ConcatObj(GraphObj *graph, TensorVec inputs, Tensor output, int dim);
     OP_CLONE(ConcatObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return inputs.size(); }
@@ -142,7 +142,7 @@ class ConvObj : public ConvBaseObj {
             ActType act = ActType::None);
     OP_CLONE(ConvObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     int getNumGroups() const override { return c / getChannelPerGroup(); }
 
   private:
@@ -164,7 +164,7 @@ class ConvBackwardFilterObj : public ConvBaseObj {
                           int sh = 1, int sw = 1, int dh = 1, int dw = 1,
                           Tensor bias = nullptr, ActType act = ActType::None);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     ActType getAct() const { return act; }
     int getNumGroups() const override { return c / getChannelPerGroup(); }
 
@@ -191,7 +191,7 @@ class ConvTransposed2dObj : public ConvBaseObj {
                         Tensor bias = nullptr, ActType act = ActType::None);
     OP_CLONE(ConvTransposed2dObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     int getNumGroups() const override { return group; }
     std::pair<int, int> getOutputPadding() const { return {oph, opw}; }
 
@@ -218,7 +218,7 @@ class ConvTransposed2dNHWCObj : public ConvBaseObj {
                             Tensor bias = nullptr, ActType act = ActType::None);
     OP_CLONE(ConvTransposed2dNHWCObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     int getNumGroups() const override { return group; }
 
   private:
@@ -7,7 +7,7 @@ class DetObj : public OperatorObj {
     enum Mode { NormalDet = 0, LogDet };
     DetObj(GraphObj *graph, Tensor input, Tensor output, Mode mode);
     OP_CLONE(DetObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -37,7 +37,7 @@ class DropoutObj : public OperatorObj {
     DropoutObj(GraphObj *graph, Tensor data, Tensor output, Tensor mask,
                float ratio, bool training_mode);
     OP_CLONE(DropoutObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -21,7 +21,7 @@ class ElementWiseObj : public OperatorObj {
      */
     ElementWiseObj(OpType type, GraphObj *graph, Tensor input0, Tensor input1,
                    Tensor output);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 2; }
@@ -38,7 +38,7 @@ class MSELossObj : public OperatorObj {
     MSELossObj(GraphObj *graph, Tensor input0, Tensor input1,
                Reduction reduction, Tensor output);
     OP_CLONE(MSELossObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     Reduction getReduction() const { return reductionMode; }
     std::string toString() const override;
@@ -21,7 +21,7 @@ class ExpandObj : public OperatorObj {
      */
     ExpandObj(GraphObj *graph, Tensor input, Tensor output, Shape dims);
     OP_CLONE(ExpandObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -23,7 +23,7 @@ class ExtendObj : public OperatorObj {
     ExtendObj(GraphObj *graph, Tensor input, Tensor output, int dim,
               int num = 1);
     OP_CLONE(ExtendObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -27,7 +27,7 @@ class GatherObj : public OperatorObj {
     std::string toString() const override;
     int numInputs() const override { return 2; }
     int numOutputs() const override { return 1; }
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     int getAxis() const { return axis; }
     vector<DataType> inferDataType(const TensorVec &inputs) const override;
 
@@ -45,7 +45,7 @@ class MatmulObj : public OperatorObj {
     OP_CLONE(MatmulObj);
 
     std::string toString() const override;
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     int numInputs() const override { return inputs.size(); }
     int numOutputs() const override { return 1; }
@@ -21,7 +21,7 @@ class MemBoundObj : public OperatorObj {
     OP_CLONE(MemBoundObj);
 
     std::string toString() const override;
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     int numInputs() const override { return inputs.size(); }
     int numOutputs() const override { return outputs.size(); }
@@ -27,7 +27,7 @@ class PadObj : public OperatorObj {
            const vector<int> &pads, const optional<vector<int>> &axes);
     OP_CLONE(PadObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     std::string toString() const override;
     int numInputs() const override { return 1; }
     int numOutputs() const override { return 1; }
@@ -37,7 +37,7 @@ class PoolingObj : public OperatorObj {
                int kh, int kw, int dh, int dw, int ph, int pw, int sh, int sw);
     OP_CLONE(PoolingObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     std::string toString() const override;
     int numInputs() const override { return 1; }
     int numOutputs() const override { return 1; }
@@ -23,7 +23,7 @@ class ReduceMeanObj : public OperatorObj {
     ReduceMeanObj(GraphObj *graph, Tensor input, Tensor output,
                   const optional<vector<int>> &axes, bool keepDims = true);
     OP_CLONE(ReduceMeanObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -9,7 +9,7 @@ namespace infini {
  */
 class ReshapeObj : public OperatorObj {
     Shape dims;
-    Shape dims_t = {};
+    Shape outputShape;
 
   public:
     /**
@@ -18,22 +18,20 @@ class ReshapeObj : public OperatorObj {
      * @param graph The computation graph that this operator belongs to.
      * @param input The input tensor.
      * @param output The output tensor.
-     * @param dims The shape of the output tensor.
-     * @param dims_t The origin data of change shape.
+     * @param dims The shape to infer the output shape.
+     * @param outputShape The real shape of output tensor.
      */
     ReshapeObj(GraphObj *graph, Tensor input, Tensor output, Shape dims);
-    ReshapeObj(GraphObj *graph, Tensor input, Tensor output, Shape dims, Shape dims_t);
     OP_CLONE(ReshapeObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
     int numOutputs() const override { return 1; }
 
-    inline Shape getShape() const { return dims; }
-    inline void setShape(Shape shape_) { dims = shape_; }
-    inline Shape getShape_t() const { return dims_t; }
+    inline Shape getShape() const { return outputShape; }
+    inline Shape getDims() const { return dims; }
 
   private:
     vector<int> getWorkloadVector() const override;
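With `dims_t` gone, `ReshapeObj` keeps the requested `dims` (which may still contain `0`/`-1` entries) and a separately resolved `outputShape`; `getShape()` now returns the resolved shape while `getDims()` exposes the raw specification. The body of the non-const `inferShape` is not part of this section, so the following is only a hedged Python model of the intended state split, reusing the `resolve_reshape` helper sketched near the top:

```python
class ReshapeState:
    """Hypothetical mirror of the reworked ReshapeObj members, not repo code."""

    def __init__(self, dims):
        self.dims = dims          # raw target spec, may contain 0 / -1
        self.output_shape = None  # the "real shape of output tensor"

    def infer_shape(self, input_dims):
        # presumably what the non-const inferShape does: resolve, cache, return
        self.output_shape = resolve_reshape(input_dims, self.dims)
        return self.output_shape
```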
@@ -60,7 +58,7 @@ class FlattenObj : public OperatorObj {
     FlattenObj(GraphObj *graph, Tensor input, Tensor output, int axis);
     OP_CLONE(FlattenObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -90,7 +88,7 @@ class IdentityObj : public OperatorObj {
     IdentityObj(GraphObj *graph, Tensor input, Tensor output);
     OP_CLONE(IdentityObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -60,7 +60,7 @@ class ResizeObj : public OperatorObj {
 
     // Operator clone(TensorVec inputs, TensorVec outputs) override;
     vector<DataType> inferDataType(const TensorVec &inputs) const override;
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     std::string toString() const override;
     int numInputs() const override { return inputs.size(); }
     int numOutputs() const override { return 1; }
@@ -32,7 +32,7 @@ class SliceObj : public OperatorObj {
              const optional<vector<int>> &steps);
     OP_CLONE(SliceObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     std::string toString() const override;
     inline int numInputs() const override { return 1; }
     inline int numOutputs() const override { return 1; }
@@ -10,7 +10,7 @@ class SoftmaxObj : public OperatorObj {
 
     OP_CLONE(SoftmaxObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override {
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override {
         return {{inputs[0]->getDims()}};
     };
 
@@ -37,7 +37,7 @@ class SplitObj : public OperatorObj {
              int dim, const vector<int> &ratio);
     OP_CLONE(SplitObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -7,7 +7,7 @@ class TransposeObj : public OperatorObj {
     TransposeObj(GraphObj *graph, Tensor input, Tensor output,
                  vector<int> permute);
     OP_CLONE(TransposeObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -17,7 +17,7 @@ class UnaryObj : public OperatorObj {
      * @param output The output tensor.
      */
     UnaryObj(OpType type, GraphObj *graph, Tensor input, Tensor output);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -33,7 +33,7 @@ class ClipObj : public OperatorObj {
     ClipObj(GraphObj *graph, Tensor input, Tensor output,
             std::optional<float> min, std::optional<float> max);
     OP_CLONE(ClipObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     std::optional<float> getMin() const { return minValue; };
@@ -52,7 +52,7 @@ class HardtanhObj : public OperatorObj {
     HardtanhObj(GraphObj *graph, Tensor input, Tensor output, float min,
                 float max);
     OP_CLONE(HardtanhObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     float getMin() const { return minValue; };
@@ -70,7 +70,7 @@ class FlipObj : public OperatorObj {
   public:
     FlipObj(GraphObj *graph, Tensor input, Tensor output, vector<int> axis);
     OP_CLONE(FlipObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     vector<int> getAxis() const { return axisValue; };
@@ -87,7 +87,7 @@ class FillObj : public OperatorObj {
   public:
     FillObj(GraphObj *graph, Tensor input, Tensor output, float value);
     OP_CLONE(FillObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     float getValue() const { return setValue; };
@@ -104,7 +104,7 @@ class L2LossObj : public OperatorObj {
   public:
     L2LossObj(GraphObj *graph, Tensor input, Tensor output);
     OP_CLONE(L2LossObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -120,7 +120,7 @@ class TransformObj : public OperatorObj {
     TransformObj(GraphObj *graph, Tensor input, Tensor output, float alpha,
                  float beta);
     OP_CLONE(TransformObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     float getAlpha() const { return alphaValue; }
@@ -165,7 +165,7 @@ class CastObj : public OperatorObj {
   public:
     CastObj(GraphObj *graph, Tensor input, Tensor output, CastType type);
     OP_CLONE(CastObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
     vector<DataType> inferDataType(const TensorVec &inputs) const override;
 
     std::string toString() const override;
@@ -185,7 +185,7 @@ class CumsumObj : public OperatorObj {
     CumsumObj(GraphObj *graph, Tensor input, Tensor output, int axis,
               bool exclusive, bool reverse);
     OP_CLONE(CumsumObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int getAxis() const { return axisValue; }
@@ -205,7 +205,7 @@ class ShapeObj : public OperatorObj {
   public:
     ShapeObj(GraphObj *graph, Tensor input, Tensor output);
     OP_CLONE(ShapeObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 1; }
@@ -216,7 +216,7 @@ class PReluObj : public OperatorObj {
   public:
     PReluObj(GraphObj *graph, Tensor input, Tensor alpha, Tensor output);
     OP_CLONE(PReluObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return 2; }
@@ -236,7 +236,7 @@ class LogObj : public OperatorObj {
     };
     LogObj(GraphObj *graph, Tensor input, Tensor output, LogType type);
     OP_CLONE(LogObj);
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     LogType getType() const { return logType; }
@@ -22,7 +22,7 @@ class WhereObj : public OperatorObj {
              Tensor output);
     OP_CLONE(WhereObj);
 
-    optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
+    optional<vector<Shape>> inferShape(const TensorVec &inputs) override;
 
     std::string toString() const override;
     int numInputs() const override { return inputs.size(); }
@@ -441,21 +441,11 @@ class OnnxStub:
                         perm,
                     )
                 elif node.op_type == "Reshape":
-                    dims = _search_shape(model, node.input[0])
-                    size = reduce(lambda acc, x: acc * x, dims)
-                    input_shape = _parse_data(data[node.input[1]])
-                    origin_shape = input_shape.copy()
-                    for i, x in enumerate(input_shape):
-                        if x == 0:
-                            input_shape[i] = dims[i]
-                    temp = reduce(lambda acc, x: acc * x, input_shape, 1)
-                    if temp < 0:
-                        input_shape[input_shape.index(-1)] = size // -temp
+                    shape = _parse_data(data[node.input[1]])
                     tensors[node.output[0]] = self.handler.reshape(
                         tensors[node.input[0]],
                         tensors.get(node.output[0]),
-                        input_shape,
-                        origin_shape,
+                        shape,
                     )
                 elif node.op_type == "Squeeze":
                     input_shape = _search_shape(model, node.input[0])
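The frontend no longer resolves `0`/`-1` itself; the raw target shape is forwarded to the backend unchanged. As a quick sanity check against the `test_reshape` case later in this diff: the initializer there holds the target `[5, 3, 8]`, which already preserves the element count of the `[2, 3, 4, 5]` input, so no resolution is needed either way:

```python
assert 2 * 3 * 4 * 5 == 5 * 3 * 8 == 120
```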
@@ -957,10 +947,18 @@ class OnnxStub:
 
     def set_input(self, inputShapes: List[int]) -> None:
         for newInput, oldInput in zip(inputShapes, self.inputs):
-            oldTensor = self.inputs[oldInput];
+            oldTensor = self.inputs[oldInput]
             self.handler.change_shape(newInput, oldTensor.fuid())
+        self.handler.shape_infer()
+        self.handler.data_malloc()
 
-        # self.handler.data_malloc()
+    def getShape(self, name: str) -> List[int]:
+        if name in self.inputs:
+            ans = self.handler.getDims(self.inputs[name])
+        else:
+            ans = self.handler.getDims(self.outputs[name])
+        return ans
 
     def tune(self) -> None:
         self.handler.tune()
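Taken together, `set_input` now drives the whole dynamic-shape pipeline: `change_shape` rewrites each input tensor's dims, `shape_infer` propagates them through the graph, and `data_malloc` reallocates buffers. Usage as in the `TestDynamicTensor` case added at the end of this diff:

```python
model = OnnxStub(onnx.load("resnet18-v2-7.onnx"), backend.cpu_runtime())
output_key = list(model.outputs.keys())[0]
model.getShape(output_key)            # [1, 1000]
model.set_input([[5, 3, 224, 224]])   # grow the batch dimension to 5
model.getShape(output_key)            # [5, 1000]
```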
@@ -968,13 +966,6 @@ class OnnxStub:
     def run(self) -> None:
         self.handler.run()
 
-    # def getShape(self, name: str) -> List[int]:
-    #     if name in self.inputs:
-    #         ans = self.handler.getDims(self.inputs[name])
-    #     else:
-    #         ans = self.handler.getDims(self.outputs[name])
-    #     return ans
-
     def get_perf_time(self) -> float:
         self.handler.get_perf_time()
 
@@ -46,358 +46,345 @@ class TestStringMethods(unittest.TestCase):
         )
         model = infer_shapes(model)
 
-    def test_dynamic_tensor(self):
-        model_file = r"/home/zhangyunze/InfiniTensor/resnet18-v2-7.onnx"
-        model = OnnxStub(onnx.load(model_file), backend.cpu_runtime())
-        # for input_name in model.inputs:
-        #     print(model.getShape(input_name))
-        # for output_name in model.outputs:
-        #     print(model.getShape(output_name))
-        model.set_input([[5,3,224,224]])
-        # for input_name in model.inputs:
-        #     print(model.getShape(input_name))
-        # for output_name in model.outputs:
-        #     print(model.getShape(output_name))
-
     def test_tensor(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
         make_and_import_model(make_graph([], "tensor", [x], [x]))
 
     def test_conv(self):
         i = make_tensor_value_info("i", TensorProto.FLOAT, [1, 3, 4, 4])
         w = make_tensor_value_info("w", TensorProto.FLOAT, [2, 3, 3, 3])
         o = make_tensor_value_info("o", TensorProto.FLOAT, [1, 2, 2, 2])
         conv = make_node(
             "Conv",
             ["i", "w"],
             ["o"],
             "conv",
             pads=[1, 1, 1, 1],
             strides=[2, 1],
             dilations=[1, 2],
         )
         make_and_import_model(make_graph([conv], "conv", [i, w], [o]))
 
     def test_conv_fp16(self):
         i = make_tensor_value_info("i", TensorProto.FLOAT16, [1, 3, 4, 4])
         w = make_tensor_value_info("w", TensorProto.FLOAT16, [2, 3, 3, 3])
         o = make_tensor_value_info("o", TensorProto.FLOAT16, [1, 2, 2, 2])
         conv = make_node(
             "Conv",
             ["i", "w"],
             ["o"],
             "conv",
             pads=[1, 1, 1, 1],
             strides=[2, 1],
             dilations=[1, 2],
         )
         make_and_import_model(make_graph([conv], "conv_fp16", [i, w], [o]))
 
     def test_conv_bfp16(self):
         i = make_tensor_value_info("i", TensorProto.BFLOAT16, [1, 3, 4, 4])
         w = make_tensor_value_info("w", TensorProto.BFLOAT16, [2, 3, 3, 3])
         o = make_tensor_value_info("o", TensorProto.BFLOAT16, [1, 2, 2, 2])
         conv = make_node(
             "Conv",
             ["i", "w"],
             ["o"],
             "conv",
             pads=[1, 1, 1, 1],
             strides=[2, 1],
             dilations=[1, 2],
         )
         make_and_import_model(make_graph([conv], "conv_bfp16", [i, w], [o]))
 
     def test_matmul(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 4])
         xa = make_tensor_value_info("xa", TensorProto.FLOAT, [1, 2, 4])
         matmul = make_node("MatMul", ["x", "a"], ["xa"], name="matmul")
         make_and_import_model(make_graph([matmul], "matmul", [x, a], [xa]))
 
     def test_gemm(self):
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 2, 3])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 4, 3])
         c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 2, 4])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 2, 4])
         gemm = make_node("Gemm", ["a", "b", "c"], ["y"], transB=1, name="gemm")
         make_and_import_model(make_graph([gemm], "gemm", [a, b, c], [y]))
 
     def test_batch_norm(self):
         x = make_tensor_value_info("x", TensorProto.UINT32, [1, 3, 2, 2])
         scale = make_tensor_value_info("scale", TensorProto.FLOAT, [3])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [3])
         mean = make_tensor_value_info("mean", TensorProto.FLOAT, [3])
         var = make_tensor_value_info("var", TensorProto.FLOAT, [3])
         y = make_tensor_value_info("y", TensorProto.UINT32, [1, 3, 2, 2])
         batch_norm = make_node(
             "BatchNormalization",
             ["x", "scale", "b", "mean", "var"],
             ["y"],
             name="batchNormalization",
         )
         make_and_import_model(
             make_graph([batch_norm], "batchNormalzation", [x, scale, b, mean, var], [y])
         )
 
     def test_max_pool(self):
         x = make_tensor_value_info("x", TensorProto.UINT32, [1, 64, 162, 162])
         y = make_tensor_value_info("y", TensorProto.UINT32, [1, 64, 80, 80])
         pool = make_node(
             "MaxPool",
             ["x"],
             ["y"],
             kernel_shape=[3, 3],
             dilations=[1, 1],
             pads=[0, 0, 0, 0],
             strides=[2, 2],
             name="maxPool",
         )
         make_and_import_model(make_graph([pool], "maxPool", [x], [y]))
 
     def test_avg_pool(self):
         x = make_tensor_value_info("x", TensorProto.UINT32, [1, 64, 162, 162])
         y = make_tensor_value_info("y", TensorProto.UINT32, [1, 64, 80, 80])
         pool = make_node(
             "AveragePool",
             ["x"],
             ["y"],
             kernel_shape=[3, 3],
             pads=[0, 0, 0, 0],
             strides=[2, 2],
             name="avgPool",
         )
         make_and_import_model(make_graph([pool], "avgPool", [x], [y]))
 
     def test_global_avg_pool(self):
         x = make_tensor_value_info("x", TensorProto.UINT32, [30, 30, 30, 30])
         y = make_tensor_value_info("y", TensorProto.UINT32, [30, 30, 1, 1])
         pool = make_node(
             "GlobalAveragePool",
             ["x"],
             ["y"],
             name="globalAvgPool",
         )
         make_and_import_model(make_graph([pool], "avgPool", [x], [y]))
 
     def test_add(self):
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
         c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
         add = make_node("Add", ["a", "b"], ["c"], name="add")
         make_and_import_model(make_graph([add], "add", [a, b], [c]))
 
     def test_sub(self):
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
         c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
         sub = make_node("Sub", ["a", "b"], ["c"], name="sub")
         make_and_import_model(make_graph([sub], "sub", [a, b], [c]))
 
     def test_mul(self):
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
         c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
         mul = make_node("Mul", ["a", "b"], ["c"], name="mul")
         make_and_import_model(make_graph([mul], "mul", [a, b], [c]))
 
     def test_div(self):
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
         c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
         div = make_node("Div", ["a", "b"], ["c"], name="div")
         make_and_import_model(make_graph([div], "div", [a, b], [c]))
 
     def test_pow(self):
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
         c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
         pow = make_node("Pow", ["a", "b"], ["c"], name="pow")
         make_and_import_model(make_graph([pow], "pow", [a, b], [c]))
 
     def test_relu(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         relu = make_node("Relu", ["x"], ["y"], name="relu")
         make_and_import_model(make_graph([relu], "relu", [x], [y]))
 
     def test_erf(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         erf = make_node("Erf", ["x"], ["y"], name="erf")
         make_and_import_model(make_graph([erf], "erf", [x], [y]))
 
     def test_sqrt(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         sqrt = make_node("Sqrt", ["x"], ["y"], name="sqrt")
         make_and_import_model(make_graph([sqrt], "sqrt", [x], [y]))
 
     def test_sigmoid(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         sigmoid = make_node("Sigmoid", ["x"], ["y"], name="sigmoid")
         make_and_import_model(make_graph([sigmoid], "sigmoid", [x], [y]))
 
     def test_tanh(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         tanh = make_node("Tanh", ["x"], ["y"], name="tanh")
         make_and_import_model(make_graph([tanh], "tanh", [x], [y]))
 
     def test_softmax(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         softmax = make_node("Softmax", ["x"], ["y"], axis=2, name="softmax")
         make_and_import_model(make_graph([softmax], "softmax", [x], [y]))
 
     def test_abs(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         abs = make_node("Abs", ["x"], ["y"], name="abs")
         make_and_import_model(make_graph([abs], "abs", [x], [y]))
 
     def test_identity(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         identity = make_node("Identity", ["x"], ["y"], name="identity")
         make_and_import_model(make_graph([identity], "identity", [x], [y]))
 
     def test_flatten(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1 * 3, 5 * 7])
         flatten = make_node("Flatten", ["x"], ["y"], axis=2, name="flatten")
         make_and_import_model(make_graph([flatten], "flatten", [x], [y]))
 
     def test_reshape(self):
         data = make_tensor_value_info("data", TensorProto.FLOAT, [2, 3, 4, 5])
         shape = make_tensor_value_info("shape", TensorProto.INT64, [3])
         shape_data = make_tensor("shape", TensorProto.INT64, [3], [5, 3, 8])
         reshaped = make_tensor_value_info(
             "reshaped", TensorProto.FLOAT, shape_data.int64_data
         )
         reshape = make_node("Reshape", ["data", "shape"], ["reshaped"], name="reshape")
         make_and_import_model(
             make_graph([reshape], "reshape", [data, shape], [reshaped], [shape_data])
         )
 
     def test_concat(self):
         input1 = make_tensor_value_info("input1", TensorProto.FLOAT, [1, 3, 2, 4])
         input2 = make_tensor_value_info("input2", TensorProto.FLOAT, [1, 3, 2, 5])
         output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 9])
         concat = make_node(
             "Concat", ["input1", "input2"], ["output"], axis=3, name="concat"
         )
         make_and_import_model(
             make_graph([concat], "concat", [input1, input2], [output])
         )
 
     def test_gather(self):
         data = make_tensor_value_info("data", TensorProto.FLOAT, [1, 3, 4, 4])
         indices = make_tensor_value_info("indices", TensorProto.FLOAT, [2, 1, 2])
         output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 2, 1, 2, 4, 4])
         gather = make_node(
             "Gather", ["data", "indices"], ["output"], axis=1, name="gather"
         )
         make_and_import_model(make_graph([gather], "gather", [data, indices], [output]))
 
     def test_reduce_mean(self):
         data = make_tensor_value_info("data", TensorProto.FLOAT, [2, 3, 3, 4])
         reduced = make_tensor_value_info("reduced", TensorProto.FLOAT, [1, 1, 1, 1])
         reduceMean = make_node(
             "ReduceMean", ["data"], ["reduced"], keepdims=1, name="reduceMean"
         )
         make_and_import_model(make_graph([reduceMean], "reduceMean", [data], [reduced]))
 
     def test_slice(self):
         data = make_tensor_value_info("data", TensorProto.UINT32, [10, 64, 162, 162])
         output = make_tensor_value_info("output", TensorProto.UINT32, [1, 1, 99, 95])
         starts = make_tensor("starts", TensorProto.INT64, [4], [2, 9, 1, 5])
         ends = make_tensor("ends", TensorProto.INT64, [4], [3, 10, 100, 100])
         slice = make_node("Slice", ["data", "starts", "ends"], ["output"], name="slice")
         make_and_import_model(
             make_graph(
                 [slice],
                 "slice",
                 [data],
                 [output],
                 [starts, ends],
             )
         )
 
     def test_pad(self):
         data = make_tensor_value_info("data", TensorProto.UINT32, [1, 64, 162, 162])
         output = make_tensor_value_info("output", TensorProto.UINT32, [3, 84, 164, 172])
         pads = make_tensor_value_info("pads", TensorProto.INT64, [8])
         pads_data = make_tensor(
             "pads", TensorProto.INT64, [8], [2, 10, 1, 5, 0, 10, 1, 5]
         )
         pad = make_node("Pad", ["data", "pads"], ["output"], name="pad")
         make_and_import_model(
             make_graph(
                 [pad],
                 "pad",
                 [data, pads],
                 [output],
                 [pads_data],
             )
         )
 
     # see <https://onnx.ai/onnx/intro/python.html#a-simple-example-a-linear-regression>
     def test_linear(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
         a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 4])
         b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 2, 4])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 2, 4])
         matmul = make_node("MatMul", ["x", "a"], ["xa"], name="matmul")
         add = make_node("Add", ["xa", "b"], ["y"], name="add")
         graph = make_graph([matmul, add], "lr", [x, a, b], [y])
         model = make_model(graph)
         check_model(model)
         from_onnx(model, backend.cpu_runtime())
 
     def test_frontend(self):
         handler = backend.GraphHandler(backend.cpu_runtime())
         a = handler.tensor([1, 2, 3], 12)
         b = handler.tensor([1, 2, 3], 12)
         c = handler.tensor([1, 2, 3], 12)
         d = handler.tensor([1, 2, 3], 12)
         e = handler.tensor([1, 2, 3], 12)
         x = handler.add(
             handler.add(handler.add(handler.add(a, b, None), c, None), d, None), e, None
         )
         y = handler.tensor([3, 2, 1], 12)
         handler.reshape(x, y, [3, 2, 1])
 
     def test_cast(self):
         input1 = make_tensor_value_info("input1", TensorProto.FLOAT, [1, 3, 2, 4])
         output = make_tensor_value_info("output", TensorProto.FLOAT16, [1, 3, 2, 4])
         cast = make_node(
             "Cast", ["input1"], ["output"], to=TensorProto.FLOAT16, name="cast"
         )
         make_and_import_model(make_graph([cast], "cast", [input1], [output]))
 
     def test_expand(self):
         data = make_tensor_value_info("data", TensorProto.FLOAT, [3, 1])
         dim = make_tensor_value_info("dim", TensorProto.INT64, [3])
         dim_data = make_tensor("dim", TensorProto.INT64, [3], [2, 1, 6])
         output = make_tensor_value_info("output", TensorProto.FLOAT, [2, 3, 6])
         expand = make_node("Expand", ["data", "dim"], ["output"], name="expand")
         make_and_import_model(
             make_graph([expand], "expand", [data, dim], [output], [dim_data])
         )
 
     def test_where(self):
         x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
         y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
         con = make_tensor_value_info("con", TensorProto.BOOL, [1, 3, 5, 7])
         output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 5, 7])
         where = make_node("Where", ["x", "y", "con"], ["output"], name="where")
         make_and_import_model(make_graph([where], "where", [x, y, con], [output]))
 
     def test_copyin(self):
-        dims = [2,3,5,4]
+        dims = [2, 3, 5, 4]
         np_array = np.random.random(dims).astype(np.float32)
         handler = backend.GraphHandler(backend.cpu_runtime())
         tensor1 = handler.tensor(dims, TensorProto.FLOAT)
@@ -423,7 +410,7 @@ class TestStringMethods(unittest.TestCase):
         self.assertTrue(np.array_equal(np.array(array1).reshape(dims), np_array))
 
     def test_to_numpy(self):
-        dims = [2,3,5,4]
+        dims = [2, 3, 5, 4]
         np_array = np.random.random(dims).astype(np.float32)
         handler = backend.GraphHandler(backend.cpu_runtime())
         tensor1 = handler.tensor(dims, TensorProto.FLOAT)
@ -444,5 +431,22 @@ class TestStringMethods(unittest.TestCase):
|
|||
array1 = np.array(tensor1, copy=False)
|
||||
self.assertTrue(np.array_equal(array1, np_array))
|
||||


class TestDynamicTensor(unittest.TestCase):
    def test_dynamic_tensor(self):
        filename = r"resnet18-v2-7.onnx"
        current_path = os.getcwd()
        model_file = ""
        for root, dirs, files in os.walk(current_path):
            if filename in files:
                model_file = os.path.join(root, filename)
        model = OnnxStub(onnx.load(model_file), backend.cpu_runtime())
        output_key = list(model.outputs.keys())[0]
        old_output_shape = model.getShape(output_key)
        self.assertEqual(old_output_shape, [1, 1000])
        model.set_input([[5, 3, 224, 224]])
        new_output_shape = model.getShape(output_key)
        self.assertEqual(new_output_shape, [5, 1000])


if __name__ == "__main__":
    unittest.main()
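
test_dynamic_tensor drives the new shape-propagation path end to end. A rough sketch of what a resize looks like through the handler bindings added in this commit (the fuid argument and the call order are assumptions based on the C++ below, not a verbatim excerpt of OnnxStub):

    # `handler` is a backend.GraphHandler as in the tests above; `fuid` is the
    # unique id of the graph input whose shape changes (an assumption here).
    def resize_input(handler, fuid, new_dims, output_tensor):
        handler.change_shape(new_dims, fuid)   # overwrite the input tensor's shape
        handler.shape_infer()                  # propagate shapes through every op
        return handler.getDims(output_tensor)  # e.g. [5, 1000] for batch size 5
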
@ -1,8 +1,8 @@
#include "core/graph.h"
#include <algorithm>
#include <queue>
#include <numeric>
#include "operators/reshape.h"
#include <algorithm>
#include <numeric>
#include <queue>

namespace infini {

@ -126,55 +126,35 @@ void GraphObj::optimize() {
}

Tensor GraphObj::getTensorWithUid(int fuid) const {
    for (auto tensor : tensors) {
        if (tensor->getFuid() == fuid) {
            return tensor;
        }
    }
    return nullptr;
}

void GraphObj::shape_infer() {
    for (auto &op : ops) {
        if (op->getOpType() == OpType::Reshape) {
            auto reshape = dynamic_cast<ReshapeObj *>(op.get());
            auto input = reshape->getInputs(0)->getDims();
            auto size = reshape->getInputs(0)->size();
            Shape ans = reshape->getShape_t();
            int index = -1;
            for (int i = 0; i < (int)ans.size(); ++i) {
                if (ans[i] == 0) {
                    ans[i] = input[i];
                }
                if (ans[i] == -1) {
                    index = i;
                }
            }
            if (index != -1) {
                int temp = (int)size /
                           (-std::accumulate(ans.begin(), ans.end(), 1,
                                             [](auto acc, auto x) { return acc * x; }));
                ans[index] = temp;
            }
            reshape->setShape(ans);
        }
        auto ans = op->inferShape();
        IT_ASSERT(ans.has_value());
        std::cout << "optype = " << op->getOpType().toString() << std::endl;
        auto oldOutputs = op->getOutputs();
        IT_ASSERT(ans.value().size() == oldOutputs.size());
        for (int i = 0; i < (int)ans.value().size(); ++i) {
            auto newShape = ans.value()[i];
            std::cout << vecToString(newShape) << std::endl;
            auto oldShape = oldOutputs[i]->getDims();
            auto fuid = oldOutputs[i]->getFuid();
            if (newShape != oldShape) {
                auto tensor = this->getTensorWithUid(fuid);
                tensor->setShape(newShape);
                size_t size = std::accumulate(newShape.begin(), newShape.end(), 1,
                                              [](auto acc, auto x) { return acc * x; });
                tensor->setSize(size);
                std::cout << "replace newShape over" << std::endl;
            }
        }
    }
    for (auto &op : ops) {
        auto ans = op->inferShape();
        IT_ASSERT(ans.has_value());
        auto oldOutputs = op->getOutputs();
        IT_ASSERT(ans.value().size() == oldOutputs.size());
        // replace the old output shape and size with the new ones
        for (int i = 0; i < (int)ans.value().size(); ++i) {
            auto newShape = ans.value()[i];
            auto oldShape = oldOutputs[i]->getDims();
            auto fuid = oldOutputs[i]->getFuid();
            if (newShape != oldShape) {
                auto tensor = this->getTensorWithUid(fuid);
                tensor->setShape(newShape);
                size_t size = std::accumulate(newShape.begin(), newShape.end(), 1,
                                              [](auto acc, auto x) { return acc * x; });
                tensor->setSize(size);
            }
        }
    }
}
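
For readers skimming the diff, a compact Python model of what the rewritten loop does (a toy sketch with assumed duck-typed ops, not the real bindings): visit each operator in the already-topologically-sorted list, recompute its output shapes from its inputs, and overwrite any tensor whose stored shape went stale.

    from dataclasses import dataclass
    from math import prod

    @dataclass
    class ToyTensor:
        shape: list
        size: int = 0

    def shape_infer(ops, tensors):
        # ops: [(infer_fn, input_ids, output_ids)], topologically sorted
        for infer_fn, in_ids, out_ids in ops:
            new_shapes = infer_fn([tensors[i].shape for i in in_ids])
            for tid, new_shape in zip(out_ids, new_shapes):
                if tensors[tid].shape != new_shape:
                    tensors[tid].shape = new_shape
                    tensors[tid].size = prod(new_shape)

    tensors = {0: ToyTensor([5, 3, 224, 224]), 1: ToyTensor([1, 1000])}
    ops = [(lambda ins: [[ins[0][0], 1000]], [0], [1])]  # stand-in classifier
    shape_infer(ops, tensors)
    assert tensors[1].shape == [5, 1000] and tensors[1].size == 5000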

void GraphObj::dataMalloc() {

@ -212,13 +212,13 @@ Tensor GraphHandlerObj::transpose(Tensor data, Tensor transposed, Shape perm) {
    }
}

Tensor GraphHandlerObj::reshape(Tensor data, Tensor reshaped, Shape shape, Shape shape_t) {
Tensor GraphHandlerObj::reshape(Tensor data, Tensor reshaped, Shape shape) {
    if (reshaped) {
        g->addOpWithOutputs<ReshapeObj>(std::move(data), reshaped,
                                        std::move(shape), std::move(shape_t));
                                        std::move(shape));
        return reshaped;
    } else {
        return g->addOp<ReshapeObj>(std::move(data), reshaped, std::move(shape), std::move(shape_t))
        return g->addOp<ReshapeObj>(std::move(data), reshaped, std::move(shape))
            ->getOutput();
    }
}
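
With the redundant shape_t parameter gone, the Python front end can pass the ONNX shape attribute through unchanged; the call site then looks like the reshape test near the top of this diff (dtype code 12 mirrors that test and is purely illustrative):

    handler = backend.GraphHandler(backend.cpu_runtime())
    x = handler.tensor([1, 2, 3], 12)
    y = handler.tensor([3, 2, 1], 12)
    handler.reshape(x, y, [3, 2, 1])  # a single Shape argument after this commit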

@ -433,8 +433,10 @@ static DataType dtype_repr_convert(int dtype) {
}

void GraphHandlerObj::change_shape(const vector<int> &shape, int tensorId) {
    auto tensor = g->getTensorWithUid(tensorId);
    tensor->setShape(shape);
    auto tensor = g->getTensorWithUid(tensorId);
    IT_ASSERT(tensor != nullptr);
    IT_ASSERT(shape.size() != 0);
    tensor->setShape(shape);
}

} // namespace infini

@ -77,9 +77,7 @@ bool OperatorObj::checkValid(GraphObj *graph) {
    return true;
}

optional<vector<Shape>> OperatorObj::inferShape() const {
    return inferShape(inputs);
}
optional<vector<Shape>> OperatorObj::inferShape() { return inferShape(inputs); }

vector<DataType> OperatorObj::inferDataType(const TensorVec &inputs) const {
    auto dataType = inputs[0]->getDType();

@ -448,7 +448,7 @@ void init_graph_builder(py::module &m) {
        .def("run", &Handler::run, policy::automatic)
        .def("shape_infer", &Handler::shape_infer, policy::automatic)
        .def("change_shape", &Handler::change_shape, policy::automatic)
        // .def("getDims", &Handler::getDims, policy::automatic)
        .def("getDims", &Handler::getDims, policy::automatic)
        .def("get_perf_time", &Handler::get_perf_time, policy::automatic);
}

@ -20,7 +20,7 @@ string G2BMMObj::toString() const {
    return os.str();
}

optional<vector<Shape>> G2BMMObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> G2BMMObj::inferShape(const TensorVec &inputs) {
    auto A = inputs[0], B = inputs[1];

    IT_ASSERT(A->getRank() == 3 && B->getRank() == 3);

@ -21,7 +21,7 @@ string GBMMObj::toString() const {
    return os.str();
}

optional<vector<Shape>> GBMMObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> GBMMObj::inferShape(const TensorVec &inputs) {
    auto A = inputs[0], B = inputs[1];

    IT_ASSERT(A->getRank() == 3 && B->getRank() == 3);

@ -9,7 +9,7 @@ ActivationBackwardObj::ActivationBackwardObj(OpType type, GraphObj *graph,
}

optional<vector<Shape>>
ActivationBackwardObj::inferShape(const TensorVec &inputs) const {
ActivationBackwardObj::inferShape(const TensorVec &inputs) {
    return {{inputs[0]->getDims()}};
}

@ -13,8 +13,7 @@ BatchNormObj::BatchNormObj(GraphObj *graph, Tensor input, Tensor output,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>>
BatchNormObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> BatchNormObj::inferShape(const TensorVec &inputs) {
    auto input = inputs[0];
    auto mean = inputs[1];
    auto var = inputs[2];

@ -9,7 +9,7 @@ ConcatObj::ConcatObj(GraphObj *graph, TensorVec inputs, Tensor output, int dim)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ConcatObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ConcatObj::inferShape(const TensorVec &inputs) {
    IT_ASSERT(inputs.size() > 1);
    Shape dims = inputs[0]->getDims();
    auto rank = inputs[0]->getRank();

@ -82,7 +82,7 @@ ConvObj::ConvObj(GraphObj *graph, Tensor input, Tensor weight, Tensor output,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ConvObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ConvObj::inferShape(const TensorVec &inputs) {
    const auto &input = inputs[0], &weight = inputs[1];
    auto n = input->getDims()[0];
    auto h = input->getDims()[2];

@ -141,7 +141,7 @@ ConvTransposed2dObj::ConvTransposed2dObj(GraphObj *graph, Tensor input,
}

optional<vector<Shape>>
ConvTransposed2dObj::inferShape(const TensorVec &inputs) const {
ConvTransposed2dObj::inferShape(const TensorVec &inputs) {
    const Tensor &input = inputs[0], &weight = inputs[1];
    auto n = input->getDims()[0];
    auto f = input->getDims()[1];

@ -219,7 +219,7 @@ ConvBackwardFilterObj::ConvBackwardFilterObj(GraphObj *graph, Tensor inputX,
}

optional<vector<Shape>>
ConvBackwardFilterObj::inferShape(const TensorVec &inputs) const {
ConvBackwardFilterObj::inferShape(const TensorVec &inputs) {
    const auto &inputX = inputs[0], &diffY = inputs[1];
    auto n = inputX->getDims()[0];
    auto h = inputX->getDims()[2];

@ -280,7 +280,7 @@ ConvTransposed2dNHWCObj::ConvTransposed2dNHWCObj(GraphObj *graph, Tensor input,
}

optional<vector<Shape>>
ConvTransposed2dNHWCObj::inferShape(const TensorVec &inputs) const {
ConvTransposed2dNHWCObj::inferShape(const TensorVec &inputs) {
    const Tensor &input = inputs[0], &weight = inputs[1];
    auto n = input->getDims()[0];
    auto f = input->getDims()[3];

@ -6,7 +6,7 @@ DetObj::DetObj(GraphObj *graph, Tensor input, Tensor output, Mode mode)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> DetObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> DetObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    auto input = A->getDims();
    int rank = A->getRank();

@ -10,7 +10,7 @@ DropoutObj::DropoutObj(GraphObj *graph, Tensor data, Tensor output, Tensor mask,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> DropoutObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> DropoutObj::inferShape(const TensorVec &inputs) {
    auto shape = inputs[0]->getDims();
    return {{shape, shape}};
}

@ -8,8 +8,7 @@ ElementWiseObj::ElementWiseObj(OpType type, GraphObj *graph, Tensor input0,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>>
ElementWiseObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ElementWiseObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0], B = inputs[1];
    auto res = infer_broadcast(A->getDims(), B->getDims());
    return {{res}};

@ -45,7 +44,7 @@ MSELossObj::MSELossObj(GraphObj *graph, Tensor input0, Tensor input1,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> MSELossObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> MSELossObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0], B = inputs[1];
    IT_ASSERT(A->getRank() == B->getRank());
    IT_ASSERT(A->getDims() == B->getDims());

@ -8,7 +8,7 @@ ExpandObj::ExpandObj(GraphObj *graph, Tensor input, Tensor output, Shape dims)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ExpandObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ExpandObj::inferShape(const TensorVec &inputs) {
    auto shape_input = inputs[0]->getDims();
    Shape ret = infer_broadcast(shape_input, dims);
    return {{ret}};

@ -11,7 +11,7 @@ ExtendObj::ExtendObj(GraphObj *graph, Tensor input, Tensor output, int dim,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ExtendObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ExtendObj::inferShape(const TensorVec &inputs) {
    auto ret = inputs[0]->getDims();
    ret[dim] = ret[dim] * (num + 1);
    return {{ret}};

@ -10,7 +10,7 @@ GatherObj::GatherObj(GraphObj *graph, Tensor input, Tensor indices,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> GatherObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> GatherObj::inferShape(const TensorVec &inputs) {
    auto dims0 = inputs[0]->getDims();
    auto dims1 = inputs[1]->getDims();

@ -9,25 +9,6 @@ MatmulObj::MatmulObj(GraphObj *graph, Tensor A, Tensor B, Tensor C, bool transA,
    : OperatorObj(OpType::MatMul,
                  bias ? TensorVec{A, B, bias} : TensorVec{A, B}, {C}),
      transA(transA), transB(transB), act(act), b(1) {
    auto shape_a = A->getDims();
    auto shape_b = B->getDims();
    int rankA = A->getRank();
    int rankB = B->getRank();
    IT_ASSERT(rankA >= 2 && rankB >= 2);
    Shape shape_a1(shape_a.begin(), shape_a.begin() + (rankA - 2));
    Shape shape_b1(shape_b.begin(), shape_b.begin() + (rankB - 2));
    auto ret = infer_broadcast(shape_a1, shape_b1);
    if (ret.empty()) {
        b = 1;
    } else {
        b = std::accumulate(ret.begin(), ret.end(), 1, std::multiplies<int>());
    }
    auto kA = *(transA ? shape_a.rbegin() + 1 : shape_a.rbegin());
    auto kB = *(transB ? shape_b.rbegin() : shape_b.rbegin() + 1);
    IT_ASSERT(kA == kB);
    m = *(transA ? shape_a.rbegin() : shape_a.rbegin() + 1);
    n = *(transB ? shape_b.rbegin() + 1 : shape_b.rbegin());
    k = kA;
    IT_ASSERT(checkValid(graph));
}

@ -40,7 +21,7 @@ string MatmulObj::toString() const {
    return os.str();
}

optional<vector<Shape>> MatmulObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> MatmulObj::inferShape(const TensorVec &inputs) {
    auto A = inputs[0], B = inputs[1];
    auto shapeA = A->getDims();
    auto shapeB = B->getDims();

@ -49,8 +30,17 @@ optional<vector<Shape>> MatmulObj::inferShape(const TensorVec &inputs) const {
    Shape shapeA1(shapeA.begin(), shapeA.begin() + (rankA - 2));
    Shape shapeB1(shapeB.begin(), shapeB.begin() + (rankB - 2));
    Shape ret = infer_broadcast(shapeA1, shapeB1);
    auto m = *(this->getTransA() ? shapeA.rbegin() : shapeA.rbegin() + 1);
    auto n = *(this->getTransB() ? shapeB.rbegin() + 1 : shapeB.rbegin());
    if (ret.empty()) {
        b = 1;
    } else {
        b = std::accumulate(ret.begin(), ret.end(), 1, std::multiplies<int>());
    }
    auto kA = *(transA ? shapeA.rbegin() + 1 : shapeA.rbegin());
    auto kB = *(transB ? shapeB.rbegin() : shapeB.rbegin() + 1);
    IT_ASSERT(kA == kB);
    m = *(transA ? shapeA.rbegin() : shapeA.rbegin() + 1);
    n = *(transB ? shapeB.rbegin() + 1 : shapeB.rbegin());
    k = kA;
    ret.emplace_back(m);
    ret.emplace_back(n);
    return {{ret}};

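MatmulObj::inferShape now recomputes and stores m, n, k, and the broadcast batch count b on the operator itself, which is also why the const qualifier had to go. The same shape rule rendered in Python, with numpy standing in for infer_broadcast on the batch dims (an illustrative sketch, not project code):

    import numpy as np

    def matmul_output_shape(shapeA, shapeB, transA=False, transB=False):
        # Broadcast everything except the last two dims, as infer_broadcast does.
        batch = np.broadcast_shapes(tuple(shapeA[:-2]), tuple(shapeB[:-2]))
        m = shapeA[-1] if transA else shapeA[-2]
        kA = shapeA[-2] if transA else shapeA[-1]
        kB = shapeB[-1] if transB else shapeB[-2]
        n = shapeB[-2] if transB else shapeB[-1]
        assert kA == kB, "inner dimensions must agree"
        return list(batch) + [m, n]

    assert matmul_output_shape([2, 1, 4, 5], [3, 5, 6]) == [2, 3, 4, 6]
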
@ -58,7 +58,7 @@ string MemBoundObj::toString() const {
    return os.str();
}

optional<vector<Shape>> MemBoundObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> MemBoundObj::inferShape(const TensorVec &inputs) {
    // inputs have to match nnetInputs exactly
    if (inputs.size() != nnetInputs.size())
        return {};

@ -22,7 +22,7 @@ PadObj::PadObj(GraphObj *graph, Tensor input, Tensor output,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> PadObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> PadObj::inferShape(const TensorVec &inputs) {
    auto dims = inputs[0]->getDims();
    int rank = inputs[0]->getRank();
    IT_ASSERT(rank * 2 == (int)pads.size());

@ -14,7 +14,7 @@ PoolingObj::PoolingObj(GraphObj *graph, OpType optype, Tensor input,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> PoolingObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> PoolingObj::inferShape(const TensorVec &inputs) {
    const auto &input = inputs[0];
    auto h = input->getDims()[input->getRank() - 2],
         w = input->getDims()[input->getRank() - 1];

@ -21,8 +21,7 @@ bool ReduceMeanObj::isReduced(int idx) const {
    return axes.find(idx) != axes.end();
}

optional<vector<Shape>>
ReduceMeanObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ReduceMeanObj::inferShape(const TensorVec &inputs) {
    auto dims = inputs[0]->getDims();
    auto rank = inputs[0]->getRank();

@ -1,5 +1,6 @@
#include "operators/reshape.h"
#include "utils/operator_utils.h"
#include <numeric>

namespace infini {
ReshapeObj::ReshapeObj(GraphObj *graph, Tensor input, Tensor output, Shape dims)

@ -7,19 +8,37 @@ ReshapeObj::ReshapeObj(GraphObj *graph, Tensor input, Tensor output, Shape dims)
    IT_ASSERT(checkValid(graph));
}

ReshapeObj::ReshapeObj(GraphObj *graph, Tensor input, Tensor output, Shape dims, Shape dims_t)
    : OperatorObj(OpType::Reshape, {input}, {output}), dims(std::move(dims)), dims_t(std::move(dims_t)) {
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ReshapeObj::inferShape(const TensorVec &inputs) const {
    size_t size = 1;
    for (size_t i = 0; i < dims.size(); ++i) {
        size *= dims.at(i);
optional<vector<Shape>> ReshapeObj::inferShape(const TensorVec &inputs) {
    int count = 0;
    for (auto x : dims) {
        if (x == -1) {
            count++;
        }
        IT_ASSERT(x == -1 || x >= 0);
    }
    IT_ASSERT(size == inputs[0]->size());
    IT_ASSERT(count == 0 || count == 1);
    auto inputShape = inputs[0]->getDims();
    int size = inputs[0]->size();
    int index = -1;
    outputShape = dims;
    for (int i = 0; i < (int)dims.size(); ++i) {
        if (dims[i] == 0) {
            outputShape[i] = inputShape[i];
        }
        if (dims[i] == -1) {
            index = i;
        }
    }
    if (index != -1) {
        outputShape[index] =
            size / (-std::accumulate(outputShape.begin(), outputShape.end(), 1,
                                     [](auto acc, auto x) { return acc * x; }));
    }
    int outputSize = std::accumulate(outputShape.begin(), outputShape.end(), 1,
                                     [](auto acc, auto x) { return acc * x; });
    IT_ASSERT(outputSize == size);

    return {{dims}};
    return {{outputShape}};
}
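
The new inferShape implements ONNX Reshape's special values: 0 copies the corresponding input dim, and a single -1 absorbs whatever is left. The same arithmetic in a few lines of Python, including the sign trick used above (the running product still contains the -1, so negating it yields the product of the known dims; illustrative only):

    from math import prod

    def resolve_reshape(input_shape, dims):
        out = [input_shape[i] if d == 0 else d for i, d in enumerate(dims)]
        size = prod(input_shape)
        if -1 in out:
            out[out.index(-1)] = size // -prod(out)  # -prod(out) = known dims' product
        assert prod(out) == size
        return out

    assert resolve_reshape([2, 3, 4], [0, -1]) == [2, 12]
    assert resolve_reshape([2, 3, 4], [4, 6]) == [4, 6]
    assert resolve_reshape([3, 2, 1], [3, 2, 1]) == [3, 2, 1]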

std::string ReshapeObj::toString() const {

@ -27,7 +46,7 @@ std::string ReshapeObj::toString() const {
    os << "Reshape[" << getGuid() << "]";
    os << "(";
    os << vecToString(inputs[0]->getDims()) << ",";
    os << "dims=" << vecToString(dims) << ",";
    os << "outputShape=" << vecToString(outputShape) << ",";
    os << "input=" << inputs[0]->getGuid() << ",";
    os << "output=" << outputs[0]->getGuid() << ")";
    return os.str();

@ -35,12 +54,12 @@ std::string ReshapeObj::toString() const {

vector<int> ReshapeObj::getWorkloadVector() const {
    vector<int> ret = inputs[0]->getDims();
    ret.insert(ret.end(), dims.begin(), dims.end());
    ret.insert(ret.end(), outputShape.begin(), outputShape.end());
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}
vector<int> ReshapeObj::getOpAttrVector() const {
    vector<int> ret = dims;
    vector<int> ret = outputShape;
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}

@ -52,7 +71,7 @@ FlattenObj::FlattenObj(GraphObj *graph, Tensor input, Tensor output, int _axis)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> FlattenObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> FlattenObj::inferShape(const TensorVec &inputs) {
    int sizeB = 1, sizeE = 1;
    auto dims = getInputs(0)->getDims();
    int rank = getInputs(0)->getRank();

@ -89,7 +108,7 @@ IdentityObj::IdentityObj(GraphObj *graph, Tensor input, Tensor output)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> IdentityObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> IdentityObj::inferShape(const TensorVec &inputs) {
    return {{getInputs(0)->getDims()}};
}

@ -206,7 +206,7 @@ float ResizeObj::round_int(float x) const {
}

// output shape is related to sizes/scales value.
optional<vector<Shape>> ResizeObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ResizeObj::inferShape(const TensorVec &inputs) {
    auto inDims = inputs[0]->getDims();
    Shape ret = inDims;
    int rank = inputs[0]->getRank();

@ -57,7 +57,7 @@ SliceObj::SliceObj(GraphObj *graph, Tensor input, Tensor output,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> SliceObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> SliceObj::inferShape(const TensorVec &inputs) {
    Shape ans;
    ans.reserve(axes.size());
    for (const auto &range : axes) {

@ -35,7 +35,7 @@ SplitObj::SplitObj(GraphObj *graph, Tensor input,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> SplitObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> SplitObj::inferShape(const TensorVec &inputs) {
    IT_ASSERT(num != -1 && ratio.size() != 0);
    auto inputDims = inputs[0]->getDims();
    int totalSize = inputDims.at(dim);

@ -16,8 +16,7 @@ TransposeObj::TransposeObj(GraphObj *graph, Tensor input, Tensor output,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>>
TransposeObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> TransposeObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    auto input_dim = A->getDims();
    auto output_dim = input_dim;

@ -6,7 +6,7 @@ UnaryObj::UnaryObj(OpType type, GraphObj *graph, Tensor input, Tensor output)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> UnaryObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> UnaryObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    return {{A->getDims()}};
}

@ -37,7 +37,7 @@ ClipObj::ClipObj(GraphObj *graph, Tensor input, Tensor output,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ClipObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ClipObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    return {{A->getDims()}};
}

@ -68,7 +68,7 @@ HardtanhObj::HardtanhObj(GraphObj *graph, Tensor input, Tensor output,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> HardtanhObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> HardtanhObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    return {{A->getDims()}};
}

@ -97,7 +97,7 @@ FillObj::FillObj(GraphObj *graph, Tensor input, Tensor output, float value)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> FillObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> FillObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    return {{A->getDims()}};
}

@ -124,7 +124,7 @@ L2LossObj::L2LossObj(GraphObj *graph, Tensor input, Tensor output)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> L2LossObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> L2LossObj::inferShape(const TensorVec &inputs) {
    Shape temp = {1};
    return {{temp}};
}

@ -159,7 +159,7 @@ vector<DataType> CastObj::inferDataType(const TensorVec &inputs) const {
    return vector(numOutputs(), output_dataType);
}

optional<vector<Shape>> CastObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> CastObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    return {{A->getDims()}};
}

@ -241,7 +241,7 @@ ShapeObj::ShapeObj(GraphObj *graph, Tensor input, Tensor output)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ShapeObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> ShapeObj::inferShape(const TensorVec &inputs) {
    return {{{static_cast<int>(inputs[0]->getRank())}}};
}

@ -257,7 +257,7 @@ PReluObj::PReluObj(GraphObj *graph, Tensor input, Tensor alpha, Tensor output)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> PReluObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> PReluObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    return {{A->getDims()}};
}

@ -286,7 +286,7 @@ LogObj::LogObj(GraphObj *graph, Tensor input, Tensor output, LogType type)
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> LogObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> LogObj::inferShape(const TensorVec &inputs) {
    const auto A = inputs[0];
    return {{A->getDims()}};
}

@ -10,7 +10,7 @@ WhereObj::WhereObj(GraphObj *graph, Tensor inputX, Tensor inputY,
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> WhereObj::inferShape(const TensorVec &inputs) const {
optional<vector<Shape>> WhereObj::inferShape(const TensorVec &inputs) {
    auto shapeX = inputs[0]->getDims();
    auto shapeY = inputs[1]->getDims();
    auto shapeCon = inputs[2]->getDims();