#pragma once

#include "core/graph.h"
#include "core/runtime.h"

namespace infini {

// Use the ONNX element-type indices directly to avoid an extra data-type
// conversion step; these codes come from ONNX but are not used only for ONNX.
//
// see https://onnx.ai/onnx/intro/concepts.html#element-type
enum OnnxDType : int {
    UNDEFINED = 0,
    FLOAT,
    UINT8,
    INT8,
    UINT16,
    INT16,
    INT32,
    INT64,
    STRING,
    BOOL,
    FLOAT16,
    DOUBLE,
    UINT32,
    UINT64,
    COMPLEX64,
    COMPLEX128,
    BFLOAT16,
};
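
// GraphHandlerObj wraps a Graph and builds it by appending one operator per
// call. A minimal usage sketch (assuming `runtime` is a valid Runtime obtained
// elsewhere):
//   GraphHandlerObj handler(runtime);
//   Tensor a = handler.tensor(Shape{2, 3}, OnnxDType::FLOAT);
//   Tensor b = handler.tensor(Shape{2, 3}, OnnxDType::FLOAT);
//   Tensor c = handler.tensor(Shape{2, 3}, OnnxDType::FLOAT);
//   handler.add(a, b, c); // append an element-wise Add to the graph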
class GraphHandlerObj {
    Graph g;

  public:
    GraphHandlerObj(Runtime runtime)
        : g(make_ref<GraphObj>(std::move(runtime))) {}
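
    // Create a tensor of the given shape in the graph; `dtype` is an
    // OnnxDType code, e.g. OnnxDType::FLOAT.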
    Tensor tensor(Shape dims, int dtype);
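
    // 2-D convolution. The short parameter names are assumed to follow the
    // usual convention: ph/pw = padding, sh/sw = stride, dh/dw = dilation
    // along H/W.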
    Tensor conv(Tensor input, Tensor weight, Tensor output, int ph, int pw,
                int sh, int sw, int dh, int dw);
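
    // Matrix multiplication y = op(a) * op(b), where transA/transB select
    // whether each input is transposed; `bias` and `act` are assumed to be an
    // optional bias tensor and a fused activation.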
    Tensor matmul(Tensor a, Tensor b, Tensor y, bool transA, bool transB,
                  Tensor bias, ActType act);
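
    // Batch normalization with running mean/var plus scale and bias;
    // `momentum`, `eps` and `training` are assumed to mirror the ONNX
    // BatchNormalization attributes.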
    Tensor batchNorm(Tensor input, Tensor output, Tensor mean, Tensor var,
                     Tensor scale, Tensor bias, float momentum, float eps,
                     bool training);
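
    // 2-D pooling. kh/kw = kernel size, dh/dw = dilation, ph/pw = padding,
    // sh/sw = stride along H/W (assumed from the abbreviations).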
    Tensor maxPool(Tensor input, Tensor output, int kh, int kw, int dh, int dw,
                   int ph, int pw, int sh, int sw);
    Tensor avgPool(Tensor input, Tensor output, int kh, int kw, int dh, int dw,
                   int ph, int pw, int sh, int sw);
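
    // Element-wise binary operators; `c` is the output tensor.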
    Tensor add(Tensor a, Tensor b, Tensor c);
    Tensor sub(Tensor a, Tensor b, Tensor c);
    Tensor mul(Tensor a, Tensor b, Tensor c);
    Tensor div(Tensor a, Tensor b, Tensor c);
    Tensor pow(Tensor a, Tensor b, Tensor c);
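
    // Unary operators; `y` is the output tensor.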
    Tensor relu(Tensor x, Tensor y);
    Tensor sigmoid(Tensor x, Tensor y);
    Tensor tanh(Tensor x, Tensor y);
    Tensor softmax(Tensor x, Tensor y);
    Tensor abs(Tensor x, Tensor y);
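
    // Shape/layout operators: identity copies its input, while flatten and
    // reshape change the shape without changing the underlying elements.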
    Tensor identity(Tensor x, Tensor y);
    Tensor flatten(Tensor s, Tensor y);
    Tensor reshape(Tensor data, Tensor reshaped, Shape shape);
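
    // Concatenate `inputs` along `dim`; gather entries of `data` along `axis`
    // using `indices` (assumed ONNX Gather semantics).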
    Tensor concat(TensorVec inputs, Tensor output, int dim);
    Tensor gather(Tensor data, Tensor indices, Tensor output, int axis);
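
    // Mean reduction over `axes` (all axes when absent, as in ONNX
    // ReduceMean); `keepdims` keeps the reduced dimensions with size 1.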
    Tensor reduceMean(Tensor data, Tensor reduced,
                      const optional<vector<int>> &axes, bool keepdims);
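
    // ONNX-style Slice: per-axis `starts`/`ends`, with optional `axes` and
    // `steps` (assumed semantics).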
    Tensor slice(Tensor input, Tensor output, const vector<int> &starts,
                 const vector<int> &ends, const optional<vector<int>> &axes,
                 const optional<vector<int>> &steps);
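
    // ONNX-style Pad: `pads` gives begin/end padding amounts, optionally
    // restricted to the dimensions listed in `axes` (assumed semantics).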
    Tensor pad(Tensor input, Tensor output, const vector<int> &pads,
               const optional<vector<int>> &axes);
};

} // namespace infini