forked from jiuyuan/InfiniTensor
refactor: import numpy ndarray directly to improve loading performance
Signed-off-by: YdrMaster <ydrml@hotmail.com>
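The hot spot is `_parse_tensor`: it previously expanded every initializer into a Python list of ints (`[b for b in ….tobytes()]`) before crossing into the C++ bindings, boxing one Python object per byte; after this change the `numpy.ndarray` is handed over as-is. A minimal sketch of the cost difference, assuming a made-up 1024×1024 float32 initializer named `weights` (illustrative only; `refactor_tensor` itself is not exercised here):

import timeit

import numpy as np
from onnx import numpy_helper

# Hypothetical 4 MiB float32 initializer, serialized as an ONNX TensorProto.
weights = numpy_helper.from_array(
    np.random.random((1024, 1024)).astype(np.float32), name="w"
)


def old_path():
    # Before: every byte becomes a boxed Python int in a list.
    return [b for b in numpy_helper.to_array(weights).data.tobytes()]


def new_path():
    # After: the ndarray is passed through; the consumer reads its buffer directly.
    return numpy_helper.to_array(weights)


print("byte list:", timeit.timeit(old_path, number=10))
print("ndarray:  ", timeit.timeit(new_path, number=10))

The binding side of the diff makes the same trade: `edge` now accepts `std::optional<py::array>` instead of `std::optional<std::vector<uint8_t>>`, so the buffer is validated once via `nbytes()` and copied once via `memcpy` rather than being rebuilt element by element from a Python list.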
parent 5d56206d1a
commit 6830f02c88
@@ -1 +1 @@
-Subproject commit 4627c2f2596f2428f91649138ba0c8ebac5bce67
+Subproject commit c2aae62103d6c1ec3f1069be8b2ee6387928fc0e
@@ -1 +0,0 @@
@@ -5,30 +5,30 @@ from typing import Any
 
 
 def build_graph(model: ModelProto) -> backend.Graph:
-    nodes: dict[str, backend.Operator] = dict()
     edges: dict[str, backend.Tensor] = dict()
-    topology: dict[str, tuple[list[str], list[str]]] = dict()
 
     for tensor in model.graph.initializer:
         edges[tensor.name] = _parse_tensor(tensor)
 
     for tensor in model.graph.input:
         if tensor.name not in edges:
-            dim = [
-                DimExpr(d.dim_value) if d.dim_value > 0 else DimExpr(d.dim_param)
-                for d in tensor.type.tensor_type.shape.dim
-            ]
             edges[tensor.name] = refactor_tensor(
-                tensor.type.tensor_type.elem_type, dim, None
+                tensor.type.tensor_type.elem_type,
+                [
+                    DimExpr(d.dim_value)
+                    if d.HasField("dim_value")
+                    else DimExpr(d.dim_param)
+                    for d in tensor.type.tensor_type.shape.dim
+                ],
+                None,
             )
 
-    for node in model.graph.node:
-        topology[node.name] = ([i for i in node.input], [o for o in node.output])
-        nodes[node.name] = refactor_operator(node.op_type, _parse_attribute(node))
-
     return refactor_graph(
-        topology,
-        nodes,
+        {node.name: (node.input, node.output) for node in model.graph.node},
+        {
+            node.name: refactor_operator(node.op_type, _parse_attribute(node))
+            for node in model.graph.node
+        },
         edges,
         [i.name for i in model.graph.input],
         [o.name for o in model.graph.output],
@@ -39,29 +39,32 @@ def _parse_tensor(tensor: TensorProto) -> backend.Tensor:
     return refactor_tensor(
         tensor.data_type,
         [DimExpr(d) for d in tensor.dims],
-        [b for b in numpy_helper.to_array(tensor).data.tobytes()],
+        numpy_helper.to_array(tensor),
     )
 
 
+def _raise(attr: AttributeProto) -> None:
+    raise NotImplementedError("Unsupported Attribute Type: {}".format(attr.type))
+
+
 def _parse_attribute(node: NodeProto) -> dict[str, Any]:
-    ans: dict[str, Any] = dict()
-    for attr in node.attribute:
-        if attr.type == AttributeProto.INT:
-            ans[attr.name] = attr.i
-        elif attr.type == AttributeProto.INTS:
-            ans[attr.name] = attr.ints
-        elif attr.type == AttributeProto.FLOAT:
-            ans[attr.name] = attr.f
-        elif attr.type == AttributeProto.FLOATS:
-            ans[attr.name] = attr.floats
-        elif attr.type == AttributeProto.STRING:
-            ans[attr.name] = attr.s
-        elif attr.type == AttributeProto.STRINGS:
-            ans[attr.name] = attr.strings
-        elif attr.type == AttributeProto.TENSOR:
-            ans[attr.name] = _parse_tensor(attr.t)
-        elif attr.type == AttributeProto.TENSORS:
-            ans[attr.name] = [_parse_tensor(t) for t in attr.tensors]
-        else:
-            assert False, "Unsupported Attribute Type: {}".format(attr.type)
-    return ans
+    return {
+        attr.name: attr.i
+        if attr.type == AttributeProto.INT
+        else attr.ints
+        if attr.type == AttributeProto.INTS
+        else attr.f
+        if attr.type == AttributeProto.FLOAT
+        else attr.floats
+        if attr.type == AttributeProto.FLOATS
+        else attr.s
+        if attr.type == AttributeProto.STRING
+        else attr.strings
+        if attr.type == AttributeProto.STRINGS
+        else _parse_tensor(attr.t)
+        if attr.type == AttributeProto.TENSOR
+        else [_parse_tensor(t) for t in attr.tensors]
+        if attr.type == AttributeProto.TENSORS
+        else _raise(attr)
+        for attr in node.attribute
+    }
@@ -1,527 +0,0 @@
-import os, onnx, unittest
-from onnx import TensorProto
-from onnx.helper import (
-    make_model,
-    make_node,
-    make_tensor,
-    make_graph,
-    make_tensor_value_info,
-)
-from onnx.checker import check_model, check_graph
-from onnx.shape_inference import infer_shapes
-from pyinfinitensor.onnx import from_onnx, OnnxStub, backend, _parse_data_fp16
-import numpy as np
-
-
-def make_and_import_model(graph: onnx.GraphProto):
-    check_graph(graph)
-    model = make_model(graph)
-    check_model(model)
-    from_onnx(model, backend.cpu_runtime())
-
-
-class TestStringMethods(unittest.TestCase):
-    # def test_run(self):
-    #     model_file = next(
-    #         (name for name in os.listdir() if name.endswith(".onnx")), None
-    #     )
-    #     if model_file != None:
-    #         print(
-    #             "model: {file}({size:.2f} MiB)".format(
-    #                 file=model_file, size=os.path.getsize(model_file) / 1024 / 1024
-    #             )
-    #         )
-    #         run_onnx(onnx.load(model_file), runtime)
-
-    def test_load(self):
-        for model_file in os.listdir():
-            if model_file.endswith(".onnx"):
-                print(
-                    "model: {file}({size:.2f} MiB)".format(
-                        file=model_file, size=os.path.getsize(model_file) / 1024 / 1024
-                    )
-                )
-                model = OnnxStub(onnx.load(model_file), backend.cpu_runtime()).to_onnx(
-                    "new"
-                )
-                model = infer_shapes(model)
-
-    def test_tensor(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
-        make_and_import_model(make_graph([], "tensor", [x], [x]))
-
-    def test_conv(self):
-        i = make_tensor_value_info("i", TensorProto.FLOAT, [1, 3, 4, 4])
-        w = make_tensor_value_info("w", TensorProto.FLOAT, [2, 3, 3, 3])
-        o = make_tensor_value_info("o", TensorProto.FLOAT, [1, 2, 2, 2])
-        conv = make_node(
-            "Conv",
-            ["i", "w"],
-            ["o"],
-            "conv",
-            pads=[1, 1, 1, 1],
-            strides=[2, 1],
-            dilations=[1, 2],
-        )
-        make_and_import_model(make_graph([conv], "conv", [i, w], [o]))
-
-    def test_conv_fp16(self):
-        i = make_tensor_value_info("i", TensorProto.FLOAT16, [1, 3, 4, 4])
-        w = make_tensor_value_info("w", TensorProto.FLOAT16, [2, 3, 3, 3])
-        o = make_tensor_value_info("o", TensorProto.FLOAT16, [1, 2, 2, 2])
-        conv = make_node(
-            "Conv",
-            ["i", "w"],
-            ["o"],
-            "conv",
-            pads=[1, 1, 1, 1],
-            strides=[2, 1],
-            dilations=[1, 2],
-        )
-        make_and_import_model(make_graph([conv], "conv_fp16", [i, w], [o]))
-
-    def test_conv_bfp16(self):
-        i = make_tensor_value_info("i", TensorProto.BFLOAT16, [1, 3, 4, 4])
-        w = make_tensor_value_info("w", TensorProto.BFLOAT16, [2, 3, 3, 3])
-        o = make_tensor_value_info("o", TensorProto.BFLOAT16, [1, 2, 2, 2])
-        conv = make_node(
-            "Conv",
-            ["i", "w"],
-            ["o"],
-            "conv",
-            pads=[1, 1, 1, 1],
-            strides=[2, 1],
-            dilations=[1, 2],
-        )
-        make_and_import_model(make_graph([conv], "conv_bfp16", [i, w], [o]))
-
-    def test_matmul(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 4])
-        xa = make_tensor_value_info("xa", TensorProto.FLOAT, [1, 2, 4])
-        matmul = make_node("MatMul", ["x", "a"], ["xa"], name="matmul")
-        make_and_import_model(make_graph([matmul], "matmul", [x, a], [xa]))
-
-    def test_gemm(self):
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 2, 3])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 4, 3])
-        c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 2, 4])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 2, 4])
-        gemm = make_node("Gemm", ["a", "b", "c"], ["y"], transB=1, name="gemm")
-        make_and_import_model(make_graph([gemm], "gemm", [a, b, c], [y]))
-
-    def test_batch_norm(self):
-        x = make_tensor_value_info("x", TensorProto.UINT32, [1, 3, 2, 2])
-        scale = make_tensor_value_info("scale", TensorProto.FLOAT, [3])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [3])
-        mean = make_tensor_value_info("mean", TensorProto.FLOAT, [3])
-        var = make_tensor_value_info("var", TensorProto.FLOAT, [3])
-        y = make_tensor_value_info("y", TensorProto.UINT32, [1, 3, 2, 2])
-        batch_norm = make_node(
-            "BatchNormalization",
-            ["x", "scale", "b", "mean", "var"],
-            ["y"],
-            name="batchNormalization",
-        )
-        make_and_import_model(
-            make_graph([batch_norm], "batchNormalzation", [x, scale, b, mean, var], [y])
-        )
-
-    def test_max_pool(self):
-        x = make_tensor_value_info("x", TensorProto.UINT32, [1, 64, 162, 162])
-        y = make_tensor_value_info("y", TensorProto.UINT32, [1, 64, 80, 80])
-        pool = make_node(
-            "MaxPool",
-            ["x"],
-            ["y"],
-            kernel_shape=[3, 3],
-            dilations=[1, 1],
-            pads=[0, 0, 0, 0],
-            strides=[2, 2],
-            name="maxPool",
-        )
-        make_and_import_model(make_graph([pool], "maxPool", [x], [y]))
-
-    def test_avg_pool(self):
-        x = make_tensor_value_info("x", TensorProto.UINT32, [1, 64, 162, 162])
-        y = make_tensor_value_info("y", TensorProto.UINT32, [1, 64, 80, 80])
-        pool = make_node(
-            "AveragePool",
-            ["x"],
-            ["y"],
-            kernel_shape=[3, 3],
-            pads=[0, 0, 0, 0],
-            strides=[2, 2],
-            name="avgPool",
-        )
-        make_and_import_model(make_graph([pool], "avgPool", [x], [y]))
-
-    def test_global_avg_pool(self):
-        x = make_tensor_value_info("x", TensorProto.UINT32, [30, 30, 30, 30])
-        y = make_tensor_value_info("y", TensorProto.UINT32, [30, 30, 1, 1])
-        pool = make_node(
-            "GlobalAveragePool",
-            ["x"],
-            ["y"],
-            name="globalAvgPool",
-        )
-        make_and_import_model(make_graph([pool], "avgPool", [x], [y]))
-
-    def test_add(self):
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
-        c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
-        add = make_node("Add", ["a", "b"], ["c"], name="add")
-        make_and_import_model(make_graph([add], "add", [a, b], [c]))
-
-    def test_sub(self):
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
-        c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
-        sub = make_node("Sub", ["a", "b"], ["c"], name="sub")
-        make_and_import_model(make_graph([sub], "sub", [a, b], [c]))
-
-    def test_mul(self):
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
-        c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
-        mul = make_node("Mul", ["a", "b"], ["c"], name="mul")
-        make_and_import_model(make_graph([mul], "mul", [a, b], [c]))
-
-    def test_div(self):
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
-        c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
-        div = make_node("Div", ["a", "b"], ["c"], name="div")
-        make_and_import_model(make_graph([div], "div", [a, b], [c]))
-
-    def test_pow(self):
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 5, 7])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 3, 5, 7])
-        c = make_tensor_value_info("c", TensorProto.FLOAT, [1, 3, 5, 7])
-        pow = make_node("Pow", ["a", "b"], ["c"], name="pow")
-        make_and_import_model(make_graph([pow], "pow", [a, b], [c]))
-
-    def test_relu(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        relu = make_node("Relu", ["x"], ["y"], name="relu")
-        make_and_import_model(make_graph([relu], "relu", [x], [y]))
-
-    def test_erf(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        erf = make_node("Erf", ["x"], ["y"], name="erf")
-        make_and_import_model(make_graph([erf], "erf", [x], [y]))
-
-    def test_sqrt(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        sqrt = make_node("Sqrt", ["x"], ["y"], name="sqrt")
-        make_and_import_model(make_graph([sqrt], "sqrt", [x], [y]))
-
-    def test_sigmoid(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        sigmoid = make_node("Sigmoid", ["x"], ["y"], name="sigmoid")
-        make_and_import_model(make_graph([sigmoid], "sigmoid", [x], [y]))
-
-    def test_tanh(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        tanh = make_node("Tanh", ["x"], ["y"], name="tanh")
-        make_and_import_model(make_graph([tanh], "tanh", [x], [y]))
-
-    def test_softmax(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        softmax = make_node("Softmax", ["x"], ["y"], axis=2, name="softmax")
-        make_and_import_model(make_graph([softmax], "softmax", [x], [y]))
-
-    def test_abs(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        abs = make_node("Abs", ["x"], ["y"], name="abs")
-        make_and_import_model(make_graph([abs], "abs", [x], [y]))
-
-    def test_identity(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        identity = make_node("Identity", ["x"], ["y"], name="identity")
-        make_and_import_model(make_graph([identity], "identity", [x], [y]))
-
-    def test_flatten(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1 * 3, 5 * 7])
-        flatten = make_node("Flatten", ["x"], ["y"], axis=2, name="flatten")
-        make_and_import_model(make_graph([flatten], "flatten", [x], [y]))
-
-    def test_reshape(self):
-        data = make_tensor_value_info("data", TensorProto.FLOAT, [2, 3, 4, 5])
-        shape = make_tensor_value_info("shape", TensorProto.INT64, [3])
-        shape_data = make_tensor("shape", TensorProto.INT64, [3], [5, 3, 8])
-        reshaped = make_tensor_value_info(
-            "reshaped", TensorProto.FLOAT, shape_data.int64_data
-        )
-        reshape = make_node("Reshape", ["data", "shape"], ["reshaped"], name="reshape")
-        make_and_import_model(
-            make_graph([reshape], "reshape", [data, shape], [reshaped], [shape_data])
-        )
-
-    def test_concat(self):
-        input1 = make_tensor_value_info("input1", TensorProto.FLOAT, [1, 3, 2, 4])
-        input2 = make_tensor_value_info("input2", TensorProto.FLOAT, [1, 3, 2, 5])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 9])
-        concat = make_node(
-            "Concat", ["input1", "input2"], ["output"], axis=3, name="concat"
-        )
-        make_and_import_model(
-            make_graph([concat], "concat", [input1, input2], [output])
-        )
-
-    def test_gather(self):
-        data = make_tensor_value_info("data", TensorProto.FLOAT, [1, 3, 4, 4])
-        indices = make_tensor_value_info("indices", TensorProto.FLOAT, [2, 1, 2])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 2, 1, 2, 4, 4])
-        gather = make_node(
-            "Gather", ["data", "indices"], ["output"], axis=1, name="gather"
-        )
-        make_and_import_model(make_graph([gather], "gather", [data, indices], [output]))
-
-    def test_reduce_mean(self):
-        data = make_tensor_value_info("data", TensorProto.FLOAT, [2, 3, 3, 4])
-        reduced = make_tensor_value_info("reduced", TensorProto.FLOAT, [1, 1, 1, 1])
-        reduceMean = make_node(
-            "ReduceMean", ["data"], ["reduced"], keepdims=1, name="reduceMean"
-        )
-        make_and_import_model(make_graph([reduceMean], "reduceMean", [data], [reduced]))
-
-    def test_slice(self):
-        data = make_tensor_value_info("data", TensorProto.UINT32, [10, 64, 162, 162])
-        output = make_tensor_value_info("output", TensorProto.UINT32, [1, 1, 99, 95])
-        starts = make_tensor("starts", TensorProto.INT64, [4], [2, 9, 1, 5])
-        ends = make_tensor("ends", TensorProto.INT64, [4], [3, 10, 100, 100])
-        slice = make_node("Slice", ["data", "starts", "ends"], ["output"], name="slice")
-        make_and_import_model(
-            make_graph(
-                [slice],
-                "slice",
-                [data],
-                [output],
-                [starts, ends],
-            )
-        )
-
-    def test_pad(self):
-        data = make_tensor_value_info("data", TensorProto.UINT32, [1, 64, 162, 162])
-        output = make_tensor_value_info("output", TensorProto.UINT32, [3, 84, 164, 172])
-        pads = make_tensor_value_info("pads", TensorProto.INT64, [8])
-        pads_data = make_tensor(
-            "pads", TensorProto.INT64, [8], [2, 10, 1, 5, 0, 10, 1, 5]
-        )
-        pad = make_node("Pad", ["data", "pads"], ["output"], name="pad")
-        make_and_import_model(
-            make_graph(
-                [pad],
-                "pad",
-                [data, pads],
-                [output],
-                [pads_data],
-            )
-        )
-
-    def test_allReduceSum(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 4])
-        allReduceSum = make_node(
-            "AllReduceSum", ["input"], ["output"], name="allReduceSum"
-        )
-        graph = make_graph([allReduceSum], "allReduceSum", [input], [output])
-        model = make_model(graph)
-        from_onnx(model, backend.cpu_runtime())
-
-    def test_allReduceProd(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 4])
-        allReduceProd = make_node(
-            "AllReduceProd", ["input"], ["output"], name="allReduceProd"
-        )
-        graph = make_graph([allReduceProd], "allReduceProd", [input], [output])
-        model = make_model(graph)
-        from_onnx(model, backend.cpu_runtime())
-
-    def test_allReduceMin(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 4])
-        allReduceMin = make_node(
-            "AllReduceMin", ["input"], ["output"], name="allReduceMin"
-        )
-        graph = make_graph([allReduceMin], "allReduceMin", [input], [output])
-        model = make_model(graph)
-        from_onnx(model, backend.cpu_runtime())
-
-    def test_allReduceMax(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 4])
-        allReduceMax = make_node(
-            "AllReduceMax", ["input"], ["output"], name="allReduceMax"
-        )
-        graph = make_graph([allReduceMax], "allReduceMax", [input], [output])
-        model = make_model(graph)
-        from_onnx(model, backend.cpu_runtime())
-
-    def test_allReduceAvg(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 4])
-        allReduceAvg = make_node(
-            "AllReduceAvg", ["input"], ["output"], name="allReduceAvg"
-        )
-        graph = make_graph([allReduceAvg], "allReduceAvg", [input], [output])
-        model = make_model(graph)
-        from_onnx(model, backend.cpu_runtime())
-
-    def test_split(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        split = make_node("Split", ["input"], ["output"], name="split", axis=0)
-        make_and_import_model(make_graph([split], "split", [input], []))
-
-    def test_allBroadcast(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 2, 4])
-        broadcast = make_node(
-            "Broadcast", ["input"], ["output"], name="broadcast", root=1
-        )
-        graph = make_graph([broadcast], "broadcast", [input], [output])
-        model = make_model(graph)
-        from_onnx(model, backend.cpu_runtime())
-
-    def test_allGather(self):
-        input = make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 2, 4])
-        world_size = make_tensor_value_info("world_size", TensorProto.INT32, [1])
-        allGather = make_node(
-            "AllGather", ["input", "world_size"], ["output"], name="allGather"
-        )
-        graph = make_graph([allGather], "allGather", [input, world_size], [])
-        model = make_model(graph)
-        from_onnx(model, backend.cpu_runtime())
-
-    # see <https://onnx.ai/onnx/intro/python.html#a-simple-example-a-linear-regression>
-    def test_linear(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
-        a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 4])
-        b = make_tensor_value_info("b", TensorProto.FLOAT, [1, 2, 4])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 2, 4])
-        matmul = make_node("MatMul", ["x", "a"], ["xa"], name="matmul")
-        add = make_node("Add", ["xa", "b"], ["y"], name="add")
-        graph = make_graph([matmul, add], "lr", [x, a, b], [y])
-        model = make_model(graph)
-        check_model(model)
-        from_onnx(model, backend.cpu_runtime())
-
-    def test_frontend(self):
-        handler = backend.GraphHandler(backend.cpu_runtime())
-        a = handler.tensor([1, 2, 3], 12)
-        b = handler.tensor([1, 2, 3], 12)
-        c = handler.tensor([1, 2, 3], 12)
-        d = handler.tensor([1, 2, 3], 12)
-        e = handler.tensor([1, 2, 3], 12)
-
-        x = handler.add(
-            handler.add(handler.add(handler.add(a, b, None), c, None), d, None), e, None
-        )
-        y = handler.tensor([3, 2, 1], 12)
-        handler.reshape(x, y, [3, 2, 1])
-
-    def test_cast(self):
-        input1 = make_tensor_value_info("input1", TensorProto.FLOAT, [1, 3, 2, 4])
-        output = make_tensor_value_info("output", TensorProto.FLOAT16, [1, 3, 2, 4])
-        cast = make_node(
-            "Cast", ["input1"], ["output"], to=TensorProto.FLOAT16, name="cast"
-        )
-        make_and_import_model(make_graph([cast], "cast", [input1], [output]))
-
-    def test_expand(self):
-        data = make_tensor_value_info("data", TensorProto.FLOAT, [3, 1])
-        dim = make_tensor_value_info("dim", TensorProto.INT64, [3])
-        dim_data = make_tensor("dim", TensorProto.INT64, [3], [2, 1, 6])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [2, 3, 6])
-        expand = make_node("Expand", ["data", "dim"], ["output"], name="expand")
-        make_and_import_model(
-            make_graph([expand], "expand", [data, dim], [output], [dim_data])
-        )
-
-    def test_where(self):
-        x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
-        y = make_tensor_value_info("y", TensorProto.FLOAT, [1, 3, 5, 7])
-        con = make_tensor_value_info("con", TensorProto.BOOL, [1, 3, 5, 7])
-        output = make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 5, 7])
-        where = make_node("Where", ["x", "y", "con"], ["output"], name="where")
-        make_and_import_model(make_graph([where], "where", [x, y, con], [output]))
-
-    def test_copyin(self):
-        dims = [2, 3, 5, 4]
-        np_array = np.random.random(dims).astype(np.float32)
-        handler = backend.GraphHandler(backend.cpu_runtime())
-        tensor1 = handler.tensor(dims, TensorProto.FLOAT)
-        tensor2 = handler.tensor(dims, TensorProto.FLOAT)
-        handler.data_malloc()
-        tensor1.copyin_numpy(np_array)
-        tensor2.copyin_float(np_array.flatten().tolist())
-        array1 = tensor1.copyout_float()
-        array2 = tensor2.copyout_float()
-        self.assertEqual(array1, array2)
-        self.assertTrue(np.array_equal(np.array(array1).reshape(dims), np_array))
-
-        np_array = np.random.random(dims).astype(np.int64)
-        handler = backend.GraphHandler(backend.cpu_runtime())
-        tensor1 = handler.tensor(dims, TensorProto.INT64)
-        tensor2 = handler.tensor(dims, TensorProto.INT64)
-        handler.data_malloc()
-        tensor1.copyin_numpy(np_array)
-        tensor2.copyin_int64(np_array.flatten().tolist())
-        array1 = tensor1.copyout_int64()
-        array2 = tensor2.copyout_int64()
-        self.assertEqual(array1, array2)
-        self.assertTrue(np.array_equal(np.array(array1).reshape(dims), np_array))
-
-    def test_to_numpy(self):
-        dims = [2, 3, 5, 4]
-        np_array = np.random.random(dims).astype(np.float32)
-        handler = backend.GraphHandler(backend.cpu_runtime())
-        tensor1 = handler.tensor(dims, TensorProto.FLOAT)
-        tensor2 = handler.tensor(dims, TensorProto.FLOAT)
-        handler.data_malloc()
-        tensor1.copyin_float(np_array.flatten().tolist())
-        tensor2.copyin_float(np_array.flatten().tolist())
-        array1 = np.array(tensor1.copyout_float()).reshape(dims)
-        array2 = np.array(tensor2)
-        self.assertTrue(np.array_equal(array2, np_array))
-        self.assertTrue(np.array_equal(array1, array2))
-
-        np_array = np.random.random(dims).astype(np.float16)
-        handler = backend.GraphHandler(backend.cpu_runtime())
-        tensor1 = handler.tensor(dims, TensorProto.FLOAT16)
-        handler.data_malloc()
-        tensor1.copyin_numpy(np_array)
-        array1 = np.array(tensor1, copy=False)
-        self.assertTrue(np.array_equal(array1, np_array))
-
-class TestDynamicTensor(unittest.TestCase):
-    def test_dynamic_tensor(self):
-        filename = r"resnet18-v2-7.onnx"
-        current_path = os.getcwd()
-        model_file = ""
-        for root, dirs, files in os.walk(current_path):
-            if filename in files:
-                model_file = os.path.join(root, filename)
-        model = OnnxStub(onnx.load(model_file), backend.cpu_runtime())
-        output_key = list(model.outputs.keys())[0]
-        old_output_shape = model.getShape(output_key)
-        self.assertEqual(old_output_shape, ([1, 1000]))
-        model.set_input([[5, 3, 224, 224]])
-        new_output_shape = model.getShape(output_key)
-        self.assertEqual(new_output_shape, ([5, 1000]))
-
-
-if __name__ == "__main__":
-    unittest.main()
@@ -1,6 +1,7 @@
 #include "common/error_handler.h"
 #include "computation/graph.h"
 #include "onnx/operators.h"
+#include <pybind11/numpy.h>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 
@@ -25,13 +26,13 @@ class Handler {
 };
 
 std::shared_ptr<Tensor> edge(int dataType, std::vector<DimExpr> shape,
-                             std::optional<std::vector<uint8_t>> data) {
-    Shape s(shape.begin(), shape.end());
-    auto ans = std::make_shared<Tensor>(static_cast<common::DataType>(dataType),
-                                        std::move(s));
+                             std::optional<py::array> data) {
+    auto ans = Tensor::share(static_cast<common::DataType>(dataType),
+                             Shape(shape.begin(), shape.end()));
     if (data) {
         auto const bytesSize = ans->bytesSize();
-        ASSERT(bytesSize == data->size(), "Data size mismatch");
+        ASSERT(bytesSize == static_cast<size_t>(data->nbytes()),
+               "Data size mismatch");
         ans->data = std::make_shared<Blob>(new uint8_t[bytesSize]);
         std::memcpy(ans->data->ptr, data->data(), bytesSize);
     }
@@ -39,15 +40,14 @@ std::shared_ptr<Tensor> edge(int dataType, std::vector<DimExpr> shape,
 }
 
 std::shared_ptr<Operator>
-node(std::string opType,
+node(const char *opType,
      std::unordered_map<std::string, decltype(Attribute::value)> attrs) {
     std::unordered_map<std::string, Attribute> attrs_;
     for (auto it = attrs.begin(); it != attrs.end(); attrs.erase(it++)) {
         attrs_.insert({std::move(it->first), {std::move(it->second)}});
     }
-    return std::make_shared<Operator>(
-        Operator{OpType::parse(fmt::format("onnx::{}", opType).c_str()),
-                 std::move(attrs_)});
+    return std::make_shared<Operator>(Operator{
+        OpType::parse(fmt::format("onnx::{}", opType)), std::move(attrs_)});
 }
 
 std::shared_ptr<Handler>
@@ -68,8 +68,8 @@ graph(std::unordered_map<Name, std::pair<std::vector<Name>, std::vector<Name>>>
         builder.nodes.insert({std::move(name), std::move(node)});
     }
    for (auto &[name, tensor] : edges) {
-        auto node = Edge{std::move(tensor), name};
-        builder.edges.insert({std::move(name), std::move(node)});
+        auto edge = Edge{std::move(tensor), name};
+        builder.edges.insert({std::move(name), std::move(edge)});
     }
     builder.globalInputs = std::move(inputs);
     builder.globalOutputs = std::move(outputs);