feat: initial model export support; attribute tensors and tensor data cannot be exported yet

Signed-off-by: YdrMaster <ydrml@hotmail.com>
YdrMaster 2023-09-16 14:20:00 +08:00
parent 229993f081
commit a795364612
3 changed files with 104 additions and 2 deletions

@@ -1 +1 @@
-Subproject commit c361f800793400a4bb6c425ebfa1ffa834e467be
+Subproject commit e1b74e070b219424f54e1be3890d46bdca092273

@@ -1,5 +1,6 @@
import backend
from onnx import ModelProto, NodeProto, TensorProto, AttributeProto, numpy_helper
from onnx.helper import make_model, make_node, make_graph, make_tensor_value_info
from backend import DimExpr, refactor_tensor, refactor_operator, refactor_graph
from typing import Any
@@ -68,3 +69,24 @@ def _parse_attribute(node: NodeProto) -> dict[str, Any]:
        else _raise(attr)
        for attr in node.attribute
    }
def build_onnx(graph_name: str, graph: backend.Graph) -> ModelProto:
    iterator = backend.Iterator(graph)
    nodes = []
    global_inputs = []
    global_outputs = []
    # Drain the iterator: each step yields one node, or None when done.
    while True:
        node = iterator.next()
        if node is None:
            break
        (name, op_type, attributes, inputs, outputs) = node
        nodes.append(make_node(op_type, inputs, outputs, name=name, **attributes))
    for tensor in iterator.global_inputs():
        (name, data_type, shape) = tensor
        global_inputs.append(make_tensor_value_info(name, data_type, shape))
    for tensor in iterator.global_outputs():
        (name, data_type, shape) = tensor
        global_outputs.append(make_tensor_value_info(name, data_type, shape))
    graph = make_graph(nodes, graph_name, global_inputs, global_outputs)
    return make_model(graph)
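
A minimal usage sketch of the new export path (the `export_onnx` helper and the output path are illustrative, not part of this commit; per the commit message, tensor data and tensor-valued attributes are not exported yet, so the resulting model may be incomplete):

    import onnx

    def export_onnx(g: backend.Graph, path: str) -> None:
        # build_onnx walks the graph via backend.Iterator (see above)
        model = build_onnx("exported", g)
        onnx.save(model, path)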

@@ -26,10 +26,85 @@ class Handler {
        ASSERT(_g.substitute(name, value),
               fmt::format("Variable {} not exist", name));
    }
    auto const &graph() const { return _g.internal(); }
    void runCuda() { TODO("Not implemented"); }
};
class Iterator {
    std::shared_ptr<Handler> _internal;
    graph_topo::GraphTopo::Iterator _it;

public:
    explicit Iterator(std::shared_ptr<Handler> internal)
        : _internal(std::move(internal)),
          _it(_internal->graph().topology.begin()) {}

    using T = std::tuple<Name, int, std::vector<std::variant<Name, int>>>;
    using O = std::tuple<Name, Name,
                         std::unordered_map<Name, decltype(Attribute::value)>,
                         std::vector<Name>, std::vector<Name>>;

    // Builds the (name, dataType, shape) tuple for one edge; variable
    // dimensions are reported by name, fixed dimensions as ints.
    T buildT(size_t edgeIdx) const {
        auto const &edge = _internal->graph().edges[edgeIdx];
        auto const &shape = edge.tensor->shape;
        std::vector<std::variant<Name, int>> shape_(shape.size(), 1);
        std::transform(shape.begin(), shape.end(), shape_.begin(),
                       [](auto const &d) -> std::variant<Name, int> {
                           if (d.isVariable()) {
                               return d.variable()->name;
                           } else {
                               return static_cast<int>(d.value());
                           }
                       });
        return T{edge.name, static_cast<int>(edge.tensor->dataType),
                 std::move(shape_)};
    }

    std::vector<T> globalInputs() const {
        auto inputs = _it.globalInputs();
        std::vector<T> ans(inputs.size());
        std::transform(inputs.begin(), inputs.end(), ans.begin(),
                       [this](auto const &edgeIdx) { return buildT(edgeIdx); });
        return ans;
    }

    std::vector<T> globalOutputs() const {
        auto outputs = _it.globalOutputs();
        std::vector<T> ans(outputs.size());
        std::transform(outputs.begin(), outputs.end(), ans.begin(),
                       [this](auto const &edgeIdx) { return buildT(edgeIdx); });
        return ans;
    }

    // Yields one node per call as (name, opType, attributes, inputs,
    // outputs); std::nullopt signals the end of the topology.
    std::optional<O> next() {
        if (_it == _internal->graph().topology.end()) {
            return std::nullopt;
        }
        auto [nodeIdx, inputs_, outputs_] = *_it++;
        auto const &node = _internal->graph().nodes[nodeIdx];
        auto const &name = node.name;
        auto const opType = node.op->opType.name();
        ASSERT(opType.substr(0, 6) == "onnx::", "Invalid opType");
        auto const &attributes = node.op->attributes;
        std::vector<Name> inputs(inputs_.size()), outputs(outputs_.size());
        std::transform(inputs_.begin(), inputs_.end(), inputs.begin(),
                       [this](auto const &idx) {
                           return _internal->graph().edges[idx].name;
                       });
        std::transform(outputs_.begin(), outputs_.end(), outputs.begin(),
                       [this](auto const &idx) {
                           return _internal->graph().edges[idx].name;
                       });
        std::unordered_map<Name, decltype(Attribute::value)> attributes_;
        attributes_.reserve(attributes.size());
        // Tensor-valued attributes cannot be exported yet; skip them.
        for (auto const &[name, attr] : attributes) {
            if (!std::holds_alternative<Tensor_>(attr.value)) {
                attributes_.insert({name, attr.value});
            }
        }
        // Strip the "onnx::" prefix so Python receives the plain op type.
        return O{name, opType.substr(6), attributes_, inputs, outputs};
    }
};
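
On the Python side the class above acts as a pull-style cursor: `next()` returns `(name, op_type, attributes, inputs, outputs)` tuples and `None` once the topology is exhausted (pybind11 maps `std::nullopt` to `None`), while `global_inputs()`/`global_outputs()` return `(name, data_type, shape)` tuples whose shape entries mirror `std::variant<Name, int>`. A sketch, assuming `g` is a `backend.Graph` obtained from the import path:

    def dump(g: backend.Graph) -> None:
        it = backend.Iterator(g)
        while (node := it.next()) is not None:
            name, op_type, attributes, inputs, outputs = node
            print(f"{name}: {op_type}({', '.join(inputs)}) -> {', '.join(outputs)}")
        for name, data_type, shape in it.global_inputs():
            # a str entry is a variable dimension, an int is a fixed one
            print(name, data_type, shape)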
std::shared_ptr<Tensor> edge(int dataType, std::vector<DimExpr> shape,
                             std::optional<py::array> data) {
    auto ans = Tensor::share(static_cast<common::DataType>(dataType),
@@ -102,6 +177,11 @@ void register_refactor(py::module &m) {
        .def("substitute", &Handler::substitute)
        .def("set_input", &Handler::setInput)
        .def("run_cuda", &Handler::runCuda);
    py::class_<Iterator>(m, "Iterator")
        .def(py::init<std::shared_ptr<Handler>>())
        .def("global_inputs", &Iterator::globalInputs)
        .def("global_outputs", &Iterator::globalOutputs)
        .def("next", &Iterator::next);
    m.def("refactor_tensor", edge)
        .def("refactor_operator", node)
        .def("refactor_graph", graph);