forked from jiuyuan/InfiniTensor
fix: use the data stored in tensors correctly
Signed-off-by: YdrMaster <ydrml@hotmail.com>
parent 3631b03e73
commit e3428d8fd8
@@ -2,15 +2,16 @@
 #include <cstdint>
 #include <cstring>
+#include <utility>
 #include <vector>
 
 namespace optimization {
 
 /// @brief Stores tensor data.
-struct Data {
-    /// @brief `cpu_data` is stored in the memory space,
-    /// which allows it to be managed using `std::vector<uint8_t>`.
-    std::vector<uint8_t> cpu_data;
+class Data {
+    uint8_t *cpu_data;
 
     // #ifdef USE_CUDA
     //     void *gpu_data;
@@ -20,13 +21,56 @@ struct Data {
     //     void *mlu_data;
     // #endif
 
+    Data(uint8_t *ptr) : cpu_data(ptr) {}
+
+  public:
+    Data() : cpu_data(nullptr) {}
+    Data(size_t size) : cpu_data(new uint8_t[size]) {}
+    template <class t> Data(t begin, t end) : cpu_data(nullptr) {
+        size_t c = sizeof(decltype(*begin)) * static_cast<size_t>(end - begin);
+        cpu_data = new uint8_t[c];
+        std::copy(begin, end, cpu_data);
+    }
+    Data(Data const &) = delete;
+    Data(Data &&others) noexcept
+        : cpu_data(std::exchange(others.cpu_data, nullptr)) {}
+    ~Data() noexcept { delete[] cpu_data; }
+
+    Data &operator=(Data const &) = delete;
+    Data &operator=(Data &&others) noexcept {
+        if (this != &others)
+            delete[] std::exchange(cpu_data,
+                                   std::exchange(others.cpu_data, nullptr));
+        return *this;
+    }
+
     /// @brief Builds `Data` from a `vector` of any type `t`.
     /// @tparam t Data type.
     /// @param data Data `vector`.
     /// @return `Data` object.
-    template <class t> static Data cpu(std::vector<t> data) {
-        Data ans{std::vector<uint8_t>(sizeof(t) * data.size())};
-        memcpy(ans.cpu_data.data(), data.data(), ans.cpu_data.size());
+    template <class t> static Data cpu(std::vector<t> const &data) {
+        auto const len = data.size();
+        auto const size = sizeof(t) * len;
+        Data ans(size);
+        memcpy(ans.cpu_data, data.data(), size);
         return ans;
     }
+
+    /// @brief Gets data ptr.
+    /// @tparam t Data type.
+    /// @return Data ptr.
+    template <class t> t *as_ptr() const {
+        return reinterpret_cast<t *>(cpu_data);
+    }
+
+    /// @brief Copies data to a `Vec`.
+    /// @tparam t Data type.
+    /// @param len Count of data.
+    /// @return The data `Vec`.
+    template <class t> std::vector<t> to_vec(size_t len) const {
+        std::vector<t> ans(len);
+        memcpy(ans.data(), cpu_data, sizeof(t) * len);
+        return ans;
+    }
 };
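For reference, a minimal usage sketch of the new move-only `Data`, assuming the class above is visible through a header (the include path below is a placeholder): `Data::cpu` copies a typed vector into a raw byte buffer, `to_vec` copies it back out, and moving transfers ownership of the buffer.

// Sketch only; "optimization/data.h" stands in for wherever optimization::Data lives.
#include "optimization/data.h"

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

int main() {
    std::vector<int64_t> src{1, 1};

    // Build a byte buffer from a typed vector, then read it back as int64_t.
    auto a = optimization::Data::cpu<int64_t>(src);
    assert(a.to_vec<int64_t>(src.size()) == src);

    // Data is move-only: ownership of the buffer transfers, no double free.
    auto b = std::move(a);
    assert(b.as_ptr<int64_t>()[0] == 1);
}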
@@ -53,10 +53,11 @@ Vec<Unigraph> optimization::pass::mutate( // fmt: new line
     auto const conv = Conv(g.operators.front());
     auto const &i_shape = conv.input()->shape;
     auto const &w_shape = conv.weight()->shape;
-    auto const &dilations = conv.delations()->data.cpu_data;
-    auto const &strides = conv.strides()->data.cpu_data;
+    auto const &dilations = conv.delations()->to_vec<int64_t>();
+    auto const &strides = conv.strides()->to_vec<int64_t>();
     if (w_shape.rbegin()[0] == 1 // fmt: new line
         && w_shape.rbegin()[1] == 1 //
         && i_shape[1] == w_shape[1] // group = 1
         && std::all_of(strides.begin(), strides.end(),
                        [](auto x) { return x == 1; })) {
         // 1x1 conv
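The removed lines bound `dilations` and `strides` to the raw `uint8_t` buffer, so the `std::all_of(... x == 1)` check compared individual bytes of the attribute data rather than its `int64_t` values, and an all-ones stride would be rejected. The replacement reads the buffer through `to_vec<int64_t>()`, which yields typed elements. A self-contained illustration, assuming the attributes are stored as `int64_t` as the new code implies:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
    // Two int64_t strides of value 1, as a Conv attribute tensor stores them.
    std::vector<int64_t> strides{1, 1};

    // Byte view (what the removed lines iterated over): 16 bytes, most of them 0.
    std::vector<uint8_t> bytes(sizeof(int64_t) * strides.size());
    std::memcpy(bytes.data(), strides.data(), bytes.size());
    assert(!std::all_of(bytes.begin(), bytes.end(),
                        [](auto x) { return x == 1; })); // byte-wise check fails

    // Typed view (what to_vec<int64_t>() yields): two elements, both equal to 1.
    assert(std::all_of(strides.begin(), strides.end(),
                       [](auto x) { return x == 1; })); // element-wise check passes
}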
@@ -8,6 +8,13 @@ Arc<Tensor> Tensor::share(Vec<size_t> shape, DataType data_type, Data data) {
         new Tensor(std::move(shape), std::move(data_type), std::move(data)));
 }
 
+size_t Tensor::count() const {
+    return shape.empty() // fmt: new line
+               ? 0
+               : std::accumulate(shape.begin(), shape.end(), 1,
+                                 [](auto acc, auto it) { return acc * it; });
+}
+
 size_t Tensor::size() const {
     return shape.empty() // fmt: new line
                ? 0
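`count()` folds the shape into an element count, while `size()` reports bytes; for a shape of {2, 3, 4} the count is 24, which for an fp32 tensor corresponds to 24 * sizeof(float) = 96 bytes. A standalone sketch of the same fold, independent of the `Tensor` class:

// Standalone sketch of the element-count fold used by Tensor::count().
#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>

int main() {
    std::vector<size_t> shape{2, 3, 4};
    size_t count = shape.empty()
                       ? 0
                       : std::accumulate(shape.begin(), shape.end(), size_t{1},
                                         [](auto acc, auto it) { return acc * it; });
    assert(count == 24);
    assert(count * sizeof(float) == 96); // bytes of a dense fp32 tensor with this shape
}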
@@ -66,10 +66,19 @@ struct Tensor {
                              Data::cpu<t>(std::move(val)));
     }
 
+    /// @brief Calculates count of data in this tensor.
+    /// @return Data count.
+    size_t count() const;
+
     /// @brief Calculates the size of the tensor in bytes.
     /// @return Memory usage in bytes.
     size_t size() const;
 
+    /// @brief Copies tensor data to a `Vec`.
+    /// @tparam t Data type.
+    /// @return The data `Vec`.
+    template <class t> Vec<t> to_vec() const { return data.to_vec<t>(count()); }
+
   private:
     /// @brief Constructor is private and only accessible by the factory method.
     Tensor(Vec<size_t> &&, DataType &&, Data &&);
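A sketch of the round trip these declarations enable, using only the `Tensor::share` signature and `to_vec` delegation shown in this diff. The include path is a placeholder, `Vec`/`Arc`/`DataType` are assumed to be exposed by the project's tensor header, and the `DataType` value is left as a parameter `dt` because the enumerator names are not shown here:

// Sketch; include path and the DataType value dt are placeholders.
#include "optimization/tensor.h"

#include <cstdint>
#include <vector>

optimization::Vec<int64_t> read_strides(optimization::DataType dt) {
    std::vector<int64_t> strides{1, 1};
    auto t = optimization::Tensor::share(
        {2},                                        // shape: two attribute values
        dt,                                         // element-type tag for int64_t
        optimization::Data::cpu<int64_t>(strides)); // byte buffer owned by the tensor
    // to_vec() sizes the result with count(), then copies count() elements out of Data.
    return t->to_vec<int64_t>();
}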
@@ -173,12 +173,10 @@ optimization::Unigraph cast(GraphObj &g) {
         std::transform(dims.begin(), dims.end(), shape.begin(),
                        [](auto x) { return static_cast<size_t>(x); });
 
-        opt::Data data{};
+        opt::Data data;
         if (t->hasData()) {
-            auto origin = t->getDataBlob();
-            data.cpu_data.resize(t->getBytes());
-            memcpy(data.cpu_data.data(), origin->getPtr<uint8_t *>(),
-                   data.cpu_data.size());
+            auto ptr = t->getDataBlob()->getPtr<uint8_t *>();
+            data = opt::Data(ptr, ptr + t->getBytes());
         }
         tensors[I(t)] =
             opt::Tensor::share(shape, cast(t->getDType()), std::move(data));
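The iterator-range constructor used here copies `getBytes()` bytes out of the blob into a buffer owned by `Data`, so the optimization graph never aliases the original blob's memory. A small sketch of that copy semantics with a plain byte array standing in for the blob (the include path is again a placeholder):

// Sketch of the copy semantics of Data(begin, end) used above.
#include "optimization/data.h"

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    std::vector<uint8_t> blob_bytes{1, 0, 0, 0}; // stand-in for getDataBlob()'s storage
    uint8_t *ptr = blob_bytes.data();

    optimization::Data data(ptr, ptr + blob_bytes.size()); // copies 4 bytes
    blob_bytes.assign(blob_bytes.size(), 0xFF);             // mutate the "blob"

    assert(data.as_ptr<uint8_t>()[0] == 1); // the copy is unaffected
}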