InfiniTensor/include/utils/data_generator.h

#pragma once
#include "core/common.h"
#include "core/tensor_base.h"
#include "utils/data_convert.h"
#include <random>
namespace infini {
// TODO: isolate these classes
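// DataGenerator dispatches on the runtime DataType tag in operator() and
// forwards to a private virtual fill() overload per element type; subclasses
// override only the overloads they support, and unsupported types halt.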
class DataGenerator {
  private:
    virtual void fill(uint32_t *data, size_t size) { IT_TODO_HALT(); }
    virtual void fill(float *data, size_t size) { IT_TODO_HALT(); }
    virtual void fill_fp16(uint16_t *data, size_t size) { IT_TODO_HALT(); }

  public:
    virtual ~DataGenerator() {}
    void operator()(void *data, size_t size, DataType dataType) {
        if (dataType == DataType::UInt32)
            fill(reinterpret_cast<uint32_t *>(data), size);
        else if (dataType == DataType::Float32)
            fill(reinterpret_cast<float *>(data), size);
        else if (dataType == DataType::Float16)
            fill_fp16(reinterpret_cast<uint16_t *>(data), size);
        else
            IT_TODO_HALT();
    }
};
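
// IncrementalGenerator fills data[i] = i for the uint32_t and float overloads.
// The fp16 overload currently writes a constant instead (see FIXME below).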
class IncrementalGenerator : public DataGenerator {
  public:
    virtual ~IncrementalGenerator() {}

  private:
    template <typename T> void fill(T *data, size_t size) {
        for (size_t i = 0; i < size; i++) {
            data[i] = i;
        }
    }

    void fill(uint32_t *data, size_t size) override {
        fill<uint32_t>(data, size);
    }
    void fill(float *data, size_t size) override { fill<float>(data, size); }
    // FIXME: fix the accuracy standards when dtype is float16; until then,
    // every element is set to the constant 2.0f rather than an increment.
    void fill_fp16(uint16_t *data, size_t size) override {
        for (size_t i = 0; i < size; i++) {
            data[i] = float_to_fp16(2.0f);
        }
    }
};
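
// RandomGenerator draws uniform values: integers in [l, r] via
// uniform_int_distribution and floats in [l, r) via uniform_real_distribution,
// from a mt19937 engine with an explicit seed (default 0) for reproducibility.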
class RandomGenerator : public DataGenerator {
  private:
    double l, r;
    std::mt19937 e;
    std::uniform_int_distribution<int> di;
    std::uniform_real_distribution<float> dr;

  public:
    // Note: the double bounds are truncated to int for the integer
    // distribution.
    RandomGenerator(double l = 0, double r = 1, unsigned int seed = 0)
        : l(l), r(r), e(seed), di(l, r), dr(l, r) {}
    virtual ~RandomGenerator() {}

  private:
    void fill(uint32_t *data, size_t size) override {
        for (size_t i = 0; i < size; i++) {
            data[i] = di(e);
        }
    }
    void fill(float *data, size_t size) override {
        for (size_t i = 0; i < size; i++) {
            data[i] = dr(e);
        }
    }
};
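
// ValGenerator<val> fills every element with the compile-time constant val;
// OneGenerator and ZeroGenerator below are the common instantiations.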
template <int val> class ValGenerator : public DataGenerator {
  public:
    virtual ~ValGenerator() {}

  private:
    template <typename T> void fill(T *data, size_t size) {
        for (size_t i = 0; i < size; i++) {
            data[i] = val;
        }
    }

    void fill(uint32_t *data, size_t size) override {
        fill<uint32_t>(data, size);
    }
    void fill(float *data, size_t size) override { fill<float>(data, size); }
    void fill_fp16(uint16_t *data, size_t size) override {
        for (size_t i = 0; i < size; i++) {
            data[i] = float_to_fp16(static_cast<float>(val));
        }
    }
};
typedef ValGenerator<1> OneGenerator;
typedef ValGenerator<0> ZeroGenerator;
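
// Usage sketch (illustrative, not part of this header): filling a raw buffer
// through the type-erased operator(), assuming the DataType tag matches the
// buffer's element type and that size is an element count, as the fill
// implementations above assume.
//   std::vector<uint32_t> buf(8);
//   IncrementalGenerator()(buf.data(), buf.size(), DataType::UInt32);
//   // buf now holds {0, 1, 2, 3, 4, 5, 6, 7}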
} // namespace infini