// InfiniTensor/test/operators/test_where.cc

#include "core/graph.h"
#include "core/kernel.h"
#include "core/runtime.h"
#include "operators/where.h"
#include "test.h"
namespace infini {
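
// The output shape of Where is the multidirectional (NumPy-style) broadcast
// of the two data tensors and the Bool condition tensor; passing nullptr as
// the output lets shape inference allocate it.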
TEST(WhereFp32, ShapeInference) {
    Runtime runtime = NativeCpuRuntimeObj::getInstance();
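    // All three inputs share shape {2, 2}: no broadcasting needed.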
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({2, 2}, DataType::Float32);
        Tensor y = g->addTensor({2, 2}, DataType::Float32);
        Tensor con = g->addTensor({2, 2}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{2, 2}));
    }
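    // con {1, 224, 1} right-aligns to {1, 1, 224, 1} and its size-1 dims
    // stretch, so the output takes x's shape.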
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({1, 12, 224, 224}, DataType::Float32);
        Tensor y = g->addTensor({1, 1, 224, 224}, DataType::Float32);
        Tensor con = g->addTensor({1, 224, 1}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{1, 12, 224, 224}));
    }
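    // Ranks differ (3-D x, 4-D y, 2-D con); missing leading dims count as 1.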
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({12, 224, 224}, DataType::Float32);
        Tensor y = g->addTensor({1, 1, 224, 224}, DataType::Float32);
        Tensor con = g->addTensor({1, 224}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{1, 12, 224, 224}));
    }
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({12, 224, 224}, DataType::Float32);
        Tensor y = g->addTensor({1, 1, 224, 224}, DataType::Float32);
        Tensor con = g->addTensor({2, 1, 1, 1, 224}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{2, 1, 12, 224, 224}));
    }
}
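
// For reference, a minimal sketch of the broadcast rule the assertions above
// encode. inferBroadcastShape is illustrative only, not part of
// InfiniTensor's API; it assumes the shapes are already broadcast-compatible
// and omits error checking.
static Shape inferBroadcastShape(const std::vector<Shape> &shapes) {
    size_t rank = 0;
    for (const auto &s : shapes)
        if (s.size() > rank)
            rank = s.size();
    Shape out(rank, 1); // missing leading dims default to 1
    for (const auto &s : shapes) {
        size_t offset = rank - s.size(); // right-align this shape
        for (size_t i = 0; i < s.size(); ++i)
            if (s[i] != 1)
                out[offset + i] = s[i]; // a size-1 dim stretches to match
    }
    return out;
}

TEST(WhereFp32, BroadcastRuleSketch) {
    // The same inputs as the last case above reproduce its expected shape.
    EXPECT_EQ(inferBroadcastShape(
                  {{12, 224, 224}, {1, 1, 224, 224}, {2, 1, 1, 1, 224}}),
              (Shape{2, 1, 12, 224, 224}));
}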
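
// The same four broadcast cases with Float16 data tensors; the condition
// tensor stays Bool regardless of the data type.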
TEST(WhereFp16, ShapeInference) {
    Runtime runtime = NativeCpuRuntimeObj::getInstance();
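    // Identical shapes: output is {2, 2}.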
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({2, 2}, DataType::Float16);
        Tensor y = g->addTensor({2, 2}, DataType::Float16);
        Tensor con = g->addTensor({2, 2}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{2, 2}));
    }
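    // con {1, 224, 1} broadcasts against the 4-D data tensors.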
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({1, 12, 224, 224}, DataType::Float16);
        Tensor y = g->addTensor({1, 1, 224, 224}, DataType::Float16);
        Tensor con = g->addTensor({1, 224, 1}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{1, 12, 224, 224}));
    }
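    // Mixed ranks broadcast to {1, 12, 224, 224}.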
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({12, 224, 224}, DataType::Float16);
        Tensor y = g->addTensor({1, 1, 224, 224}, DataType::Float16);
        Tensor con = g->addTensor({1, 224}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{1, 12, 224, 224}));
    }
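    // con's leading dimensions extend the output to {2, 1, 12, 224, 224}.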
    {
        Graph g = make_ref<GraphObj>(runtime);
        Tensor x = g->addTensor({12, 224, 224}, DataType::Float16);
        Tensor y = g->addTensor({1, 1, 224, 224}, DataType::Float16);
        Tensor con = g->addTensor({2, 1, 1, 1, 224}, DataType::Bool);
        auto op = g->addOp<WhereObj>(x, y, con, nullptr);
        EXPECT_EQ(op->getOutput()->getDims(), (Shape{2, 1, 12, 224, 224}));
    }
}
} // namespace infini