InfiniTensor/include/core/common.h

#pragma once
#include "utils/exception.h"
#include <cassert>
#include <cstdint> // uint64_t
#include <cstdio>  // printf
#include <functional>
#include <iostream>
#include <list>
#include <map>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace infini {
using std::list;
using std::map;
using std::optional;
using std::pair;
using std::set;
using std::string;
using std::tie;
using std::to_string;
using std::tuple;
using std::unordered_map;
using std::vector;
// Aliases
using dtype = float;
using HashType = uint64_t; // compatible with std::hash
// Metaprogramming utilities
#define _CAT(A, B) A##B
#define _SELECT(NAME, NUM) _CAT(NAME##_, NUM)
#define _GET_COUNT(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, COUNT, ...) COUNT
#define _VA_SIZE(...) _GET_COUNT(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
#define _VA_SELECT(NAME, ...) _SELECT(NAME, _VA_SIZE(__VA_ARGS__))(__VA_ARGS__)
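// How the dispatch works: _VA_SIZE counts the variadic arguments (up to 10)
// by shifting them against a descending number list, and _SELECT pastes that
// count onto NAME. For example, _VA_SELECT(_IT_ASSERT, c) expands to
// _IT_ASSERT_1(c), while _VA_SELECT(_IT_ASSERT, c, m) expands to
// _IT_ASSERT_2(c, m).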
// Assert: conditions should have no side effect
#define _IT_ASSERT_2(condition, info) \
    static_cast<bool>(condition)                                               \
        ? void(0)                                                              \
        : throw ::infini::Exception(                                           \
              std::string("[") + __FILE__ + ":" + std::to_string(__LINE__) +   \
              "] Assertion failed (" + #condition + "): " + info)
#define _IT_ASSERT_1(condition) _IT_ASSERT_2(condition, "")
#define IT_ASSERT(...) _VA_SELECT(_IT_ASSERT, __VA_ARGS__)
#define IT_TODO_HALT() _IT_ASSERT_2(false, "Unimplemented")
#define IT_TODO_HALT_MSG(msg) _IT_ASSERT_2(false, msg)
#define IT_ASSERT_TODO(condition) _IT_ASSERT_2(condition, "Unimplemented")
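// Usage (illustrative):
//   IT_ASSERT(n > 0);                       // dispatches to _IT_ASSERT_1
//   IT_ASSERT(n > 0, "n must be positive"); // dispatches to _IT_ASSERT_2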
// Note: __LINE__ expands to an integer, not a string literal, so it cannot be
// concatenated into the string; format it at runtime instead.
#define IT_TODO_SKIP() std::printf("Unimplemented %s:%d\n", __FILE__, __LINE__)
// Other utilities
// std::to_underlying is available since C++23
template <typename T> auto enum_to_underlying(T e) {
return static_cast<std::underlying_type_t<T>>(e);
}
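// Illustrative example (Flag is hypothetical, not defined in this project):
//   enum class Flag : int { A, B };
//   enum_to_underlying(Flag::B); // yields int{1}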
template <typename T> std::string vecToString(const std::vector<T> &vec) {
std::string ret;
ret.append("[");
for (auto d : vec) {
ret.append(std::to_string(d));
ret.append(",");
}
if (!vec.empty())
ret.pop_back();
ret.append("]");
return ret;
}
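// e.g., vecToString(std::vector<int>{1, 2, 3}) returns "[1,2,3]";
// an empty vector returns "[]".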
double timeit(
const std::function<void()> &func,
const std::function<void(void)> &sync = []() {}, int warmupRounds = 10,
int timingRounds = 10);
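// Usage sketch: presumably runs `func` warmupRounds times untimed, then
// averages timingRounds timed runs, calling `sync` to flush asynchronous work
// (e.g. a device synchronize). The names below are hypothetical, and the
// exact semantics and units live in the corresponding .cc definition:
//   double t = timeit([&]() { graph->run(); },
//                     [&]() { runtime->sync(); });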
} // namespace infini