forked from jiuyuan/InfiniTensor
support cuda transpose
This commit is contained in:
parent
225a42f22d
commit
664f0dbe02
|
@ -0,0 +1,11 @@
|
|||
#pragma once

#include "operators/transpose.h"
#include "utils/small_array.h"

namespace infini {

// Launches the float32 CUDA transpose kernel (one thread per output element).
//   input/output : device pointers to float buffers
//   nDims        : tensor rank; must be <= SMALL_ARRAY_SIZE
//   size         : total number of elements in the output tensor
//   strides      : input strides permuted into output-axis order
//   outputShape  : dimensions of the output tensor
void transpose_kernel(float *input, float *output, int nDims, int size,
                      SmallArray strides, SmallArray outputShape);

} // namespace infini
|
|
@ -15,7 +15,7 @@ class TransposeObj : public OperatorObj {
|
|||
std::vector<int> getPermute() const { return transposePermute; }
|
||||
|
||||
private:
|
||||
vector<int> transposePermute = {1, 1, 1, 1};
|
||||
vector<int> transposePermute;
|
||||
vector<int> getWorkloadVector() const override;
|
||||
vector<int> getOpAttrVector() const override;
|
||||
};
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
namespace infini {

// Maximum tensor rank supported by kernels taking a SmallArray argument.
#define SMALL_ARRAY_SIZE 8

// Fixed-capacity int array that can be passed to a CUDA kernel BY VALUE
// (std::vector cannot cross the host/device boundary). Callers must
// assert rank <= SMALL_ARRAY_SIZE before filling it.
struct SmallArray {
    int data[SMALL_ARRAY_SIZE];
};

} // namespace infini
|
|
@ -24,4 +24,4 @@ class ClipCuda : public CudaKernelWithoutConfig {
|
|||
// Register the float32 Clip kernel for the CUDA backend.
REGISTER_KERNEL(Device::CUDA, OpType::Clip, DataType::Float32, ClipCuda,
                "Clip_CUDA_Float32");

} // namespace infini
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
#include "core/common.h"
|
||||
#include "core/constants.h"
|
||||
#include "cuda/cuda_common.h"
|
||||
#include <math.h>
|
||||
|
||||
using infini::E_CONSTANT;
|
||||
// Launch configuration: 128 threads per block, each thread processing 4
// elements, i.e. 512 elements handled per block.
constexpr unsigned int num_threads() { return 32 * 4; }
constexpr int thread_work_size() { return 4; }
constexpr int block_work_size() { return thread_work_size() * num_threads(); }
|
||||
|
@ -29,4 +27,4 @@ void clip_kernel(float *input, float *output, int num, float minValue,
|
|||
maxValue);
|
||||
}
|
||||
|
||||
}; // namespace infini
|
||||
} // namespace infini
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
#include "operators/transpose.h"
|
||||
#include "cuda/cuda_kernel_wihtout_config.h"
|
||||
#include "cuda/cuda_runtime.h"
|
||||
#include "cuda/cuda_transpose.h"
|
||||
|
||||
namespace infini {
|
||||
|
||||
class TransposeCuda : public CudaKernelWithoutConfig {
    // Copies the operator's input into its output with axes permuted per
    // op->getPermute(), by launching the float32 CUDA transpose kernel.
    void compute(const Operator &_op,
                 const RuntimeObj *_context) const override {
        auto op = as<TransposeObj>(_op);

        auto input = op->getInputs(0);
        auto output = op->getOutput();
        void *const inputData = input->getRawDataPtr<void *>();
        void *const outputData = output->getRawDataPtr<void *>();
        const auto &inputShape = input->getDims();
        const auto &outputShape = output->getDims();

        const auto &perm = op->getPermute();
        int size = input->size();
        int nDims = input->getDims().size();

        // Row-major strides of the INPUT, then reordered into output-axis
        // order so the kernel can map each flat output index back to the
        // matching input offset.
        SmallArray strides, buffer;
        IT_ASSERT(nDims <= SMALL_ARRAY_SIZE);
        int curStride = 1;
        for (int i = nDims - 1; i >= 0; --i) {
            buffer.data[i] = curStride;
            curStride *= inputShape[i];
        }
        for (int i = 0; i < nDims; ++i) {
            strides.data[i] = buffer.data[perm[i]];
        }

        // Output dims copied into a POD array so they can be passed to the
        // kernel by value.
        SmallArray outputDims;
        for (int i = 0; i < nDims; ++i) {
            outputDims.data[i] = outputShape[i];
        }

        transpose_kernel((float *)inputData, (float *)outputData, nDims, size,
                         strides, outputDims);
    }
};

REGISTER_KERNEL(Device::CUDA, OpType::Transpose, DataType::Float32,
                TransposeCuda, "Transpose_CUDA_Float32");

} // namespace infini
|
|
@ -0,0 +1,37 @@
|
|||
#include "core/common.h"
|
||||
#include "cuda/cuda_common.h"
|
||||
#include "utils/small_array.h"
|
||||
|
||||
// Launch configuration: 128 threads per block; block_work_size() (= 512)
// is used as the per-block element count when sizing the grid.
constexpr unsigned int num_threads() { return 32 * 4; }
constexpr int thread_work_size() { return 4; }
constexpr int block_work_size() { return thread_work_size() * num_threads(); }
|
||||
|
||||
// One thread per output element. Each thread decomposes its flat output
// index into per-axis coordinates (innermost axis first) and accumulates
// the matching input offset through the permuted strides.
__global__ void _transpose_kernel(float *input, float *output, int nDims,
                                  int size, infini::SmallArray strides,
                                  infini::SmallArray outputShape) {
    int outputIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (outputIdx < size) { // grid may overshoot the element count
        int inputIdx = 0;
        int v = outputIdx;
        for (int i = nDims - 1; i >= 0; --i) {
            inputIdx += v % outputShape.data[i] * strides.data[i];
            v /= outputShape.data[i];
        }
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
        // Read-only-cache load; `input` is never written by this kernel.
        output[outputIdx] = __ldg(input + inputIdx);
#else
        output[outputIdx] = input[inputIdx];
#endif
    }
}
|
||||
|
||||
namespace infini {
// Host-side launcher: one thread per output element.
//
// BUG FIX: the original launched `<<<blocksize, gridsize>>>`, swapping the
// grid and block dimensions. CUDA launch syntax is <<<gridDim, blockDim>>>,
// so the old code ran a fixed 512-block grid whose block size grew with the
// tensor — failing outright once gridsize exceeded the 1024-threads-per-block
// limit. The arguments are now in the correct order.
void transpose_kernel(float *input, float *output, int nDims, int size,
                      SmallArray strides, SmallArray outputShape) {
    int blocksize = block_work_size();
    // Ceil-division so a partial final block covers the tail elements.
    int gridsize = (size + block_work_size() - 1) / block_work_size();
    _transpose_kernel<<<gridsize, blocksize>>>(input, output, nDims, size,
                                               strides, outputShape);
}

} // namespace infini
|
|
@ -4,13 +4,7 @@ namespace infini {
|
|||
// Constructs a Transpose operator with an arbitrary-rank axis permutation.
// (The earlier version only accepted exactly 4 axes and copied them
// element-wise; the permutation is now stored directly.)
TransposeObj::TransposeObj(GraphObj *graph, Tensor input, Tensor output,
                           vector<int> permute)
    : OperatorObj(OpType::Transpose, {input}, {output}) {
    // Validity of the permutation against the input rank is checked by
    // checkValid() -> inferShape() below.
    transposePermute = std::move(permute);
    IT_ASSERT(checkValid(graph));
}
|
||||
|
||||
|
@ -20,7 +14,8 @@ TransposeObj::inferShape(const TensorVec &inputs) const {
|
|||
auto input = A->getDims();
|
||||
auto output = input;
|
||||
|
||||
for (int i = 0; i < 4; ++i) {
|
||||
auto nDims = input.size();
|
||||
for (size_t i = 0; i < nDims; ++i) {
|
||||
output[i] = input[transposePermute[i]];
|
||||
}
|
||||
return {{output}};
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
#include "core/graph.h"
|
||||
#include "core/kernel.h"
|
||||
#include "core/runtime.h"
|
||||
#include "cuda/cuda_runtime.h"
|
||||
#include "operators/transpose.h"
|
||||
|
||||
#include "test.h"
|
||||
|
||||
namespace infini {
|
||||
|
||||
template <class T>
|
||||
void testTranspose(
|
||||
const std::function<void(void *, size_t, DataType)> &generator,
|
||||
const Shape &shape) {
|
||||
// Runtime
|
||||
Runtime cpuRuntime = NativeCpuRuntimeObj::getInstance();
|
||||
auto cudaRuntime = make_ref<CudaRuntimeObj>();
|
||||
|
||||
// Build input data on CPU
|
||||
Tensor inputCpu = make_ref<TensorObj>(shape, DataType::Float32, cpuRuntime);
|
||||
inputCpu->dataMalloc();
|
||||
inputCpu->setData(generator);
|
||||
|
||||
// GPU
|
||||
Graph cudaGraph = make_ref<GraphObj>(cudaRuntime);
|
||||
auto inputGpu = cudaGraph->cloneTensor(inputCpu);
|
||||
vector<int> permute = {0, 2, 1, 3};
|
||||
auto gpuOp = cudaGraph->addOp<T>(inputGpu, nullptr, permute);
|
||||
cudaGraph->dataMalloc();
|
||||
cudaRuntime->run(cudaGraph);
|
||||
auto outputGpu = gpuOp->getOutput();
|
||||
auto oCpu = outputGpu->clone(cpuRuntime);
|
||||
// Check
|
||||
// inputCpu->printData();
|
||||
// oCpu->printData();
|
||||
EXPECT_TRUE(oCpu->equalData(vector<float>{0, 1, 2, 3, 12, 13, 14, 15,
|
||||
4, 5, 6, 7, 16, 17, 18, 19,
|
||||
8, 9, 10, 11, 20, 21, 22, 23}));
|
||||
}
|
||||
|
||||
TEST(cuda_Transpose, run) {
|
||||
testTranspose<TransposeObj>(IncrementalGenerator(), Shape{1, 2, 3, 4});
|
||||
}
|
||||
|
||||
} // namespace infini
|
Loading…
Reference in New Issue