forked from jiuyuan/InfiniTensor
fix format
This commit is contained in:
parent b1bdbbf478
commit 412f301323
@@ -1,7 +1,6 @@
 #pragma once
-#include "core/runtime.h"
 #include "ascend/ascend_common.h"
 
 #include "core/runtime.h"
 
 #define CHECK_RET(cond, return_expr) \
     do { \
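Note: the hunk above ends at the opening `do {`, so the body of CHECK_RET is not visible in this diff. Purely as an assumed sketch (modeled on the CANN aclnn sample code, not taken from this header), CHECK_RET and the LOG_PRINT helper it is paired with in the constructor below are commonly defined along these lines:

#include <cstdio> // printf used by the assumed LOG_PRINT

// Assumed definition: bail out via `return_expr` when `cond` does not hold.
#define CHECK_RET(cond, return_expr)                                           \
    do {                                                                       \
        if (!(cond)) {                                                         \
            return_expr;                                                       \
        }                                                                      \
    } while (0)

// Assumed definition: thin printf-style logging wrapper used with CHECK_RET.
#define LOG_PRINT(message, ...)                                                \
    do {                                                                       \
        printf(message, ##__VA_ARGS__);                                        \
    } while (0)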
@@ -25,18 +24,22 @@ class ASCENDRuntimeObj : public RuntimeObj {
     size_t workspaceSize;
 
   public:
-    ASCENDRuntimeObj(int deviceId = 0)
-        : RuntimeObj(Device::ASCEND, deviceId) {
+    ASCENDRuntimeObj(int deviceId = 0) : RuntimeObj(Device::ASCEND, deviceId) {
         auto ret = aclrtSetDevice(deviceId);
-        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret));
+        CHECK_RET(ret == ACL_SUCCESS,
+                  LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret));
         ret = aclrtCreateContext(&aclnn, deviceId);
-        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateContext failed. ERROR: %d\n", ret));
+        CHECK_RET(ret == ACL_SUCCESS,
+                  LOG_PRINT("aclrtCreateContext failed. ERROR: %d\n", ret));
         ret = aclrtSetCurrentContext(aclnn);
-        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetCurrentContext failed. ERROR: %d\n", ret));
+        CHECK_RET(ret == ACL_SUCCESS,
+                  LOG_PRINT("aclrtSetCurrentContext failed. ERROR: %d\n", ret));
         ret = aclrtCreateStream(&stream);
-        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret));
+        CHECK_RET(ret == ACL_SUCCESS,
+                  LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret));
         ret = aclInit(nullptr);
-        CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret));
+        CHECK_RET(ret == ACL_SUCCESS,
+                  LOG_PRINT("aclInit failed. ERROR: %d\n", ret));
         // 10GB for Longformer
         // size_t longformerNum = 3lu * (1 << 30);
         workspaceSize = 3ll << 30; // 3 GB
@@ -73,19 +76,19 @@ class ASCENDRuntimeObj : public RuntimeObj {
 
     void copyBlobFromCPU(void *dst, const void *src,
                          size_t bytes) const override {
-        aclrtMemcpy(dst, 1024*1024*1024, const_cast<void *>(src), bytes,
+        aclrtMemcpy(dst, 1024 * 1024 * 1024, const_cast<void *>(src), bytes,
                     ACL_MEMCPY_HOST_TO_DEVICE);
     }
 
     void copyBlobToCPU(void *dst, const void *src,
                        size_t bytes) const override {
-        aclrtMemcpy(dst, 1024*1024*1024, const_cast<void *>(src), bytes,
+        aclrtMemcpy(dst, 1024 * 1024 * 1024, const_cast<void *>(src), bytes,
                     ACL_MEMCPY_DEVICE_TO_HOST);
     }
 
     void copyBlobInsideRuntime(void *dst, const void *src,
                                size_t bytes) const override {
-        aclrtMemcpy(dst, 1024*1024*1024, const_cast<void *>(src), bytes,
+        aclrtMemcpy(dst, 1024 * 1024 * 1024, const_cast<void *>(src), bytes,
                     ACL_MEMCPY_DEVICE_TO_DEVICE);
     }
 
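Note on the aclrtMemcpy calls above: the second argument (destMax) is documented as the capacity of the destination buffer, and this header passes a fixed 1024 * 1024 * 1024 bytes for it. The sketch below is a minimal, standalone host-to-device round trip that passes the real buffer sizes instead; roundTrip is a hypothetical helper shown only for illustration, and it assumes aclInit and aclrtSetDevice have already been called.

#include <acl/acl.h>

#include <vector>

// Hypothetical helper: copy `bytes` bytes host -> device -> host and verify.
static bool roundTrip(size_t bytes) {
    void *dev = nullptr;
    if (aclrtMalloc(&dev, bytes, ACL_MEM_MALLOC_HUGE_FIRST) != ACL_SUCCESS)
        return false;
    std::vector<char> host(bytes, 0x5a), back(bytes, 0);
    // destMax (2nd argument) is the true capacity of each destination buffer.
    bool ok = aclrtMemcpy(dev, bytes, host.data(), bytes,
                          ACL_MEMCPY_HOST_TO_DEVICE) == ACL_SUCCESS &&
              aclrtMemcpy(back.data(), back.size(), dev, bytes,
                          ACL_MEMCPY_DEVICE_TO_HOST) == ACL_SUCCESS;
    aclrtFree(dev);
    return ok && host == back;
}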
@@ -1,7 +1,7 @@
 #include "operators/unary.h"
+#include "aclnnop/level2/aclnn_relu.h"
 #include "ascend/ascend_kernel_without_config.h"
 #include "ascend/ascend_runtime.h"
-#include "aclnnop/level2/aclnn_relu.h"
 
 namespace infini {
 class ReluAclnn : public ASCENDKernelWithoutConfig {
@@ -15,38 +15,45 @@ class ReluAclnn : public ASCENDKernelWithoutConfig {
 
         auto a = op->getInputs(0)->getDims();
         std::vector<int64_t> aDim(a.size(), 1);
-        for(size_t i = 0; i < a.size(); ++i) {
+        for (size_t i = 0; i < a.size(); ++i) {
             aDim[i] = int64_t(a[i]);
         }
         auto aS = op->getInputs(0)->getStride();
         std::vector<int64_t> aStride(aS.size(), 1);
-        for(size_t i = 0; i < aS.size(); ++i) {
+        for (size_t i = 0; i < aS.size(); ++i) {
             aStride[i] = int64_t(aS[i]);
         }
         auto c = op->getInputs(0)->getDims();
         std::vector<int64_t> cDim(c.size(), 1);
-        for(size_t i = 0; i < c.size(); ++i) {
+        for (size_t i = 0; i < c.size(); ++i) {
             cDim[i] = int64_t(c[i]);
         }
         auto cS = op->getInputs(0)->getStride();
         std::vector<int64_t> cStride(cS.size(), 1);
-        for(size_t i = 0; i < cS.size(); ++i) {
+        for (size_t i = 0; i < cS.size(); ++i) {
             cStride[i] = int64_t(cS[i]);
         }
 
-        auto input = aclCreateTensor(aDim.data(), aDim.size(), ACL_FLOAT, aStride.data(), 0, aclFormat::ACL_FORMAT_ND, aDim.data(), aDim.size(), aData);
-        auto output = aclCreateTensor(cDim.data(), cDim.size(), ACL_FLOAT, cStride.data(), 0, aclFormat::ACL_FORMAT_ND, cDim.data(), cDim.size(), cData);
+        auto input = aclCreateTensor(
+            aDim.data(), aDim.size(), ACL_FLOAT, aStride.data(), 0,
+            aclFormat::ACL_FORMAT_ND, aDim.data(), aDim.size(), aData);
+        auto output = aclCreateTensor(
+            cDim.data(), cDim.size(), ACL_FLOAT, cStride.data(), 0,
+            aclFormat::ACL_FORMAT_ND, cDim.data(), cDim.size(), cData);
 
         uint64_t workspaceSize = 0;
-        aclOpExecutor* executor;
+        aclOpExecutor *executor;
 
-        auto ret = aclnnReluGetWorkspaceSize(input, output, &workspaceSize, &executor);
-        void* workspaceAddr = nullptr;
+        auto ret =
+            aclnnReluGetWorkspaceSize(input, output, &workspaceSize, &executor);
+        void *workspaceAddr = nullptr;
         if (workspaceSize > 0) {
-            ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
+            ret = aclrtMalloc(&workspaceAddr, workspaceSize,
+                              ACL_MEM_MALLOC_HUGE_FIRST);
         }
         assert(ret == ACL_SUCCESS);
-        ret = aclnnRelu(workspaceAddr, workspaceSize, executor, context->ASCENDHandle());
+        ret = aclnnRelu(workspaceAddr, workspaceSize, executor,
+                        context->ASCENDHandle());
         assert(ret == ACL_SUCCESS);
         ret = aclrtSynchronizeStream(context->ASCENDHandle());
         assert(ret == ACL_SUCCESS);
@@ -57,4 +64,4 @@ class ReluAclnn : public ASCENDKernelWithoutConfig {
 
 REGISTER_KERNEL(Device::ASCEND, OpType::Relu, DataType::Float32, ReluAclnn,
                 "relu_ASCEND_float");
-};
+}; // namespace infini
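The kernel above follows the usual two-phase aclnn calling pattern: build aclTensor views, query the workspace with aclnnReluGetWorkspaceSize, allocate it with aclrtMalloc, launch aclnnRelu on the stream, then synchronize. A condensed, self-contained sketch of that pattern is shown below; runRelu, its device-pointer arguments, and the contiguous-stride computation are illustrative assumptions, and device, context, and stream setup is presumed to have been done elsewhere (as in the runtime header above).

#include "aclnnop/level2/aclnn_relu.h"
#include <acl/acl.h>

#include <cassert>
#include <vector>

// Illustrative only: run aclnnRelu on pre-allocated device buffers.
// `in` and `out` are device pointers, `stream` an existing aclrtStream.
// aclCreateTensor/aclDestroyTensor are assumed to be visible through the
// aclnn headers, just as in the kernel above.
void runRelu(void *in, void *out, aclrtStream stream,
             const std::vector<int64_t> &dims) {
    // Contiguous row-major strides derived from the dims.
    std::vector<int64_t> strides(dims.size(), 1);
    for (int i = (int)dims.size() - 2; i >= 0; --i)
        strides[i] = strides[i + 1] * dims[i + 1];

    aclTensor *input =
        aclCreateTensor(dims.data(), dims.size(), ACL_FLOAT, strides.data(), 0,
                        aclFormat::ACL_FORMAT_ND, dims.data(), dims.size(), in);
    aclTensor *output = aclCreateTensor(
        dims.data(), dims.size(), ACL_FLOAT, strides.data(), 0,
        aclFormat::ACL_FORMAT_ND, dims.data(), dims.size(), out);

    // Phase 1: ask the operator how much workspace it needs.
    uint64_t workspaceSize = 0;
    aclOpExecutor *executor = nullptr;
    auto ret =
        aclnnReluGetWorkspaceSize(input, output, &workspaceSize, &executor);
    assert(ret == ACL_SUCCESS);

    void *workspace = nullptr;
    if (workspaceSize > 0) {
        ret = aclrtMalloc(&workspace, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
        assert(ret == ACL_SUCCESS);
    }

    // Phase 2: launch on the stream and wait for completion.
    ret = aclnnRelu(workspace, workspaceSize, executor, stream);
    assert(ret == ACL_SUCCESS);
    ret = aclrtSynchronizeStream(stream);
    assert(ret == ACL_SUCCESS);

    if (workspace)
        aclrtFree(workspace);
    aclDestroyTensor(input);
    aclDestroyTensor(output);
}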