modified format

xgqdut2016 2024-05-07 16:31:53 +08:00
parent 9384cec7de
commit 5747eb8f7d
1 changed file with 25 additions and 29 deletions


@@ -1,7 +1,7 @@
+#include "operators/pad.h"
 #include "aclnnop/level2/aclnn_reflection_pad2d.h"
 #include "ascend/ascend_kernel_without_config.h"
 #include "ascend/ascend_runtime.h"
-#include "operators/pad.h"

 namespace infini {
@@ -12,19 +12,18 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
         auto op = as<PadObj>(_op);
         auto context = dynamic_cast<const ASCENDRuntimeObj *>(_context);
         void *const aData = (op->getInputs(0)->getRawDataPtr<void *>());
         void *const cData = (op->getOutput()->getRawDataPtr<void *>());
         auto inputD = op->getInputs(0)->getDims();
         auto inputS = op->getInputs(0)->getStride();
         auto outD = op->getOutput()->getDims();
         auto outS = op->getOutput()->getStride();
         std::vector<int64_t> inputDim = castTo64(inputD);
         std::vector<int64_t> inputStride = castTo64(inputS);
         std::vector<int64_t> outputDim = castTo64(outD);
         std::vector<int64_t> outputStride = castTo64(outS);
@@ -32,7 +31,7 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
             aclCreateTensor(inputDim.data(), inputDim.size(), ACL_FLOAT,
                             inputStride.data(), 0, aclFormat::ACL_FORMAT_NCHW,
                             inputDim.data(), inputDim.size(), aData);
         auto outputTensor =
             aclCreateTensor(outputDim.data(), outputDim.size(), ACL_FLOAT,
                             outputStride.data(), 0, aclFormat::ACL_FORMAT_NCHW,
@@ -40,41 +39,38 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
         uint64_t workspaceSize = 0;
         aclOpExecutor *executor;
         std::vector<int> intPads = op->getPads();
         std::size_t length = intPads.size();
         std::vector<int64_t> pads(4);
-        if(length == 8){
+        if (length == 8) {
             std::size_t halfLen = intPads.size() / 2;
             bool condition = true;
-            //std::cout << "Length of intPads: " << length << std::endl;
+            // std::cout << "Length of intPads: " << length << std::endl;
             for (std::size_t i = 0; i < halfLen; ++i) {
                 condition = (intPads[i] == intPads[i + 4]);
-                //std::cout << "intPads[" << i << "]: " << intPads[i] << std::endl;
+                // std::cout << "intPads[" << i << "]: " << intPads[i] <<
+                // std::endl;
             }
             assert(condition);
             pads[0] = intPads[2];
             pads[1] = intPads[3];
             pads[2] = intPads[6];
             pads[3] = intPads[7];
-        }
-        else if (length == 4){
+        } else if (length == 4) {
             for (std::size_t i = 0; i < 4; ++i) {
                 pads[i] = intPads[i];
             }
         }
         aclIntArray *padding = aclCreateIntArray(pads.data(), 4);
         auto ret = aclnnReflectionPad2dGetWorkspaceSize(
-            inputTensor, padding , outputTensor,
-            &workspaceSize, &executor);
+            inputTensor, padding, outputTensor, &workspaceSize, &executor);
         void *workspaceAddr = nullptr;
         if (workspaceSize > 0) {
             workspaceAddr = context->getWorkspace(workspaceSize);
@@ -85,7 +81,7 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
         }
         assert(ret == ACL_SUCCESS);
         ret = aclnnReflectionPad2d(workspaceAddr, workspaceSize, executor,
                                    context->ASCENDHandle());
         assert(ret == ACL_SUCCESS);
         ret = aclrtSynchronizeStream(context->ASCENDHandle());
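
The core of this kernel is how the operator's pad attribute is reduced to the four-element array passed to aclnnReflectionPad2dGetWorkspaceSize. Below is a minimal standalone sketch of that conversion, assuming op->getPads() returns ONNX-style pads for an NCHW tensor ([n_begin, c_begin, h_begin, w_begin, n_end, c_end, h_end, w_end]). The helper name convertPads and the example values are made up for illustration, and unlike the loop in the kernel above it accumulates the symmetry check over every dimension instead of keeping only the last comparison.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical standalone version of the pad conversion in PadAclnn::compute.
// Assumes ONNX-style pads for an NCHW tensor:
//   [n_begin, c_begin, h_begin, w_begin, n_end, c_end, h_end, w_end]
std::vector<int64_t> convertPads(const std::vector<int> &intPads) {
    std::vector<int64_t> pads(4);
    if (intPads.size() == 8) {
        bool condition = true;
        // Fold the check over every begin/end pair so an asymmetric pad on any
        // dimension trips the assert, not just one on the last dimension.
        for (std::size_t i = 0; i < 4; ++i) {
            condition &= (intPads[i] == intPads[i + 4]);
        }
        assert(condition && "expects symmetric begin/end pads");
        pads[0] = intPads[2]; // H begin
        pads[1] = intPads[3]; // W begin
        pads[2] = intPads[6]; // H end
        pads[3] = intPads[7]; // W end
    } else if (intPads.size() == 4) {
        for (std::size_t i = 0; i < 4; ++i) {
            pads[i] = intPads[i];
        }
    }
    return pads;
}

int main() {
    // Pad H by 1 on each side and W by 2 on each side of an NCHW tensor.
    std::vector<int> onnxPads = {0, 0, 1, 2, 0, 0, 1, 2};
    for (int64_t p : convertPads(onnxPads)) {
        std::cout << p << " "; // prints: 1 2 1 2
    }
    std::cout << std::endl;
    return 0;
}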