forked from jiuyuan/InfiniTensor

commit 5747eb8f7d (parent 9384cec7de): modified format
@@ -1,7 +1,7 @@
-#include "operators/pad.h"
 #include "aclnnop/level2/aclnn_reflection_pad2d.h"
 #include "ascend/ascend_kernel_without_config.h"
 #include "ascend/ascend_runtime.h"
+#include "operators/pad.h"
 
 namespace infini {
 
@@ -12,7 +12,6 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
         auto op = as<PadObj>(_op);
         auto context = dynamic_cast<const ASCENDRuntimeObj *>(_context);
 
-
         void *const aData = (op->getInputs(0)->getRawDataPtr<void *>());
         void *const cData = (op->getOutput()->getRawDataPtr<void *>());
 
@@ -45,16 +44,16 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
 
         std::size_t length = intPads.size();
         std::vector<int64_t> pads(4);
-        if(length == 8){
+        if (length == 8) {
             std::size_t halfLen = intPads.size() / 2;
             bool condition = true;
-            //std::cout << "Length of intPads: " << length << std::endl;
+            // std::cout << "Length of intPads: " << length << std::endl;
 
-
             for (std::size_t i = 0; i < halfLen; ++i) {
                 condition = (intPads[i] == intPads[i + 4]);
 
-                //std::cout << "intPads[" << i << "]: " << intPads[i] << std::endl;
+                // std::cout << "intPads[" << i << "]: " << intPads[i] <<
+                // std::endl;
             }
             assert(condition);
 
@@ -62,19 +61,16 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
             pads[1] = intPads[3];
             pads[2] = intPads[6];
             pads[3] = intPads[7];
-        }
-        else if (length == 4){
+        } else if (length == 4) {
             for (std::size_t i = 0; i < 4; ++i) {
 
                 pads[i] = intPads[i];
-
             }
         }
 
         aclIntArray *padding = aclCreateIntArray(pads.data(), 4);
         auto ret = aclnnReflectionPad2dGetWorkspaceSize(
-            inputTensor, padding , outputTensor,
-            &workspaceSize, &executor);
+            inputTensor, padding, outputTensor, &workspaceSize, &executor);
         void *workspaceAddr = nullptr;
         if (workspaceSize > 0) {
             workspaceAddr = context->getWorkspace(workspaceSize);
@@ -85,7 +81,7 @@ class PadAclnn : public ASCENDKernelWithoutConfig {
         }
         assert(ret == ACL_SUCCESS);
         ret = aclnnReflectionPad2d(workspaceAddr, workspaceSize, executor,
                                    context->ASCENDHandle());
         assert(ret == ACL_SUCCESS);
 
         ret = aclrtSynchronizeStream(context->ASCENDHandle());
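For context, a minimal standalone sketch (not part of the commit) of the pad-vector conversion this kernel performs before calling aclCreateIntArray. It assumes intPads follows the ONNX begins-then-ends layout for an NCHW tensor and that pads[0] is assigned from intPads[2] (that assignment falls between the hunks shown above); the helper name convertPadsForPad2d and the std::vector<int> element type are illustrative only.

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch: reduce an 8-element ONNX-style pad vector
    // [n_begin, c_begin, h_begin, w_begin, n_end, c_end, h_end, w_end]
    // to the 4 values handed to aclnnReflectionPad2d, mirroring the kernel.
    std::array<int64_t, 4> convertPadsForPad2d(const std::vector<int> &intPads) {
        std::array<int64_t, 4> pads{};
        if (intPads.size() == 8) {
            // The kernel accepts the 8-element form only when begin and end
            // pads agree per dimension. (In the diff, `condition` is
            // overwritten on every iteration, so effectively only the last
            // dimension is checked; the accumulation below is stricter.)
            bool condition = true;
            for (std::size_t i = 0; i < 4; ++i)
                condition = condition && (intPads[i] == intPads[i + 4]);
            assert(condition);
            pads[0] = intPads[2]; // H begin (assumed, outside the shown hunks)
            pads[1] = intPads[3]; // W begin
            pads[2] = intPads[6]; // H end
            pads[3] = intPads[7]; // W end
        } else if (intPads.size() == 4) {
            for (std::size_t i = 0; i < 4; ++i)
                pads[i] = intPads[i];
        }
        return pads;
    }

The resulting four values are what aclCreateIntArray(pads.data(), 4) packages for aclnnReflectionPad2dGetWorkspaceSize in the diff above.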