forked from jiuyuan/InfiniTensor

fix resize

commit a1a68d3624, parent 377b3bf391

This commit switches the Resize operator's sizes from uint32 to int64 (the element type the ONNX Resize specification requires for the sizes input), changes the default keep_aspect_ratio_policy from "none" to "stretch" (the ONNX default), replaces assert with CHECK_RET/LOG_PRINT error handling in the ASCEND AvgPooling kernel and flips its int8_t argument from 1 to 0, adds the linear ("bilinear") coefficient mode to the ASCEND resize kernel, and updates the ASCEND pooling test to run MaxPool with aclInit/aclFinalize enabled.
@@ -75,7 +75,7 @@ class GraphHandlerObj {
     Tensor reshape(Tensor data, Tensor reshaped, Shape shape);
     Tensor resize(Tensor input, Tensor output,
                   const std::optional<vector<int>> &axes, Tensor sizes,
-                  Tensor scales, Tensor roi, vector<uint32_t> sizes_,
+                  Tensor scales, Tensor roi, vector<int64_t> sizes_,
                   vector<float> scales_, vector<float> roi_, string mode,
                   string ratioPolicy, string nearestMode,
                   string coordTransMode);
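For context on the type change above and in the hunks below: the ONNX Resize operator defines its sizes input as an int64 tensor, so carrying the values as uint32_t silently narrows them. A minimal standalone sketch of the difference in plain C++, using a hypothetical rawSizes buffer and no InfiniTensor APIs:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Hypothetical raw data of an ONNX "sizes" initializer; the ONNX spec
    // declares this input as tensor(int64).
    const int64_t rawSizes[] = {1, 3, 1080, 1920};

    // Old approach: collecting into uint32_t compiles, but silently truncates
    // any value outside the uint32 range and drops the sign.
    std::vector<uint32_t> narrowed(rawSizes, rawSizes + 4);
    (void)narrowed; // kept only to show the old element type

    // New approach: keep the int64 width end to end, matching the ONNX type.
    std::vector<int64_t> sizes_(rawSizes, rawSizes + 4);

    for (auto s : sizes_)
        std::cout << s << ' ';
    std::cout << '\n';
    return 0;
}
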
@@ -632,7 +632,7 @@ class OnnxStub:
                     "cubic_coeff_a": -0.75,
                     "exclude_outside": 0,
                     "extrapolation_value": 0.0,
-                    "keep_aspect_ratio_policy": "none",
+                    "keep_aspect_ratio_policy": "stretch",
                     "mode": "nearest",
                     "nearest_mode": "none",
                 },
@@ -284,13 +284,13 @@ Tensor GraphHandlerObj::reshape(Tensor data, Tensor reshaped, Shape shape) {
 Tensor GraphHandlerObj::resize(Tensor input, Tensor output,
                                const std::optional<vector<int>> &axes,
                                Tensor sizes, Tensor scales, Tensor roi,
-                               vector<uint32_t> sizes_, vector<float> scales_,
+                               vector<int64_t> sizes_, vector<float> scales_,
                                vector<float> roi_, string mode,
                                string ratioPolicy, string nearestMode,
                                string coordTransMode) {
     if (sizes_.size() > 0) {
         sizes->dataMalloc();
-        sizes->copyin<uint32_t>(sizes_);
+        sizes->copyin<int64_t>(sizes_);
     }
     if (scales_.size() > 0) {
         scales->dataMalloc();
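The copyin template argument now matches the tensor's new element type; if it did not, the number of bytes written into the tensor's blob would disagree with its declared dtype. A minimal sketch of that idea with a hypothetical Blob type and copyin-like helper (not InfiniTensor's actual implementation):

#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical blob: a raw byte buffer standing in for a tensor's storage.
struct Blob {
    std::vector<unsigned char> bytes;
};

// A copyin-like helper: the template parameter fixes how many bytes each
// element occupies, so it must agree with the tensor's declared dtype.
template <typename T> void copyin(Blob &blob, const std::vector<T> &host) {
    blob.bytes.resize(host.size() * sizeof(T));
    std::memcpy(blob.bytes.data(), host.data(), blob.bytes.size());
}

int main() {
    Blob sizes;
    std::vector<int64_t> sizes_ = {1, 3, 224, 224};
    copyin<int64_t>(sizes, sizes_); // 4 elements * 8 bytes = 32 bytes
    return sizes.bytes.size() == 32 ? 0 : 1;
}
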
@@ -52,9 +52,12 @@ class AvgPooling : public ASCENDKernelWithoutConfig {

         auto ret = aclnnAvgPool2dGetWorkspaceSize(
             selfTensor, kernelSize, strides, paddings, false, true,
-            divisorOverride, int8_t(1), outputTensor, &workspaceSize,
+            divisorOverride, int8_t(0), outputTensor, &workspaceSize,
             &executor);
-        assert(ret == ACL_SUCCESS);
+        CHECK_RET(
+            ret == ACL_SUCCESS,
+            LOG_PRINT("aclnnAvgPool2dGetWorkspaceSize failed. ERROR: %d\n",
+                      ret));

         void *workspaceAddr = nullptr;
         if (workspaceSize > 0) {
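The assert calls above are replaced by CHECK_RET/LOG_PRINT, which keep reporting failures in release builds where assert compiles away. The macros themselves are not part of this diff; a common shape for them in Ascend ACL sample code, shown here only as a hedged sketch of what the repository likely defines, is:

#include <cstdio>

// Hedged sketch: typical definitions from Ascend ACL samples; the actual
// macros used by this repository may differ.
#define LOG_PRINT(message, ...)                                                \
    do {                                                                       \
        printf(message, ##__VA_ARGS__);                                        \
    } while (0)

#define CHECK_RET(cond, return_expr)                                           \
    do {                                                                       \
        if (!(cond)) {                                                         \
            return_expr;                                                       \
        }                                                                      \
    } while (0)

static void demo(int ret) {
    // Mirrors the kernel's call sites: log and continue when ret is nonzero.
    CHECK_RET(ret == 0, LOG_PRINT("call failed. ERROR: %d\n", ret));
}

int main() {
    demo(0);
    demo(-1); // prints "call failed. ERROR: -1"
    return 0;
}
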
@@ -63,10 +66,12 @@ class AvgPooling : public ASCENDKernelWithoutConfig {

         ret = aclnnAvgPool2d(workspaceAddr, workspaceSize, executor,
                              context->ASCENDHandle());
-        assert(ret == ACL_SUCCESS);
+        CHECK_RET(ret == ACL_SUCCESS,
+                  LOG_PRINT("aclnnAvgPool2d failed. ERROR: %d\n", ret));

         ret = aclrtSynchronizeStream(context->ASCENDHandle());
-        assert(ret == ACL_SUCCESS);
+        CHECK_RET(ret == ACL_SUCCESS,
+                  LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret));

         // aclDestroyTensor(selfTensor);
         // aclDestroyTensor(outputTensor);

@@ -22,6 +22,9 @@ class ResizeAclnn : public ASCENDKernelWithoutConfig {
         case ResizeObj::ECoeffMode::nearest:
             mode = "nearest";
             break;
+        case ResizeObj::ECoeffMode::linear:
+            mode = "bilinear";
+            break;
         default:
             IT_TODO_HALT();
         }

@@ -101,8 +101,9 @@ void ResizeObj::InitBySizes(Tensor input, Tensor sizes,
     // copy sizes data to host.
     IT_ASSERT(sizes->getDataBlob() != nullptr);
     Runtime runtime = NativeCpuRuntimeObj::getInstance();
-    std::shared_ptr<int> dataObj((int *)runtime->alloc(sizes->getBytes()),
-                                 [&](int *p) { runtime->dealloc(p); });
+    std::shared_ptr<int64_t> dataObj(
+        (int64_t *)runtime->alloc(sizes->getBytes()),
+        [&](int64_t *p) { runtime->dealloc(p); });
     auto data = dataObj.get();
     sizes->getRuntime()->copyBlobToCPU(
         (void *)data, sizes->getRawDataPtr<void *>(), sizes->getBytes());
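The host staging buffer keeps the same ownership pattern, only retyped to int64_t: a shared_ptr that owns raw memory from the runtime allocator and releases it through a custom deleter. A minimal standalone sketch of the idiom, with malloc/free standing in for the runtime's alloc/dealloc pair:

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>

int main() {
    const std::size_t count = 4;
    const std::size_t bytes = count * sizeof(int64_t);

    // shared_ptr owns a raw allocation; the custom deleter releases it with
    // the matching deallocation function, mirroring runtime->alloc/dealloc.
    std::shared_ptr<int64_t> dataObj(
        static_cast<int64_t *>(std::malloc(bytes)),
        [](int64_t *p) { std::free(p); });

    // Fill the buffer the way copyBlobToCPU would, then read it back.
    const int64_t src[count] = {1, 3, 224, 224};
    std::memcpy(dataObj.get(), src, bytes);
    return dataObj.get()[3] == 224 ? 0 : 1;
}
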
@@ -193,7 +194,7 @@ vector<DataType> ResizeObj::inferDataType(const TensorVec &inputs) const {
     }
     if (isResizeBySizes()) {
         auto sizes = inputs[1];
-        IT_ASSERT(sizes && sizes->getDType() == DataType::UInt32);
+        IT_ASSERT(sizes && sizes->getDType() == DataType::Int64);
     } else {
         auto scales = inputs[1];
         IT_ASSERT(scales && scales->getDType() == DataType::Float32);

@@ -220,8 +221,7 @@ optional<vector<Shape>> ResizeObj::inferShape(const TensorVec &inputs) {

 std::string ResizeObj::toString() const {
     std::ostringstream os;
-    os << "Resize"
-       << "[" << getGuid() << "]";
+    os << "Resize" << "[" << getGuid() << "]";
     os << "(";
     os << vecToString(inputs[0]->getDims()) << ",";
     if (inputs.size() == 3) {

@@ -26,7 +26,7 @@ void testPooling(const std::function<void(void *, size_t, DataType)> &generator,
     auto inputNpu = npuGraph->cloneTensor(inputCpu);
     auto npuOp =
         npuGraph->addOp<T>(inputNpu, nullptr, 3, 3, 1, 1, 1, 1, 2, 2, 0);
-    // npuGraph->addOp<T>(inputNpu, nullptr, 2, 2, 1, 1, 0, 0, 1, 1, 0);
+    // npuGraph->addOp<T>(inputNpu, nullptr, 2, 2, 1, 1, 0, 0, 1, 1, 0);
     npuGraph->dataMalloc();
     inputNpu->setData(generator);
     npuRuntime->run(npuGraph);

@@ -39,10 +39,10 @@ void testPooling(const std::function<void(void *, size_t, DataType)> &generator,
 }

 TEST(cnnl_Pooling, run) {
-    // aclInit(nullptr);
-    // testPooling<MaxPoolObj>(IncrementalGenerator(), Shape{1, 2, 5, 5});
-    testPooling<AvgPoolObj>(IncrementalGenerator(), Shape{1, 2, 5, 5});
-    // aclFinalize();
+    aclInit(nullptr);
+    testPooling<MaxPoolObj>(IncrementalGenerator(), Shape{1, 3, 5, 5});
+    //testPooling<AvgPoolObj>(IncrementalGenerator(), Shape{1, 2, 5, 5});
+    aclFinalize();
 }

 } // namespace infini