Fold leaks that are referenced by other leaks

Find leaks that have no references at all, or are only referenced by
other leaks in the same strongly connected component, and hide all
referenced leaks.

Bug: 27208635
Change-Id: Ifbfd14e24e2ba0f8af7c1b887e57f34362720f2d
Colin Cross 2016-03-02 17:53:39 -08:00
parent b8e20f557f
commit 8e8f34c558
10 changed files with 923 additions and 41 deletions
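
The effect of the new LeakFolding pass is easiest to see on a tiny leak graph. A minimal sketch, modeled on the MemUnreachable.cpp changes and the LeakFolding tests added below (the FoldingSketch function and stack buffers are hypothetical; Heap converts implicitly to the Allocator types, as in the tests):

#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"

void FoldingSketch() {
  Heap heap;
  // buffer1 leaks and points at buffer2, which is otherwise unreferenced,
  // so buffer2 should be folded into buffer1's report.
  void* buffer2[1] = {nullptr};
  void* buffer1[1] = {buffer2};

  HeapWalker heap_walker(heap);
  heap_walker.Allocation(reinterpret_cast<uintptr_t>(buffer1),
                         reinterpret_cast<uintptr_t>(buffer1) + sizeof(buffer1));
  heap_walker.Allocation(reinterpret_cast<uintptr_t>(buffer2),
                         reinterpret_cast<uintptr_t>(buffer2) + sizeof(buffer2));
  heap_walker.DetectLeaks();  // no roots registered, so both allocations leak

  LeakFolding folding(heap, heap_walker);
  folding.FoldLeaks();

  allocator::vector<LeakFolding::Leak> leaked(heap);
  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  folding.Leaked(leaked, 100, &num_leaks, &leak_bytes);
  // num_leaks == 2 and leak_bytes counts both buffers, but leaked holds a
  // single entry for buffer1; its referenced_count (1) and referenced_size
  // (sizeof(uintptr_t)) account for the hidden buffer2.
}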


@@ -3,6 +3,7 @@ LOCAL_PATH := $(call my-dir)
memunreachable_srcs := \
Allocator.cpp \
HeapWalker.cpp \
LeakFolding.cpp \
LeakPipe.cpp \
LineBuffer.cpp \
MemUnreachable.cpp \
@@ -14,6 +15,7 @@ memunreachable_test_srcs := \
tests/Allocator_test.cpp \
tests/DisableMalloc_test.cpp \
tests/HeapWalker_test.cpp \
tests/LeakFolding_test.cpp \
tests/MemUnreachable_test.cpp \
tests/ThreadCapture_test.cpp \
@@ -49,9 +51,11 @@ LOCAL_MODULE := memunreachable_test
LOCAL_SRC_FILES := \
Allocator.cpp \
HeapWalker.cpp \
LeakFolding.cpp \
tests/Allocator_test.cpp \
tests/HeapWalker_test.cpp \
tests/HostMallocStub.cpp \
tests/LeakFolding_test.cpp \
LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
LOCAL_CLANG := true


@@ -21,17 +21,19 @@
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "log.h"
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
if (end == begin) {
end = begin + 1;
}
auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
Range range{begin, end};
auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
if (inserted.second) {
valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
allocation_bytes_ += end - begin;
allocation_bytes_ += range.size();
return true;
} else {
Range overlap = inserted.first->first;
@@ -44,27 +46,30 @@ bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
}
}
void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
allocator::vector<Range> to_do(1, range, allocator_);
bool HeapWalker::IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info) {
if (ptr >= valid_allocations_range_.begin && ptr < valid_allocations_range_.end) {
AllocationMap::iterator it = allocations_.find(Range{ptr, ptr + 1});
if (it != allocations_.end()) {
*range = it->first;
*info = &it->second;
return true;
}
}
return false;
}
void HeapWalker::RecurseRoot(const Range& root) {
allocator::vector<Range> to_do(1, root, allocator_);
while (!to_do.empty()) {
Range range = to_do.back();
to_do.pop_back();
uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
// TODO(ccross): we might need to consider a pointer to the end of a buffer
// to be inside the buffer, which means the common case of a pointer to the
// beginning of a buffer may keep two ranges live.
for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
RangeMap::iterator it = allocations_.find(Range{val, val + 1});
if (it != allocations_.end()) {
if (!(it->second.*flag)) {
to_do.push_back(it->first);
it->second.*flag = true;
}
}
ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
if (!ref_info->referenced_from_root) {
ref_info->referenced_from_root = true;
to_do.push_back(ref_range);
}
}
});
}
}
@@ -85,27 +90,22 @@ size_t HeapWalker::AllocationBytes() {
}
bool HeapWalker::DetectLeaks() {
// Recursively walk pointers from roots to mark referenced allocations
for (auto it = roots_.begin(); it != roots_.end(); it++) {
Walk(*it, &RangeInfo::referenced_from_root);
RecurseRoot(*it);
}
Range vals;
vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
Walk(vals, &RangeInfo::referenced_from_root);
for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
if (!it->second.referenced_from_root) {
Walk(it->first, &RangeInfo::referenced_from_leak);
}
}
RecurseRoot(vals);
return true;
}
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
size_t* num_leaks_out, size_t* leak_bytes_out) {
DetectLeaks();
leaked.clear();
size_t num_leaks = 0;
@@ -120,7 +120,7 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
size_t n = 0;
for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
if (!it->second.referenced_from_root) {
if (n++ <= limit) {
if (n++ < limit) {
leaked.push_back(it->first);
}
}


@@ -20,11 +20,14 @@
#include "android-base/macros.h"
#include "Allocator.h"
#include "Tarjan.h"
// A range [begin, end)
struct Range {
uintptr_t begin;
uintptr_t end;
size_t size() const { return end - begin; };
};
// Comparator for Ranges that returns equivalence for overlapping ranges
@@ -34,7 +37,6 @@ struct compare_range {
}
};
class HeapWalker {
public:
HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
@@ -55,16 +57,25 @@ class HeapWalker {
size_t Allocations();
size_t AllocationBytes();
private:
struct RangeInfo {
template<class F>
void ForEachPtrInRange(const Range& range, F&& f);
template<class F>
void ForEachAllocation(F&& f);
struct AllocationInfo {
bool referenced_from_root;
bool referenced_from_leak;
};
void Walk(const Range& range, bool RangeInfo::* flag);
private:
void RecurseRoot(const Range& root);
bool IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
DISALLOW_COPY_AND_ASSIGN(HeapWalker);
Allocator<HeapWalker> allocator_;
using RangeMap = allocator::map<RangeInfo, Range, compare_range>;
RangeMap allocations_;
using AllocationMap = allocator::map<AllocationInfo, Range, compare_range>;
AllocationMap allocations_;
size_t allocation_bytes_;
Range valid_allocations_range_;
@@ -72,4 +83,28 @@ class HeapWalker {
allocator::vector<uintptr_t> root_vals_;
};
template<class F>
inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
// TODO(ccross): we might need to consider a pointer to the end of a buffer
// to be inside the buffer, which means the common case of a pointer to the
// beginning of a buffer may keep two ranges live.
for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
Range ref_range;
AllocationInfo* ref_info;
if (IsAllocationPtr(*reinterpret_cast<uintptr_t*>(i), &ref_range, &ref_info)) {
f(ref_range, ref_info);
}
}
}
template<class F>
inline void HeapWalker::ForEachAllocation(F&& f) {
for (auto& it : allocations_) {
const Range& range = it.first;
HeapWalker::AllocationInfo& allocation = it.second;
f(range, allocation);
}
}
#endif
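
The pointer scan in ForEachPtrInRange rounds the start of the range up to the next pointer-sized boundary before dereferencing word-sized candidates. A small worked example with hypothetical addresses (64-bit, sizeof(uintptr_t) == 8):

// range.begin == 0x1003 is not 8-byte aligned, so the scan starts at:
//   (0x1003 + (8 - 1)) & ~uintptr_t{8 - 1}  ==  0x100a & ~0x7  ==  0x1008
// Candidate pointer values are then read at 0x1008, 0x1010, ... while i < range.end.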


@@ -0,0 +1,143 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "Tarjan.h"
#include "log.h"
// Converts a possibly cyclic graph of leaks to a DAG by combining
// strongly connected components into a single object, stored in the scc
// pointer of each node in the component.
void LeakFolding::ComputeDAG() {
SCCList<LeakInfo> scc_list{allocator_};
Tarjan(leak_graph_, scc_list);
Allocator<SCCInfo> scc_allocator = allocator_;
for (auto& scc_nodes: scc_list) {
Allocator<SCCInfo>::unique_ptr leak_scc;
leak_scc = scc_allocator.make_unique(scc_allocator);
for (auto& node: scc_nodes) {
node->ptr->scc = leak_scc.get();
leak_scc->count++;
leak_scc->size += node->ptr->range.size();
}
leak_scc_.emplace_back(std::move(leak_scc));
}
for (auto& it : leak_map_) {
LeakInfo& leak = it.second;
for (auto& ref: leak.node.references_out) {
if (leak.scc != ref->ptr->scc) {
leak.scc->node.Edge(&ref->ptr->scc->node);
}
}
}
}
void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
[&](SCCInfo* scc) {
if (scc->accumulator != dominator) {
scc->accumulator = dominator;
dominator->cumulative_size += scc->size;
dominator->cumulative_count += scc->count;
scc->node.Foreach([&](SCCInfo* ref) {
walk(ref);
});
}
});
walk(dominator);
}
bool LeakFolding::FoldLeaks() {
Allocator<LeakInfo> leak_allocator = allocator_;
// Find all leaked allocations and insert them into leak_map_ and leak_graph_
heap_walker_.ForEachAllocation(
[&](const Range& range, HeapWalker::AllocationInfo& allocation) {
if (!allocation.referenced_from_root) {
auto it = leak_map_.emplace(std::piecewise_construct,
std::forward_as_tuple(range),
std::forward_as_tuple(range, allocator_));
LeakInfo& leak = it.first->second;
leak_graph_.push_back(&leak.node);
}
});
// Find references between leaked allocations and connect them in leak_graph_
for (auto& it : leak_map_) {
LeakInfo& leak = it.second;
heap_walker_.ForEachPtrInRange(leak.range,
[&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
if (!ptr_info->referenced_from_root) {
LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
leak.node.Edge(&ptr_leak->node);
}
});
}
// Convert the cyclic graph to a DAG by grouping strongly connected components
ComputeDAG();
// Compute dominators and cumulative sizes
for (auto& scc : leak_scc_) {
if (scc->node.references_in.size() == 0) {
scc->dominator = true;
AccumulateLeaks(scc.get());
}
}
return true;
}
bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
size_t limit, size_t* num_leaks_out, size_t* leak_bytes_out) {
size_t num_leaks = 0;
size_t leak_bytes = 0;
for (auto& it : leak_map_) {
const LeakInfo& leak = it.second;
num_leaks++;
leak_bytes += leak.range.size();
}
size_t n = 0;
for (auto& it : leak_map_) {
const LeakInfo& leak = it.second;
if (leak.scc->dominator) {
if (n++ < limit) {
leaked.emplace_back(Leak{leak.range,
leak.scc->cumulative_count - 1,
leak.scc->cumulative_size - leak.range.size()});
}
}
}
if (num_leaks_out) {
*num_leaks_out = num_leaks;
}
if (leak_bytes_out) {
*leak_bytes_out = leak_bytes;
}
return true;
}
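
To make the dominator accumulation concrete, here is a worked example on a hypothetical DAG of strongly connected components (counts and sizes are made up):

// SCC DAG after ComputeDAG:
//   A(count=1, size=8)  ->  B(count=2, size=16)  ->  C(count=1, size=8)
//   A -> C                  (two paths reach C)
// A has no incoming references, so it is marked as a dominator and
// AccumulateLeaks(A) runs. Each SCC is visited once because it records A as
// its accumulator, so C is not double-counted:
//   A->cumulative_count == 1 + 2 + 1 == 4
//   A->cumulative_size  == 8 + 16 + 8 == 32 bytes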


@@ -0,0 +1,89 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#define LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#include "HeapWalker.h"
class LeakFolding {
public:
LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
: allocator_(allocator), heap_walker_(heap_walker),
leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}
bool FoldLeaks();
struct Leak {
const Range range;
size_t referenced_count;
size_t referenced_size;
};
bool Leaked(allocator::vector<Leak>& leaked, size_t limit,
size_t* num_leaks_out, size_t* leak_bytes_out);
private:
DISALLOW_COPY_AND_ASSIGN(LeakFolding);
Allocator<void> allocator_;
HeapWalker& heap_walker_;
struct SCCInfo {
public:
Node<SCCInfo> node;
size_t count;
size_t size;
size_t cumulative_count;
size_t cumulative_size;
bool dominator;
SCCInfo* accumulator;
SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
count(0), size(0), cumulative_count(0), cumulative_size(0),
dominator(false), accumulator(nullptr) {}
private:
SCCInfo(SCCInfo&&) = delete;
DISALLOW_COPY_AND_ASSIGN(SCCInfo);
};
struct LeakInfo {
public:
Node<LeakInfo> node;
const Range range;
SCCInfo* scc;
LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
: node(this, allocator), range(range),
scc(nullptr) {}
private:
DISALLOW_COPY_AND_ASSIGN(LeakInfo);
};
void ComputeDAG();
void AccumulateLeaks(SCCInfo* dominator);
allocator::map<LeakInfo, Range, compare_range> leak_map_;
Graph<LeakInfo> leak_graph_;
allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
};
#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_


@@ -27,6 +27,7 @@
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
@@ -122,18 +123,30 @@ bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t
ALOGI("sweeping process %d for unreachable memory", pid_);
leaks.clear();
allocator::vector<Range> leaked{allocator_};
if (!heap_walker_.Leaked(leaked, limit, num_leaks, leak_bytes)) {
if (!heap_walker_.DetectLeaks()) {
return false;
}
LeakFolding folding(allocator_, heap_walker_);
if (!folding.FoldLeaks()) {
return false;
}
allocator::vector<LeakFolding::Leak> leaked{allocator_};
if (!folding.Leaked(leaked, limit, num_leaks, leak_bytes)) {
return false;
}
for (auto it = leaked.begin(); it != leaked.end(); it++) {
Leak leak{};
leak.begin = it->begin;
leak.size = it->end - it->begin;;
memcpy(leak.contents, reinterpret_cast<void*>(it->begin),
leak.begin = it->range.begin;
leak.size = it->range.size();
leak.referenced_count = it->referenced_count;
leak.referenced_size = it->referenced_size;
memcpy(leak.contents, reinterpret_cast<void*>(it->range.begin),
std::min(leak.size, Leak::contents_length));
ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->begin),
ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->range.begin),
leak.backtrace_frames, leak.backtrace_length);
if (num_backtrace_frames > 0) {
leak.num_backtrace_frames = num_backtrace_frames;
@@ -352,6 +365,11 @@ std::string Leak::ToString(bool log_contents) const {
oss << " " << std::dec << size;
oss << " bytes unreachable at ";
oss << std::hex << begin;
if (referenced_count > 0) {
oss << " referencing " << std::dec << referenced_size << " unreachable bytes";
oss << " in " << referenced_count;
oss << " allocation" << ((referenced_count == 1) ? "" : "s");
}
oss << std::endl;
if (log_contents) {
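
With the new referenced_count and referenced_size fields, a folded leak's log line also summarizes what it keeps alive. A hypothetical example of the extended output (values are made up):

  1024 bytes unreachable at 7f8e4c0010 referencing 96 unreachable bytes in 3 allocations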

libmemunreachable/Tarjan.h (new file, 131 lines)

@@ -0,0 +1,131 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on system/update_engine/payload_generator/tarjan.cc
#ifndef LIBMEMUNREACHABLE_TARJAN_H_
#define LIBMEMUNREACHABLE_TARJAN_H_
#include <algorithm>
#include "Allocator.h"
template<class T>
class Node {
public:
allocator::set<Node<T>*> references_in;
allocator::set<Node<T>*> references_out;
size_t index;
size_t lowlink;
T* ptr;
Node(T* ptr, Allocator<Node> allocator) : references_in(allocator), references_out(allocator),
ptr(ptr) {};
Node(Node&& rhs) = default;
void Edge(Node<T>* ref) {
references_out.emplace(ref);
ref->references_in.emplace(this);
}
template<class F>
void Foreach(F&& f) {
for (auto& node: references_out) {
f(node->ptr);
}
}
private:
DISALLOW_COPY_AND_ASSIGN(Node<T>);
};
template<class T>
using Graph = allocator::vector<Node<T>*>;
template<class T>
using SCC = allocator::vector<Node<T>*>;
template<class T>
using SCCList = allocator::vector<SCC<T>>;
template<class T>
class TarjanAlgorithm {
public:
TarjanAlgorithm(Allocator<void> allocator) : index_(0),
stack_(allocator), components_(allocator) {}
void Execute(Graph<T>& graph, SCCList<T>& out);
private:
static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
void Tarjan(Node<T>* vertex, Graph<T>& graph);
size_t index_;
allocator::vector<Node<T>*> stack_;
SCCList<T> components_;
};
template<class T>
void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
stack_.clear();
components_.clear();
index_ = 0;
for (auto& it: graph) {
it->index = UNDEFINED_INDEX;
it->lowlink = UNDEFINED_INDEX;
}
for (auto& it: graph) {
if (it->index == UNDEFINED_INDEX) {
Tarjan(it, graph);
}
}
out.swap(components_);
}
template<class T>
void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
assert(vertex->index == UNDEFINED_INDEX);
vertex->index = index_;
vertex->lowlink = index_;
index_++;
stack_.push_back(vertex);
for (auto& it: vertex->references_out) {
Node<T>* vertex_next = it;
if (vertex_next->index == UNDEFINED_INDEX) {
Tarjan(vertex_next, graph);
vertex->lowlink = std::min(vertex->lowlink, vertex_next->lowlink);
} else if (std::find(stack_.begin(), stack_.end(), vertex_next) != stack_.end()) {
vertex->lowlink = std::min(vertex->lowlink, vertex_next->index);
}
}
if (vertex->lowlink == vertex->index) {
SCC<T> component{components_.get_allocator()};
Node<T>* other_vertex;
do {
other_vertex = stack_.back();
stack_.pop_back();
component.push_back(other_vertex);
} while (other_vertex != vertex && !stack_.empty());
components_.emplace_back(component);
}
}
template<class T>
void Tarjan(Graph<T>& graph, SCCList<T>& out) {
TarjanAlgorithm<T> tarjan{graph.get_allocator()};
tarjan.Execute(graph, out);
}
#endif // LIBMEMUNREACHABLE_TARJAN_H_
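
Tarjan.h is generic over the node payload type. A minimal usage sketch (Payload, TarjanSketch, and the three-node graph are hypothetical; Heap converts implicitly to the Allocator types, as in the tests in this change):

#include "Allocator.h"
#include "Tarjan.h"

struct Payload { int id; };

void TarjanSketch() {
  Heap heap;
  Payload a{1}, b{2}, c{3};

  Node<Payload> na(&a, heap);
  Node<Payload> nb(&b, heap);
  Node<Payload> nc(&c, heap);

  na.Edge(&nb);
  nb.Edge(&na);  // a and b form one strongly connected component
  nb.Edge(&nc);  // c is a component of its own

  Graph<Payload> graph(heap);
  graph.push_back(&na);
  graph.push_back(&nb);
  graph.push_back(&nc);

  SCCList<Payload> sccs(heap);
  Tarjan(graph, sccs);
  // sccs should hold two components, {nc} and {na, nb}; Tarjan emits a
  // component only after every component it references has been emitted.
}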


@@ -27,9 +27,15 @@
struct Leak {
uintptr_t begin;
size_t size;
size_t referenced_count;
size_t referenced_size;
size_t num_backtrace_frames;
static const size_t contents_length = 32;
char contents[contents_length];
static const size_t backtrace_length = 16;
uintptr_t backtrace_frames[backtrace_length];


@@ -80,6 +80,8 @@ TEST_F(HeapWalkerTest, leak) {
HeapWalker heap_walker(heap_);
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
@@ -106,6 +108,8 @@ TEST_F(HeapWalkerTest, live) {
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
heap_walker.Root(buffer_begin(buffer1), buffer_end(buffer1));
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = SIZE_MAX;
size_t leaked_bytes = SIZE_MAX;
@@ -132,6 +136,8 @@ TEST_F(HeapWalkerTest, unaligned) {
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
heap_walker.Root(buffer_begin(buffer1) + i, buffer_end(buffer1) - j);
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = SIZE_MAX;
size_t leaked_bytes = SIZE_MAX;
@@ -143,3 +149,26 @@ TEST_F(HeapWalkerTest, unaligned) {
}
}
}
TEST_F(HeapWalkerTest, cycle) {
void* buffer1;
void* buffer2;
buffer1 = &buffer2;
buffer2 = &buffer1;
HeapWalker heap_walker(heap_);
heap_walker.Allocation(buffer_begin(buffer1), buffer_end(buffer1));
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(2U, num_leaks);
EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
}


@@ -0,0 +1,427 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HeapWalker.h"
#include "LeakFolding.h"
#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
#include "Allocator.h"
class LeakFoldingTest : public ::testing::Test {
public:
LeakFoldingTest() : disable_malloc_(), heap_() {}
void TearDown() {
ASSERT_TRUE(heap_.empty());
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc_.timed_out());
}
}
protected:
ScopedDisableMallocTimeout disable_malloc_;
Heap heap_;
};
#define buffer_begin(buffer) reinterpret_cast<uintptr_t>(&buffer[0])
#define buffer_end(buffer) (reinterpret_cast<uintptr_t>(&buffer[0]) + sizeof(buffer))
#define ALLOCATION(heap_walker, buffer) \
ASSERT_EQ(true, heap_walker.Allocation(buffer_begin(buffer), buffer_end(buffer)))
TEST_F(LeakFoldingTest, one) {
void* buffer1[1] = {nullptr};
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(1U, num_leaks);
EXPECT_EQ(sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(0U, leaked[0].referenced_count);
EXPECT_EQ(0U, leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, two) {
void* buffer1[1] = {nullptr};
void* buffer2[1] = {nullptr};
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(2U, num_leaks);
EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
EXPECT_EQ(0U, leaked[0].referenced_count);
EXPECT_EQ(0U, leaked[0].referenced_size);
EXPECT_EQ(0U, leaked[1].referenced_count);
EXPECT_EQ(0U, leaked[1].referenced_size);
}
TEST_F(LeakFoldingTest, dominator) {
void* buffer1[1];
void* buffer2[1] = {nullptr};
buffer1[0] = buffer2;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(2U, num_leaks);
EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(1U, leaked[0].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, cycle) {
void* buffer1[1];
void* buffer2[1];
void* buffer3[1];
buffer1[0] = buffer2;
buffer2[0] = buffer3;
buffer3[0] = buffer2;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(3U, num_leaks);
EXPECT_EQ(3*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(2U, leaked[0].referenced_count);
EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, dominator_cycle) {
void* buffer1[2] = {nullptr, nullptr};
void* buffer2[2];
void* buffer3[1] = {nullptr};
buffer1[0] = &buffer2;
buffer2[0] = &buffer1;
buffer2[1] = &buffer3;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(3U, num_leaks);
EXPECT_EQ(5*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
EXPECT_EQ(2U, leaked[0].referenced_count);
EXPECT_EQ(3*sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(2U, leaked[1].referenced_count);
EXPECT_EQ(3*sizeof(uintptr_t), leaked[1].referenced_size);
}
TEST_F(LeakFoldingTest, two_cycles) {
void* buffer1[1];
void* buffer2[1];
void* buffer3[1];
void* buffer4[1];
void* buffer5[1];
void* buffer6[1];
buffer1[0] = buffer3;
buffer2[0] = buffer5;
buffer3[0] = buffer4;
buffer4[0] = buffer3;
buffer5[0] = buffer6;
buffer6[0] = buffer5;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
ALLOCATION(heap_walker, buffer5);
ALLOCATION(heap_walker, buffer6);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(6U, num_leaks);
EXPECT_EQ(6*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
EXPECT_EQ(2U, leaked[0].referenced_count);
EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(2U, leaked[1].referenced_count);
EXPECT_EQ(2*sizeof(uintptr_t), leaked[1].referenced_size);
}
TEST_F(LeakFoldingTest, two_dominator_cycles) {
void* buffer1[1];
void* buffer2[1];
void* buffer3[1];
void* buffer4[1];
buffer1[0] = buffer2;
buffer2[0] = buffer1;
buffer3[0] = buffer4;
buffer4[0] = buffer3;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(4U, num_leaks);
EXPECT_EQ(4*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(4U, leaked.size());
EXPECT_EQ(1U, leaked[0].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(1U, leaked[1].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[1].referenced_size);
EXPECT_EQ(1U, leaked[2].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[2].referenced_size);
EXPECT_EQ(1U, leaked[3].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[3].referenced_size);
}
TEST_F(LeakFoldingTest, giant_dominator_cycle) {
const size_t n = 1000;
void* buffer[n];
HeapWalker heap_walker(heap_);
for (size_t i = 0; i < n; i ++) {
ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
reinterpret_cast<uintptr_t>(&buffer[i+1])));
}
for (size_t i = 0; i < n - 1; i++) {
buffer[i] = &buffer[i+1];
}
buffer[n - 1] = &buffer[0];
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(n, num_leaks);
EXPECT_EQ(n * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(100U, leaked.size());
EXPECT_EQ(n - 1, leaked[0].referenced_count);
EXPECT_EQ((n - 1) * sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, giant_cycle) {
const size_t n = 1000;
void* buffer[n];
void* buffer1[1];
HeapWalker heap_walker(heap_);
for (size_t i = 0; i < n - 1; i++) {
buffer[i] = &buffer[i+1];
}
buffer[n - 1] = &buffer[0];
buffer1[0] = &buffer[0];
for (size_t i = 0; i < n; i ++) {
ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
reinterpret_cast<uintptr_t>(&buffer[i+1])));
}
ALLOCATION(heap_walker, buffer1);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(n + 1, num_leaks);
EXPECT_EQ((n + 1) * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(n, leaked[0].referenced_count);
EXPECT_EQ(n * sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, multipath) {
void* buffer1[2];
void* buffer2[1];
void* buffer3[1];
void* buffer4[1] = {nullptr};
// 1
// / \
// v v
// 2 3
// \ /
// v
// 4
buffer1[0] = &buffer2;
buffer1[1] = &buffer3;
buffer2[0] = &buffer4;
buffer3[0] = &buffer4;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(4U, num_leaks);
EXPECT_EQ(5 * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(3U, leaked[0].referenced_count);
EXPECT_EQ(3 * sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, multicycle) {
void* buffer1[2]{};
void* buffer2[2]{};
void* buffer3[2]{};
void* buffer4[2]{};
// 1
// / ^
// v \
// 2 -> 3
// \ ^
// v /
// 4
buffer1[0] = &buffer2;
buffer2[0] = &buffer3;
buffer2[1] = &buffer4;
buffer3[0] = &buffer1;
buffer4[0] = &buffer3;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(4U, num_leaks);
EXPECT_EQ(8 * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(4U, leaked.size());
EXPECT_EQ(3U, leaked[0].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(3U, leaked[1].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[1].referenced_size);
EXPECT_EQ(3U, leaked[2].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[2].referenced_size);
EXPECT_EQ(3U, leaked[3].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[3].referenced_size);
}