Merge changes Ie9594b6e,I2e050a37,Ia2bf2ccf,I4e4db704,Ifbfd14e2, ...

* changes:
  Fix mac build
  Add ABI to leak report
  Combine leaks with same stacktrace
  Fix allocator::map template argument ordering
  Fold leaks that are referenced by other leaks
  Compile some tests for the host
Colin Cross 2016-03-08 22:26:39 +00:00 committed by Gerrit Code Review
commit 0bc37ffbc6
19 changed files with 1257 additions and 169 deletions


@ -370,11 +370,11 @@ void* HeapImpl::Alloc(size_t size) {
}
void* HeapImpl::AllocLocked(size_t size) {
if (__predict_false(size > kMaxBucketAllocationSize)) {
if (size > kMaxBucketAllocationSize) {
return MapAlloc(size);
}
int bucket = size_to_bucket(size);
if (__predict_false(free_chunks_[bucket].empty())) {
if (free_chunks_[bucket].empty()) {
Chunk *chunk = new Chunk(this, bucket);
free_chunks_[bucket].insert(chunk->node_);
}


@ -24,6 +24,7 @@
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
extern std::atomic<int> heap_count;
@ -209,9 +210,12 @@ using vector = std::vector<T, Allocator<T>>;
template<class T>
using list = std::list<T, Allocator<T>>;
template<class T, class Key, class Compare = std::less<Key>>
template<class Key, class T, class Compare = std::less<Key>>
using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
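For reference, a minimal hypothetical sketch of the corrected aliases in use; the function name, heap parameter, and values below are invented for illustration and are not part of this change:

#include <stdint.h>

#include "Allocator.h"

// Sketch only: with the fixed ordering the aliases take <Key, T>, matching
// std::map / std::unordered_map, and an Allocator<void> converts to the
// element allocator the same way the rest of this change relies on.
static void MapOrderingSketch(Heap& heap) {
  Allocator<void> alloc(heap);
  allocator::map<uintptr_t, size_t> sizes{alloc};            // Key = uintptr_t, T = size_t
  sizes[0x1000] = 16;
  allocator::unordered_map<uintptr_t, size_t> by_hash{alloc};
  by_hash.emplace(0x2000, 32);
}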


@ -3,6 +3,7 @@ LOCAL_PATH := $(call my-dir)
memunreachable_srcs := \
Allocator.cpp \
HeapWalker.cpp \
LeakFolding.cpp \
LeakPipe.cpp \
LineBuffer.cpp \
MemUnreachable.cpp \
@ -12,7 +13,9 @@ memunreachable_srcs := \
memunreachable_test_srcs := \
tests/Allocator_test.cpp \
tests/DisableMalloc_test.cpp \
tests/HeapWalker_test.cpp \
tests/LeakFolding_test.cpp \
tests/MemUnreachable_test.cpp \
tests/ThreadCapture_test.cpp \
@ -41,3 +44,22 @@ LOCAL_CLANG := true
LOCAL_SHARED_LIBRARIES := libmemunreachable libbase liblog
include $(BUILD_NATIVE_TEST)
include $(CLEAR_VARS)
LOCAL_MODULE := memunreachable_test
LOCAL_SRC_FILES := \
Allocator.cpp \
HeapWalker.cpp \
LeakFolding.cpp \
tests/Allocator_test.cpp \
tests/HeapWalker_test.cpp \
tests/HostMallocStub.cpp \
tests/LeakFolding_test.cpp \
LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
LOCAL_CLANG := true
LOCAL_SHARED_LIBRARIES := libbase liblog
LOCAL_MODULE_HOST_OS := linux
include $(BUILD_HOST_NATIVE_TEST)


@ -21,17 +21,19 @@
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "log.h"
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
if (end == begin) {
end = begin + 1;
}
auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
Range range{begin, end};
auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
if (inserted.second) {
valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
allocation_bytes_ += end - begin;
allocation_bytes_ += range.size();
return true;
} else {
Range overlap = inserted.first->first;
@ -44,27 +46,30 @@ bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
}
}
void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
allocator::vector<Range> to_do(1, range, allocator_);
bool HeapWalker::IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info) {
if (ptr >= valid_allocations_range_.begin && ptr < valid_allocations_range_.end) {
AllocationMap::iterator it = allocations_.find(Range{ptr, ptr + 1});
if (it != allocations_.end()) {
*range = it->first;
*info = &it->second;
return true;
}
}
return false;
}
void HeapWalker::RecurseRoot(const Range& root) {
allocator::vector<Range> to_do(1, root, allocator_);
while (!to_do.empty()) {
Range range = to_do.back();
to_do.pop_back();
uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
// TODO(ccross): we might need to consider a pointer to the end of a buffer
// to be inside the buffer, which means the common case of a pointer to the
// beginning of a buffer may keep two ranges live.
for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
RangeMap::iterator it = allocations_.find(Range{val, val + 1});
if (it != allocations_.end()) {
if (!(it->second.*flag)) {
to_do.push_back(it->first);
it->second.*flag = true;
}
}
ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
if (!ref_info->referenced_from_root) {
ref_info->referenced_from_root = true;
to_do.push_back(ref_range);
}
}
});
}
}
@ -85,27 +90,22 @@ size_t HeapWalker::AllocationBytes() {
}
bool HeapWalker::DetectLeaks() {
// Recursively walk pointers from roots to mark referenced allocations
for (auto it = roots_.begin(); it != roots_.end(); it++) {
Walk(*it, &RangeInfo::referenced_from_root);
RecurseRoot(*it);
}
Range vals;
vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
Walk(vals, &RangeInfo::referenced_from_root);
for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
if (!it->second.referenced_from_root) {
Walk(it->first, &RangeInfo::referenced_from_leak);
}
}
RecurseRoot(vals);
return true;
}
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
size_t* num_leaks_out, size_t* leak_bytes_out) {
DetectLeaks();
leaked.clear();
size_t num_leaks = 0;
@ -120,7 +120,7 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
size_t n = 0;
for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
if (!it->second.referenced_from_root) {
if (n++ <= limit) {
if (n++ < limit) {
leaked.push_back(it->first);
}
}


@ -20,11 +20,14 @@
#include "android-base/macros.h"
#include "Allocator.h"
#include "Tarjan.h"
// A range [begin, end)
struct Range {
uintptr_t begin;
uintptr_t end;
size_t size() const { return end - begin; };
};
// Comparator for Ranges that returns equivalence for overlapping ranges
@ -34,7 +37,6 @@ struct compare_range {
}
};
class HeapWalker {
public:
HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
@ -55,16 +57,25 @@ class HeapWalker {
size_t Allocations();
size_t AllocationBytes();
private:
struct RangeInfo {
template<class F>
void ForEachPtrInRange(const Range& range, F&& f);
template<class F>
void ForEachAllocation(F&& f);
struct AllocationInfo {
bool referenced_from_root;
bool referenced_from_leak;
};
void Walk(const Range& range, bool RangeInfo::* flag);
private:
void RecurseRoot(const Range& root);
bool IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
DISALLOW_COPY_AND_ASSIGN(HeapWalker);
Allocator<HeapWalker> allocator_;
using RangeMap = allocator::map<RangeInfo, Range, compare_range>;
RangeMap allocations_;
using AllocationMap = allocator::map<Range, AllocationInfo, compare_range>;
AllocationMap allocations_;
size_t allocation_bytes_;
Range valid_allocations_range_;
@ -72,4 +83,28 @@ class HeapWalker {
allocator::vector<uintptr_t> root_vals_;
};
template<class F>
inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
// TODO(ccross): we might need to consider a pointer to the end of a buffer
// to be inside the buffer, which means the common case of a pointer to the
// beginning of a buffer may keep two ranges live.
for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
Range ref_range;
AllocationInfo* ref_info;
if (IsAllocationPtr(*reinterpret_cast<uintptr_t*>(i), &ref_range, &ref_info)) {
f(ref_range, ref_info);
}
}
}
template<class F>
inline void HeapWalker::ForEachAllocation(F&& f) {
for (auto& it : allocations_) {
const Range& range = it.first;
HeapWalker::AllocationInfo& allocation = it.second;
f(range, allocation);
}
}
#endif
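As a usage note, here is a minimal hypothetical sketch of the new template helpers; it mirrors how LeakFolding (added later in this commit) walks allocations, and the function name is invented:

#include <stddef.h>

#include "HeapWalker.h"

// Sketch only: count unreferenced allocations that hold at least one pointer
// into another allocation. Assumes `walker` has been populated via Allocation()
// and Root() and that DetectLeaks() has already been called.
static size_t CountLeaksWithOutgoingRefs(HeapWalker& walker) {
  size_t count = 0;
  walker.ForEachAllocation([&](const Range& range, HeapWalker::AllocationInfo& info) {
    if (info.referenced_from_root) {
      return;  // reachable from a root, not a leak
    }
    bool has_ref = false;
    walker.ForEachPtrInRange(range, [&](Range&, HeapWalker::AllocationInfo*) {
      has_ref = true;
    });
    if (has_ref) {
      count++;
    }
  });
  return count;
}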

libmemunreachable/Leak.h (new file)

@ -0,0 +1,57 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LEAK_H_
#define LIBMEMUNREACHABLE_LEAK_H_
#include <string.h>

#include <functional>
#include <vector>
#include "memunreachable/memunreachable.h"
// Custom std::hash specialization so that Leak::Backtrace can be used
// as a key in std::unordered_map.
namespace std {
template<>
struct hash<Leak::Backtrace> {
std::size_t operator()(const Leak::Backtrace& key) const {
std::size_t seed = 0;
hash_combine(seed, key.num_frames);
for (size_t i = 0; i < key.num_frames; i++) {
hash_combine(seed, key.frames[i]);
}
return seed;
}
private:
template<typename T>
inline void hash_combine(std::size_t& seed, const T& v) const {
std::hash<T> hasher;
seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
};
} // namespace std
static bool operator==(const Leak::Backtrace& lhs, const Leak::Backtrace& rhs) {
return (lhs.num_frames == rhs.num_frames) &&
memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
}
#endif
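A small hypothetical sketch of what this header enables: using a backtrace as a hash-map key, which is how leaks with identical stacks are combined later in this commit. The helper name is invented:

#include <unordered_map>
#include <vector>

#include "Leak.h"

// Sketch only: bucket Leak records by their backtrace using the std::hash
// specialization and operator== defined above.
static std::unordered_map<Leak::Backtrace, std::vector<const Leak*>>
GroupByBacktrace(const std::vector<Leak>& leaks) {
  std::unordered_map<Leak::Backtrace, std::vector<const Leak*>> groups;
  for (const Leak& leak : leaks) {
    groups[leak.backtrace].push_back(&leak);  // equal stacks share a bucket
  }
  return groups;
}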


@ -0,0 +1,140 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "Tarjan.h"
#include "log.h"
// Converts a possibly cyclic graph of leaks to a DAG by combining
// strongly-connected components into a single object, stored in the scc pointer
// of each node in the component.
void LeakFolding::ComputeDAG() {
SCCList<LeakInfo> scc_list{allocator_};
Tarjan(leak_graph_, scc_list);
Allocator<SCCInfo> scc_allocator = allocator_;
for (auto& scc_nodes: scc_list) {
Allocator<SCCInfo>::unique_ptr leak_scc;
leak_scc = scc_allocator.make_unique(scc_allocator);
for (auto& node: scc_nodes) {
node->ptr->scc = leak_scc.get();
leak_scc->count++;
leak_scc->size += node->ptr->range.size();
}
leak_scc_.emplace_back(std::move(leak_scc));
}
for (auto& it : leak_map_) {
LeakInfo& leak = it.second;
for (auto& ref: leak.node.references_out) {
if (leak.scc != ref->ptr->scc) {
leak.scc->node.Edge(&ref->ptr->scc->node);
}
}
}
}
void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
[&](SCCInfo* scc) {
if (scc->accumulator != dominator) {
scc->accumulator = dominator;
dominator->cumulative_size += scc->size;
dominator->cumulative_count += scc->count;
scc->node.Foreach([&](SCCInfo* ref) {
walk(ref);
});
}
});
walk(dominator);
}
bool LeakFolding::FoldLeaks() {
Allocator<LeakInfo> leak_allocator = allocator_;
// Find all leaked allocations and insert them into leak_map_ and leak_graph_
heap_walker_.ForEachAllocation(
[&](const Range& range, HeapWalker::AllocationInfo& allocation) {
if (!allocation.referenced_from_root) {
auto it = leak_map_.emplace(std::piecewise_construct,
std::forward_as_tuple(range),
std::forward_as_tuple(range, allocator_));
LeakInfo& leak = it.first->second;
leak_graph_.push_back(&leak.node);
}
});
// Find references between leaked allocations and connect them in leak_graph_
for (auto& it : leak_map_) {
LeakInfo& leak = it.second;
heap_walker_.ForEachPtrInRange(leak.range,
[&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
if (!ptr_info->referenced_from_root) {
LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
leak.node.Edge(&ptr_leak->node);
}
});
}
// Convert the cyclic graph to a DAG by grouping strongly connected components
ComputeDAG();
// Compute dominators and cumulative sizes
for (auto& scc : leak_scc_) {
if (scc->node.references_in.size() == 0) {
scc->dominator = true;
AccumulateLeaks(scc.get());
}
}
return true;
}
bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
size_t* num_leaks_out, size_t* leak_bytes_out) {
size_t num_leaks = 0;
size_t leak_bytes = 0;
for (auto& it : leak_map_) {
const LeakInfo& leak = it.second;
num_leaks++;
leak_bytes += leak.range.size();
}
for (auto& it : leak_map_) {
const LeakInfo& leak = it.second;
if (leak.scc->dominator) {
leaked.emplace_back(Leak{leak.range,
leak.scc->cumulative_count - 1,
leak.scc->cumulative_size - leak.range.size()});
}
}
if (num_leaks_out) {
*num_leaks_out = num_leaks;
}
if (leak_bytes_out) {
*leak_bytes_out = leak_bytes;
}
return true;
}


@ -0,0 +1,89 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#define LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#include "HeapWalker.h"
class LeakFolding {
public:
LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
: allocator_(allocator), heap_walker_(heap_walker),
leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}
bool FoldLeaks();
struct Leak {
const Range range;
size_t referenced_count;
size_t referenced_size;
};
bool Leaked(allocator::vector<Leak>& leaked,
size_t* num_leaks_out, size_t* leak_bytes_out);
private:
DISALLOW_COPY_AND_ASSIGN(LeakFolding);
Allocator<void> allocator_;
HeapWalker& heap_walker_;
struct SCCInfo {
public:
Node<SCCInfo> node;
size_t count;
size_t size;
size_t cumulative_count;
size_t cumulative_size;
bool dominator;
SCCInfo* accumulator;
SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
count(0), size(0), cumulative_count(0), cumulative_size(0),
dominator(false), accumulator(nullptr) {}
private:
SCCInfo(SCCInfo&&) = delete;
DISALLOW_COPY_AND_ASSIGN(SCCInfo);
};
struct LeakInfo {
public:
Node<LeakInfo> node;
const Range range;
SCCInfo* scc;
LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
: node(this, allocator), range(range),
scc(nullptr) {}
private:
DISALLOW_COPY_AND_ASSIGN(LeakInfo);
};
void ComputeDAG();
void AccumulateLeaks(SCCInfo* dominator);
allocator::map<Range, LeakInfo, compare_range> leak_map_;
Graph<LeakInfo> leak_graph_;
allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
};
#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_


@ -21,12 +21,15 @@
#include <mutex>
#include <string>
#include <sstream>
#include <unordered_map>
#include <backtrace.h>
#include <android-base/macros.h>
#include "Allocator.h"
#include "HeapWalker.h"
#include "Leak.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
@ -117,32 +120,84 @@ bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& thr
return true;
}
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
size_t* num_leaks, size_t* leak_bytes) {
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
size_t limit, size_t* num_leaks, size_t* leak_bytes) {
ALOGI("sweeping process %d for unreachable memory", pid_);
leaks.clear();
allocator::vector<Range> leaked{allocator_};
if (!heap_walker_.Leaked(leaked, limit, num_leaks, leak_bytes)) {
if (!heap_walker_.DetectLeaks()) {
return false;
}
for (auto it = leaked.begin(); it != leaked.end(); it++) {
Leak leak{};
leak.begin = it->begin;
leak.size = it->end - it->begin;;
memcpy(leak.contents, reinterpret_cast<void*>(it->begin),
std::min(leak.size, Leak::contents_length));
ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->begin),
leak.backtrace_frames, leak.backtrace_length);
if (num_backtrace_frames > 0) {
leak.num_backtrace_frames = num_backtrace_frames;
}
leaks.emplace_back(leak);
}
allocator::vector<Range> leaked1{allocator_};
heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);
ALOGI("sweeping done");
ALOGI("folding related leaks");
LeakFolding folding(allocator_, heap_walker_);
if (!folding.FoldLeaks()) {
return false;
}
allocator::vector<LeakFolding::Leak> leaked{allocator_};
if (!folding.Leaked(leaked, num_leaks, leak_bytes)) {
return false;
}
allocator::unordered_map<Leak::Backtrace, Leak*> backtrace_map{allocator_};
// Prevent reallocations of backing memory so we can store pointers into it
// in backtrace_map.
leaks.reserve(leaked.size());
for (auto& it: leaked) {
leaks.emplace_back();
Leak* leak = &leaks.back();
ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
leak->backtrace.frames, leak->backtrace.max_frames);
if (num_backtrace_frames > 0) {
leak->backtrace.num_frames = num_backtrace_frames;
auto inserted = backtrace_map.emplace(leak->backtrace, leak);
if (!inserted.second) {
// Leak with same backtrace already exists, drop this one and
// increment similar counts on the existing one.
leaks.pop_back();
Leak* similar_leak = inserted.first->second;
similar_leak->similar_count++;
similar_leak->similar_size += it.range.size();
similar_leak->similar_referenced_count += it.referenced_count;
similar_leak->similar_referenced_size += it.referenced_size;
similar_leak->total_size += it.range.size();
similar_leak->total_size += it.referenced_size;
continue;
}
}
leak->begin = it.range.begin;
leak->size = it.range.size();
leak->referenced_count = it.referenced_count;
leak->referenced_size = it.referenced_size;
leak->total_size = leak->size + leak->referenced_size;
memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
std::min(leak->size, Leak::contents_length));
}
ALOGI("folding done");
std::sort(leaks.begin(), leaks.end(), [](const Leak& a, const Leak& b) {
return a.total_size > b.total_size;
});
if (leaks.size() > limit) {
leaks.resize(limit);
}
return true;
}
@ -203,6 +258,11 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings
return true;
}
template<typename T>
static inline const char* plural(T val) {
return (val == 1) ? "" : "s";
}
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
int parent_pid = getpid();
int parent_tid = gettid();
@ -339,9 +399,8 @@ bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
ALOGI("unreachable memory detection done");
ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
info.leak_bytes, info.num_leaks, info.num_leaks == 1 ? "" : "s",
info.allocation_bytes, info.num_allocations, info.num_allocations == 1 ? "" : "s");
info.leak_bytes, info.num_leaks, plural(info.num_leaks),
info.allocation_bytes, info.num_allocations, plural(info.num_allocations));
return true;
}
@ -353,6 +412,23 @@ std::string Leak::ToString(bool log_contents) const {
oss << " bytes unreachable at ";
oss << std::hex << begin;
oss << std::endl;
if (referenced_count > 0) {
oss << std::dec;
oss << " referencing " << referenced_size << " unreachable bytes";
oss << " in " << referenced_count << " allocation" << plural(referenced_count);
oss << std::endl;
}
if (similar_count > 0) {
oss << std::dec;
oss << " and " << similar_size << " similar unreachable bytes";
oss << " in " << similar_count << " allocation" << plural(similar_count);
oss << std::endl;
if (similar_referenced_count > 0) {
oss << " referencing " << similar_referenced_size << " unreachable bytes";
oss << " in " << similar_referenced_count << " allocation" << plural(similar_referenced_count);
oss << std::endl;
}
}
if (log_contents) {
const int bytes_per_line = 16;
@ -361,7 +437,7 @@ std::string Leak::ToString(bool log_contents) const {
if (bytes == size) {
oss << " contents:" << std::endl;
} else {
oss << " first " << bytes << " bytes of contents:" << std::endl;
oss << " first " << bytes << " bytes of contents:" << std::endl;
}
for (size_t i = 0; i < bytes; i += bytes_per_line) {
@ -385,21 +461,41 @@ std::string Leak::ToString(bool log_contents) const {
oss << std::endl;
}
}
if (num_backtrace_frames > 0) {
oss << backtrace_string(backtrace_frames, num_backtrace_frames);
if (backtrace.num_frames > 0) {
oss << backtrace_string(backtrace.frames, backtrace.num_frames);
}
return oss.str();
}
// Figure out the ABI based on defined macros.
#if defined(__arm__)
#define ABI_STRING "arm"
#elif defined(__aarch64__)
#define ABI_STRING "arm64"
#elif defined(__mips__) && !defined(__LP64__)
#define ABI_STRING "mips"
#elif defined(__mips__) && defined(__LP64__)
#define ABI_STRING "mips64"
#elif defined(__i386__)
#define ABI_STRING "x86"
#elif defined(__x86_64__)
#define ABI_STRING "x86_64"
#else
#error "Unsupported ABI"
#endif
std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
std::ostringstream oss;
oss << " " << leak_bytes << " bytes in ";
oss << num_leaks << " unreachable allocation" << (num_leaks == 1 ? "" : "s");
oss << num_leaks << " unreachable allocation" << plural(num_leaks);
oss << std::endl;
oss << " ABI: '" ABI_STRING "'" << std::endl;
oss << std::endl;
for (auto it = leaks.begin(); it != leaks.end(); it++) {
oss << it->ToString(log_contents);
oss << std::endl;
}
return oss.str();


@ -18,6 +18,7 @@
#define LIBMEMUNREACHABLE_SCOPED_ALARM_H_
#include <signal.h>
#include <sys/time.h>
#include <chrono>
#include <functional>

libmemunreachable/Tarjan.h (new file)

@ -0,0 +1,131 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on system/update_engine/payload_generator/tarjan.cc
#ifndef LIBMEMUNREACHABLE_TARJAN_H_
#define LIBMEMUNREACHABLE_TARJAN_H_
#include <algorithm>
#include "Allocator.h"
template<class T>
class Node {
public:
allocator::set<Node<T>*> references_in;
allocator::set<Node<T>*> references_out;
size_t index;
size_t lowlink;
T* ptr;
Node(T* ptr, Allocator<Node> allocator) : references_in(allocator), references_out(allocator),
ptr(ptr) {};
Node(Node&& rhs) = default;
void Edge(Node<T>* ref) {
references_out.emplace(ref);
ref->references_in.emplace(this);
}
template<class F>
void Foreach(F&& f) {
for (auto& node: references_out) {
f(node->ptr);
}
}
private:
DISALLOW_COPY_AND_ASSIGN(Node<T>);
};
template<class T>
using Graph = allocator::vector<Node<T>*>;
template<class T>
using SCC = allocator::vector<Node<T>*>;
template<class T>
using SCCList = allocator::vector<SCC<T>>;
template<class T>
class TarjanAlgorithm {
public:
TarjanAlgorithm(Allocator<void> allocator) : index_(0),
stack_(allocator), components_(allocator) {}
void Execute(Graph<T>& graph, SCCList<T>& out);
private:
static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
void Tarjan(Node<T>* vertex, Graph<T>& graph);
size_t index_;
allocator::vector<Node<T>*> stack_;
SCCList<T> components_;
};
template<class T>
void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
stack_.clear();
components_.clear();
index_ = 0;
for (auto& it: graph) {
it->index = UNDEFINED_INDEX;
it->lowlink = UNDEFINED_INDEX;
}
for (auto& it: graph) {
if (it->index == UNDEFINED_INDEX) {
Tarjan(it, graph);
}
}
out.swap(components_);
}
template<class T>
void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
assert(vertex->index == UNDEFINED_INDEX);
vertex->index = index_;
vertex->lowlink = index_;
index_++;
stack_.push_back(vertex);
for (auto& it: vertex->references_out) {
Node<T>* vertex_next = it;
if (vertex_next->index == UNDEFINED_INDEX) {
Tarjan(vertex_next, graph);
vertex->lowlink = std::min(vertex->lowlink, vertex_next->lowlink);
} else if (std::find(stack_.begin(), stack_.end(), vertex_next) != stack_.end()) {
vertex->lowlink = std::min(vertex->lowlink, vertex_next->index);
}
}
if (vertex->lowlink == vertex->index) {
SCC<T> component{components_.get_allocator()};
Node<T>* other_vertex;
do {
other_vertex = stack_.back();
stack_.pop_back();
component.push_back(other_vertex);
} while (other_vertex != vertex && !stack_.empty());
components_.emplace_back(component);
}
}
template<class T>
void Tarjan(Graph<T>& graph, SCCList<T>& out) {
TarjanAlgorithm<T> tarjan{graph.get_allocator()};
tarjan.Execute(graph, out);
}
#endif // LIBMEMUNREACHABLE_TARJAN_H_
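For orientation, a minimal hypothetical sketch of the Tarjan helper in isolation, following the same construction pattern LeakFolding uses; the payload type, names, and heap parameter are invented:

#include "Allocator.h"
#include "Tarjan.h"

// Sketch only: three nodes where b and c reference each other, so Tarjan()
// should produce two strongly connected components: {b, c} and {a}.
static size_t CountComponentsSketch(Heap& heap) {
  struct Payload {};
  Payload a, b, c;
  Allocator<void> alloc(heap);
  Node<Payload> na(&a, alloc), nb(&b, alloc), nc(&c, alloc);
  na.Edge(&nb);  // a -> b
  nb.Edge(&nc);  // b -> c
  nc.Edge(&nb);  // c -> b, closing the cycle
  Graph<Payload> graph{alloc};
  graph.push_back(&na);
  graph.push_back(&nb);
  graph.push_back(&nc);
  SCCList<Payload> sccs{alloc};
  Tarjan(graph, sccs);
  return sccs.size();  // expected: 2
}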


@ -86,7 +86,7 @@ class ThreadCaptureImpl {
void PtraceDetach(pid_t tid, unsigned int signal);
bool PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info);
allocator::map<unsigned int, pid_t> captured_threads_;
allocator::map<pid_t, unsigned int> captured_threads_;
Allocator<ThreadCaptureImpl> allocator_;
pid_t pid_;
std::function<void(pid_t)> inject_test_func_;


@ -18,6 +18,8 @@
#define LIBMEMUNREACHABLE_BIONIC_H_
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdlib.h>
__BEGIN_DECLS


@ -27,11 +27,26 @@
struct Leak {
uintptr_t begin;
size_t size;
size_t num_backtrace_frames;
size_t referenced_count;
size_t referenced_size;
size_t similar_count;
size_t similar_size;
size_t similar_referenced_count;
size_t similar_referenced_size;
size_t total_size;
static const size_t contents_length = 32;
char contents[contents_length];
static const size_t backtrace_length = 16;
uintptr_t backtrace_frames[backtrace_length];
struct Backtrace {
size_t num_frames;
static const size_t max_frames = 16;
uintptr_t frames[max_frames];
} backtrace;
std::string ToString(bool log_contents) const;
};
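Finally, a hypothetical end-to-end sketch of the extended public API, using only the GetUnreachableMemory overload and struct fields visible elsewhere in this diff; the logging function itself is invented:

#include <inttypes.h>
#include <stdio.h>

#include <memunreachable/memunreachable.h>

// Sketch only: collect unreachable memory and print a one-line summary per
// reported leak. total_size covers the leak plus the unreachable memory it
// references, and similar_count covers folded leaks with the same backtrace.
static void LogUnreachableSketch() {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, 100 /* limit */)) {
    return;
  }
  printf("%zu bytes in %zu unreachable allocations\n", info.leak_bytes, info.num_leaks);
  for (const Leak& leak : info.leaks) {
    printf("  %zu bytes at 0x%" PRIxPTR " (%zu similar)\n",
           leak.total_size, leak.begin, leak.similar_count);
  }
}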


@ -15,12 +15,6 @@
*/
#include <Allocator.h>
#include <sys/time.h>
#include <chrono>
#include <functional>
#include <list>
#include <vector>
#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
@ -28,8 +22,6 @@
std::function<void()> ScopedAlarm::func_;
using namespace std::chrono_literals;
class AllocatorTest : public testing::Test {
protected:
AllocatorTest() : heap(), disable_malloc_() {}
@ -180,94 +172,3 @@ TEST_F(AllocatorTest, unique) {
ASSERT_NE(ptr, nullptr);
}
class DisableMallocTest : public ::testing::Test {
protected:
void alarm(std::chrono::microseconds us) {
std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
itimerval t = itimerval();
t.it_value.tv_sec = s.count();
t.it_value.tv_usec = (us - s).count();
setitimer(ITIMER_REAL, &t, NULL);
}
};
TEST_F(DisableMallocTest, reenable) {
ASSERT_EXIT({
alarm(100ms);
void *ptr1 = malloc(128);
ASSERT_NE(ptr1, nullptr);
free(ptr1);
{
ScopedDisableMalloc disable_malloc;
}
void *ptr2 = malloc(128);
ASSERT_NE(ptr2, nullptr);
free(ptr2);
_exit(1);
}, ::testing::ExitedWithCode(1), "");
}
TEST_F(DisableMallocTest, deadlock_allocate) {
ASSERT_DEATH({
void *ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
free(ptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
void* ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
free(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_new) {
ASSERT_DEATH({
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
delete(ptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
delete(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_delete) {
ASSERT_DEATH({
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
{
alarm(250ms);
ScopedDisableMalloc disable_malloc;
delete(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_free) {
ASSERT_DEATH({
void *ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
free(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_fork) {
ASSERT_DEATH({
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
fork();
}
}, "");
}


@ -0,0 +1,116 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/time.h>
#include <chrono>
#include <functional>
#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
using namespace std::chrono_literals;
class DisableMallocTest : public ::testing::Test {
protected:
void alarm(std::chrono::microseconds us) {
std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
itimerval t = itimerval();
t.it_value.tv_sec = s.count();
t.it_value.tv_usec = (us - s).count();
setitimer(ITIMER_REAL, &t, NULL);
}
};
TEST_F(DisableMallocTest, reenable) {
ASSERT_EXIT({
alarm(100ms);
void *ptr1 = malloc(128);
ASSERT_NE(ptr1, nullptr);
free(ptr1);
{
ScopedDisableMalloc disable_malloc;
}
void *ptr2 = malloc(128);
ASSERT_NE(ptr2, nullptr);
free(ptr2);
_exit(1);
}, ::testing::ExitedWithCode(1), "");
}
TEST_F(DisableMallocTest, deadlock_allocate) {
ASSERT_DEATH({
void *ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
free(ptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
void* ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
free(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_new) {
ASSERT_DEATH({
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
delete(ptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
delete(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_delete) {
ASSERT_DEATH({
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
{
alarm(250ms);
ScopedDisableMalloc disable_malloc;
delete(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_free) {
ASSERT_DEATH({
void *ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
free(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_fork) {
ASSERT_DEATH({
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
fork();
}
}, "");
}


@ -80,6 +80,8 @@ TEST_F(HeapWalkerTest, leak) {
HeapWalker heap_walker(heap_);
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
@ -106,9 +108,11 @@ TEST_F(HeapWalkerTest, live) {
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
heap_walker.Root(buffer_begin(buffer1), buffer_end(buffer1));
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = SIZE_T_MAX;
size_t leaked_bytes = SIZE_T_MAX;
size_t num_leaks = SIZE_MAX;
size_t leaked_bytes = SIZE_MAX;
ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(0U, num_leaks);
@ -132,9 +136,11 @@ TEST_F(HeapWalkerTest, unaligned) {
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
heap_walker.Root(buffer_begin(buffer1) + i, buffer_end(buffer1) - j);
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = SIZE_T_MAX;
size_t leaked_bytes = SIZE_T_MAX;
size_t num_leaks = SIZE_MAX;
size_t leaked_bytes = SIZE_MAX;
ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(0U, num_leaks);
@ -143,3 +149,26 @@ TEST_F(HeapWalkerTest, unaligned) {
}
}
}
TEST_F(HeapWalkerTest, cycle) {
void* buffer1;
void* buffer2;
buffer1 = &buffer2;
buffer2 = &buffer1;
HeapWalker heap_walker(heap_);
heap_walker.Allocation(buffer_begin(buffer1), buffer_end(buffer1));
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
ASSERT_EQ(true, heap_walker.DetectLeaks());
allocator::vector<Range> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(2U, num_leaks);
EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
}


@ -0,0 +1,23 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bionic.h"
void malloc_disable() {
}
void malloc_enable() {
}


@ -0,0 +1,427 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HeapWalker.h"
#include "LeakFolding.h"
#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
#include "Allocator.h"
class LeakFoldingTest : public ::testing::Test {
public:
LeakFoldingTest() : disable_malloc_(), heap_() {}
void TearDown() {
ASSERT_TRUE(heap_.empty());
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc_.timed_out());
}
}
protected:
ScopedDisableMallocTimeout disable_malloc_;
Heap heap_;
};
#define buffer_begin(buffer) reinterpret_cast<uintptr_t>(&buffer[0])
#define buffer_end(buffer) (reinterpret_cast<uintptr_t>(&buffer[0]) + sizeof(buffer))
#define ALLOCATION(heap_walker, buffer) \
ASSERT_EQ(true, heap_walker.Allocation(buffer_begin(buffer), buffer_end(buffer)))
TEST_F(LeakFoldingTest, one) {
void* buffer1[1] = {nullptr};
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(1U, num_leaks);
EXPECT_EQ(sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(0U, leaked[0].referenced_count);
EXPECT_EQ(0U, leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, two) {
void* buffer1[1] = {nullptr};
void* buffer2[1] = {nullptr};
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(2U, num_leaks);
EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
EXPECT_EQ(0U, leaked[0].referenced_count);
EXPECT_EQ(0U, leaked[0].referenced_size);
EXPECT_EQ(0U, leaked[1].referenced_count);
EXPECT_EQ(0U, leaked[1].referenced_size);
}
TEST_F(LeakFoldingTest, dominator) {
void* buffer1[1];
void* buffer2[1] = {nullptr};
buffer1[0] = buffer2;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(2U, num_leaks);
EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(1U, leaked[0].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, cycle) {
void* buffer1[1];
void* buffer2[1];
void* buffer3[1];
buffer1[0] = buffer2;
buffer2[0] = buffer3;
buffer3[0] = buffer2;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(3U, num_leaks);
EXPECT_EQ(3*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(2U, leaked[0].referenced_count);
EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, dominator_cycle) {
void* buffer1[2] = {nullptr, nullptr};
void* buffer2[2];
void* buffer3[1] = {nullptr};
buffer1[0] = &buffer2;
buffer2[0] = &buffer1;
buffer2[1] = &buffer3;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(3U, num_leaks);
EXPECT_EQ(5*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
EXPECT_EQ(2U, leaked[0].referenced_count);
EXPECT_EQ(3*sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(2U, leaked[1].referenced_count);
EXPECT_EQ(3*sizeof(uintptr_t), leaked[1].referenced_size);
}
TEST_F(LeakFoldingTest, two_cycles) {
void* buffer1[1];
void* buffer2[1];
void* buffer3[1];
void* buffer4[1];
void* buffer5[1];
void* buffer6[1];
buffer1[0] = buffer3;
buffer2[0] = buffer5;
buffer3[0] = buffer4;
buffer4[0] = buffer3;
buffer5[0] = buffer6;
buffer6[0] = buffer5;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
ALLOCATION(heap_walker, buffer5);
ALLOCATION(heap_walker, buffer6);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(6U, num_leaks);
EXPECT_EQ(6*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(2U, leaked.size());
EXPECT_EQ(2U, leaked[0].referenced_count);
EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(2U, leaked[1].referenced_count);
EXPECT_EQ(2*sizeof(uintptr_t), leaked[1].referenced_size);
}
TEST_F(LeakFoldingTest, two_dominator_cycles) {
void* buffer1[1];
void* buffer2[1];
void* buffer3[1];
void* buffer4[1];
buffer1[0] = buffer2;
buffer2[0] = buffer1;
buffer3[0] = buffer4;
buffer4[0] = buffer3;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(4U, num_leaks);
EXPECT_EQ(4*sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(4U, leaked.size());
EXPECT_EQ(1U, leaked[0].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(1U, leaked[1].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[1].referenced_size);
EXPECT_EQ(1U, leaked[2].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[2].referenced_size);
EXPECT_EQ(1U, leaked[3].referenced_count);
EXPECT_EQ(sizeof(uintptr_t), leaked[3].referenced_size);
}
TEST_F(LeakFoldingTest, giant_dominator_cycle) {
const size_t n = 1000;
void* buffer[n];
HeapWalker heap_walker(heap_);
for (size_t i = 0; i < n; i ++) {
ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
reinterpret_cast<uintptr_t>(&buffer[i+1])));
}
for (size_t i = 0; i < n - 1; i++) {
buffer[i] = &buffer[i+1];
}
buffer[n - 1] = &buffer[0];
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(n, num_leaks);
EXPECT_EQ(n * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1000U, leaked.size());
EXPECT_EQ(n - 1, leaked[0].referenced_count);
EXPECT_EQ((n - 1) * sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, giant_cycle) {
const size_t n = 1000;
void* buffer[n];
void* buffer1[1];
HeapWalker heap_walker(heap_);
for (size_t i = 0; i < n - 1; i++) {
buffer[i] = &buffer[i+1];
}
buffer[n - 1] = &buffer[0];
buffer1[0] = &buffer[0];
for (size_t i = 0; i < n; i ++) {
ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
reinterpret_cast<uintptr_t>(&buffer[i+1])));
}
ALLOCATION(heap_walker, buffer1);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(n + 1, num_leaks);
EXPECT_EQ((n + 1) * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(n, leaked[0].referenced_count);
EXPECT_EQ(n * sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, multipath) {
void* buffer1[2];
void* buffer2[1];
void* buffer3[1];
void* buffer4[1] = {nullptr};
//     1
//    / \
//   v   v
//   2   3
//    \ /
//     v
//     4
buffer1[0] = &buffer2;
buffer1[1] = &buffer3;
buffer2[0] = &buffer4;
buffer3[0] = &buffer4;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(4U, num_leaks);
EXPECT_EQ(5 * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(3U, leaked[0].referenced_count);
EXPECT_EQ(3 * sizeof(uintptr_t), leaked[0].referenced_size);
}
TEST_F(LeakFoldingTest, multicycle) {
void* buffer1[2]{};
void* buffer2[2]{};
void* buffer3[2]{};
void* buffer4[2]{};
//     1
//    / ^
//   v   \
//   2 -> 3
//    \   ^
//     v /
//      4
buffer1[0] = &buffer2;
buffer2[0] = &buffer3;
buffer2[1] = &buffer4;
buffer3[0] = &buffer1;
buffer4[0] = &buffer3;
HeapWalker heap_walker(heap_);
ALLOCATION(heap_walker, buffer1);
ALLOCATION(heap_walker, buffer2);
ALLOCATION(heap_walker, buffer3);
ALLOCATION(heap_walker, buffer4);
LeakFolding folding(heap_, heap_walker);
ASSERT_TRUE(folding.FoldLeaks());
allocator::vector<LeakFolding::Leak> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
EXPECT_EQ(4U, num_leaks);
EXPECT_EQ(8 * sizeof(uintptr_t), leaked_bytes);
ASSERT_EQ(4U, leaked.size());
EXPECT_EQ(3U, leaked[0].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[0].referenced_size);
EXPECT_EQ(3U, leaked[1].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[1].referenced_size);
EXPECT_EQ(3U, leaked[2].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[2].referenced_size);
EXPECT_EQ(3U, leaked[3].referenced_count);
EXPECT_EQ(6 * sizeof(uintptr_t), leaked[3].referenced_size);
}