diff --git a/libmemunreachable/Allocator.cpp b/libmemunreachable/Allocator.cpp
index b75f1e559..68f654c29 100644
--- a/libmemunreachable/Allocator.cpp
+++ b/libmemunreachable/Allocator.cpp
@@ -370,11 +370,11 @@ void* HeapImpl::Alloc(size_t size) {
 }
 
 void* HeapImpl::AllocLocked(size_t size) {
-  if (__predict_false(size > kMaxBucketAllocationSize)) {
+  if (size > kMaxBucketAllocationSize) {
     return MapAlloc(size);
   }
   int bucket = size_to_bucket(size);
-  if (__predict_false(free_chunks_[bucket].empty())) {
+  if (free_chunks_[bucket].empty()) {
     Chunk *chunk = new Chunk(this, bucket);
     free_chunks_[bucket].insert(chunk->node_);
   }
diff --git a/libmemunreachable/Allocator.h b/libmemunreachable/Allocator.h
index b0a4d4cfb..a8f579ee1 100644
--- a/libmemunreachable/Allocator.h
+++ b/libmemunreachable/Allocator.h
@@ -24,6 +24,7 @@
 #include <map>
 #include <memory>
 #include <set>
+#include <unordered_map>
 #include <unordered_set>
 #include <vector>
 
 extern std::atomic<int> heap_count;
@@ -209,9 +210,12 @@ using vector = std::vector<T, Allocator<T>>;
 template<class T>
 using list = std::list<T, Allocator<T>>;
 
-template<class T, class Key, class Compare = std::less<Key>>
+template<class Key, class T, class Compare = std::less<Key>>
 using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
 
+template<class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
+using unordered_map = std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
+
 template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
 using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
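
Note: the new unordered_map alias follows the same pattern as the existing vector, list, and map aliases: it threads the custom Allocator through the container's allocator template parameter, so every node the container allocates comes from the dedicated heap. A minimal sketch of the pattern, using std::allocator as a stand-in for the Allocator<T> above (the stand-in is an assumption for illustration only):

#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>

// Stand-in for the libmemunreachable Allocator<T>; any STL-compatible
// allocator slots in the same way.
template <class T>
using StandInAllocator = std::allocator<T>;

template <class Key, class T, class Hash = std::hash<Key>,
          class KeyEqual = std::equal_to<Key>>
using unordered_map = std::unordered_map<
    Key, T, Hash, KeyEqual, StandInAllocator<std::pair<const Key, T>>>;

int main() {
  unordered_map<std::string, int> counts;  // hash/equality default correctly
  counts.emplace("leak", 1);
  return counts.at("leak") == 1 ? 0 : 1;
}
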
diff --git a/libmemunreachable/Android.mk b/libmemunreachable/Android.mk
index 0df26a84a..7b66d4401 100644
--- a/libmemunreachable/Android.mk
+++ b/libmemunreachable/Android.mk
@@ -3,6 +3,7 @@ LOCAL_PATH := $(call my-dir)
 memunreachable_srcs := \
    Allocator.cpp \
    HeapWalker.cpp \
+   LeakFolding.cpp \
    LeakPipe.cpp \
    LineBuffer.cpp \
    MemUnreachable.cpp \
@@ -12,7 +13,9 @@ memunreachable_srcs := \
 
 memunreachable_test_srcs := \
    tests/Allocator_test.cpp \
+   tests/DisableMalloc_test.cpp \
    tests/HeapWalker_test.cpp \
+   tests/LeakFolding_test.cpp \
    tests/MemUnreachable_test.cpp \
    tests/ThreadCapture_test.cpp \
 
@@ -41,3 +44,22 @@ LOCAL_CLANG := true
 LOCAL_SHARED_LIBRARIES := libmemunreachable libbase liblog
 
 include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := memunreachable_test
+LOCAL_SRC_FILES := \
+   Allocator.cpp \
+   HeapWalker.cpp \
+   LeakFolding.cpp \
+   tests/Allocator_test.cpp \
+   tests/HeapWalker_test.cpp \
+   tests/HostMallocStub.cpp \
+   tests/LeakFolding_test.cpp \
+
+LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
+LOCAL_CLANG := true
+LOCAL_SHARED_LIBRARIES := libbase liblog
+LOCAL_MODULE_HOST_OS := linux
+
+include $(BUILD_HOST_NATIVE_TEST)
diff --git a/libmemunreachable/HeapWalker.cpp b/libmemunreachable/HeapWalker.cpp
index 1a0c33dde..19393ecb8 100644
--- a/libmemunreachable/HeapWalker.cpp
+++ b/libmemunreachable/HeapWalker.cpp
@@ -21,17 +21,19 @@
 
 #include "Allocator.h"
 #include "HeapWalker.h"
+#include "LeakFolding.h"
 #include "log.h"
 
 bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
   if (end == begin) {
     end = begin + 1;
   }
-  auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
+  Range range{begin, end};
+  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
   if (inserted.second) {
     valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
     valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
-    allocation_bytes_ += end - begin;
+    allocation_bytes_ += range.size();
     return true;
   } else {
     Range overlap = inserted.first->first;
@@ -44,27 +46,30 @@ bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
   }
 }
 
-void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
-  allocator::vector<Range> to_do(1, range, allocator_);
+bool HeapWalker::IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info) {
+  if (ptr >= valid_allocations_range_.begin && ptr < valid_allocations_range_.end) {
+    AllocationMap::iterator it = allocations_.find(Range{ptr, ptr + 1});
+    if (it != allocations_.end()) {
+      *range = it->first;
+      *info = &it->second;
+      return true;
+    }
+  }
+  return false;
+}
+
+void HeapWalker::RecurseRoot(const Range& root) {
+  allocator::vector<Range> to_do(1, root, allocator_);
   while (!to_do.empty()) {
     Range range = to_do.back();
     to_do.pop_back();
-    uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
-    // TODO(ccross): we might need to consider a pointer to the end of a buffer
-    // to be inside the buffer, which means the common case of a pointer to the
-    // beginning of a buffer may keep two ranges live.
-    for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
-      uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
-      if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
-        RangeMap::iterator it = allocations_.find(Range{val, val + 1});
-        if (it != allocations_.end()) {
-          if (!(it->second.*flag)) {
-            to_do.push_back(it->first);
-            it->second.*flag = true;
-          }
-        }
+
+    ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
+      if (!ref_info->referenced_from_root) {
+        ref_info->referenced_from_root = true;
+        to_do.push_back(ref_range);
       }
-    }
+    });
   }
 }
 
@@ -85,27 +90,22 @@ size_t HeapWalker::AllocationBytes() {
 }
 
 bool HeapWalker::DetectLeaks() {
+  // Recursively walk pointers from roots to mark referenced allocations
   for (auto it = roots_.begin(); it != roots_.end(); it++) {
-    Walk(*it, &RangeInfo::referenced_from_root);
+    RecurseRoot(*it);
   }
 
   Range vals;
   vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
   vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
-  Walk(vals, &RangeInfo::referenced_from_root);
 
-  for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
-    if (!it->second.referenced_from_root) {
-      Walk(it->first, &RangeInfo::referenced_from_leak);
-    }
-  }
+  RecurseRoot(vals);
 
   return true;
 }
 
 bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
     size_t* num_leaks_out, size_t* leak_bytes_out) {
-  DetectLeaks();
   leaked.clear();
 
   size_t num_leaks = 0;
@@ -120,7 +120,7 @@ bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
   size_t n = 0;
   for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
     if (!it->second.referenced_from_root) {
-      if (n++ <= limit) {
+      if (n++ < limit) {
         leaked.push_back(it->first);
       }
     }
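
Note: RecurseRoot is a conservative mark phase. It treats every pointer-aligned word inside a live range as a potential pointer and marks any allocation that word lands in, iterating with an explicit work list rather than recursion so deep reference chains cannot overflow the stack. A self-contained sketch of the word-by-word scan that ForEachPtrInRange performs (plain types stand in for the Range/AllocationInfo machinery; illustration only, not the library's API):

#include <cstdint>
#include <cstdio>

// Scan [begin, end) for aligned words that point into
// [valid_begin, valid_end), the bounding box of all known allocations.
template <class F>
void ForEachCandidatePtr(uintptr_t begin, uintptr_t end,
                         uintptr_t valid_begin, uintptr_t valid_end, F&& f) {
  // Round up to the next pointer-aligned address before dereferencing.
  uintptr_t aligned = (begin + sizeof(uintptr_t) - 1) & ~(sizeof(uintptr_t) - 1);
  for (uintptr_t i = aligned; i < end; i += sizeof(uintptr_t)) {
    uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
    if (val >= valid_begin && val < valid_end) {
      f(val);  // candidate pointer; the real code then does a range lookup
    }
  }
}

int main() {
  uintptr_t heap_word = 0;  // pretend allocation
  uintptr_t root[2] = {reinterpret_cast<uintptr_t>(&heap_word), 42};
  int hits = 0;
  ForEachCandidatePtr(reinterpret_cast<uintptr_t>(root),
                      reinterpret_cast<uintptr_t>(root + 2),
                      reinterpret_cast<uintptr_t>(&heap_word),
                      reinterpret_cast<uintptr_t>(&heap_word + 1),
                      [&](uintptr_t) { hits++; });
  printf("%d candidate pointer(s)\n", hits);  // prints 1
  return 0;
}
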
diff --git a/libmemunreachable/HeapWalker.h b/libmemunreachable/HeapWalker.h
index 4be1934c3..7b851c4c1 100644
--- a/libmemunreachable/HeapWalker.h
+++ b/libmemunreachable/HeapWalker.h
@@ -20,11 +20,14 @@
 #include "android-base/macros.h"
 
 #include "Allocator.h"
+#include "Tarjan.h"
 
 // A range [begin, end)
 struct Range {
   uintptr_t begin;
   uintptr_t end;
+
+  size_t size() const { return end - begin; };
 };
 
 // Comparator for Ranges that returns equivalence for overlapping ranges
@@ -34,7 +37,6 @@ struct compare_range {
   }
 };
 
-
 class HeapWalker {
  public:
   HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
@@ -55,16 +57,25 @@ class HeapWalker {
   size_t Allocations();
   size_t AllocationBytes();
 
- private:
-  struct RangeInfo {
+  template<class F>
+  void ForEachPtrInRange(const Range& range, F&& f);
+
+  template<class F>
+  void ForEachAllocation(F&& f);
+
+  struct AllocationInfo {
     bool referenced_from_root;
-    bool referenced_from_leak;
   };
-  void Walk(const Range& range, bool RangeInfo::* flag);
+
+ private:
+
+  void RecurseRoot(const Range& root);
+  bool IsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
+
   DISALLOW_COPY_AND_ASSIGN(HeapWalker);
   Allocator<HeapWalker> allocator_;
-  using RangeMap = allocator::map<Range, RangeInfo, compare_range>;
-  RangeMap allocations_;
+  using AllocationMap = allocator::map<Range, AllocationInfo, compare_range>;
+  AllocationMap allocations_;
   size_t allocation_bytes_;
   Range valid_allocations_range_;
@@ -72,4 +83,28 @@ class HeapWalker {
   allocator::vector<uintptr_t> root_vals_;
 };
 
+template<class F>
+inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
+  uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
+  // TODO(ccross): we might need to consider a pointer to the end of a buffer
+  // to be inside the buffer, which means the common case of a pointer to the
+  // beginning of a buffer may keep two ranges live.
+  for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
+    Range ref_range;
+    AllocationInfo* ref_info;
+    if (IsAllocationPtr(*reinterpret_cast<uintptr_t*>(i), &ref_range, &ref_info)) {
+      f(ref_range, ref_info);
+    }
+  }
+}
+
+template<class F>
+inline void HeapWalker::ForEachAllocation(F&& f) {
+  for (auto& it : allocations_) {
+    const Range& range = it.first;
+    HeapWalker::AllocationInfo& allocation = it.second;
+    f(range, allocation);
+  }
+}
+
 #endif
diff --git a/libmemunreachable/Leak.h b/libmemunreachable/Leak.h
new file mode 100644
index 000000000..eaeeea7cf
--- /dev/null
+++ b/libmemunreachable/Leak.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBMEMUNREACHABLE_LEAK_H_
+#define LIBMEMUNREACHABLE_LEAK_H_
+
+#include <functional>
+#include <vector>
+
+#include "memunreachable/memunreachable.h"
+
+// Custom std::hash specialization so that Leak::Backtrace can be used
+// as a key in std::unordered_map.
+namespace std {
+
+template<>
+struct hash<Leak::Backtrace> {
+  std::size_t operator()(const Leak::Backtrace& key) const {
+    std::size_t seed = 0;
+
+    hash_combine(seed, key.num_frames);
+    for (size_t i = 0; i < key.num_frames; i++) {
+      hash_combine(seed, key.frames[i]);
+    }
+
+    return seed;
+  }
+
+ private:
+  template<typename T>
+  inline void hash_combine(std::size_t& seed, const T& v) const {
+    std::hash<T> hasher;
+    seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+  }
+};
+
+}  // namespace std
+
+static bool operator==(const Leak::Backtrace& lhs, const Leak::Backtrace& rhs) {
+  return (lhs.num_frames == rhs.num_frames) &&
+      memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
+}
+
+#endif
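
Note: hash_combine is the boost-style combiner: each value's hash is folded into a running seed together with the 0x9e3779b9 golden-ratio constant and two shifts, so both the frame values and their order affect the result. A standalone illustration of the same combiner (the technique, not the header above):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

template <typename T>
static void hash_combine(std::size_t& seed, const T& v) {
  std::hash<T> hasher;
  seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

int main() {
  uintptr_t frames1[] = {0x1000, 0x2000};
  uintptr_t frames2[] = {0x2000, 0x1000};  // same frames, different order

  std::size_t seed1 = 0, seed2 = 0;
  for (uintptr_t f : frames1) hash_combine(seed1, f);
  for (uintptr_t f : frames2) hash_combine(seed2, f);

  // Order matters, so the two backtraces (almost certainly) hash differently.
  printf("%zx %zx\n", seed1, seed2);
  return 0;
}
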
diff --git a/libmemunreachable/LeakFolding.cpp b/libmemunreachable/LeakFolding.cpp
new file mode 100644
index 000000000..be4d20c95
--- /dev/null
+++ b/libmemunreachable/LeakFolding.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include "Allocator.h"
+#include "HeapWalker.h"
+#include "LeakFolding.h"
+#include "Tarjan.h"
+#include "log.h"
+
+// Converts the possibly cyclic graph of leaks to a DAG by combining
+// strongly-connected components into an object, stored in the scc pointer
+// of each node in the component.
+void LeakFolding::ComputeDAG() {
+  SCCList<LeakInfo> scc_list{allocator_};
+  Tarjan(leak_graph_, scc_list);
+
+  Allocator<SCCInfo> scc_allocator = allocator_;
+
+  for (auto& scc_nodes: scc_list) {
+    Allocator<SCCInfo>::unique_ptr leak_scc;
+    leak_scc = scc_allocator.make_unique(scc_allocator);
+
+    for (auto& node: scc_nodes) {
+      node->ptr->scc = leak_scc.get();
+      leak_scc->count++;
+      leak_scc->size += node->ptr->range.size();
+    }
+
+    leak_scc_.emplace_back(std::move(leak_scc));
+  }
+
+  for (auto& it : leak_map_) {
+    LeakInfo& leak = it.second;
+    for (auto& ref: leak.node.references_out) {
+      if (leak.scc != ref->ptr->scc) {
+        leak.scc->node.Edge(&ref->ptr->scc->node);
+      }
+    }
+  }
+}
+
+void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
+  std::function<void(SCCInfo*)> walk(std::allocator_arg, allocator_,
+      [&](SCCInfo* scc) {
+        if (scc->accumulator != dominator) {
+          scc->accumulator = dominator;
+          dominator->cumulative_size += scc->size;
+          dominator->cumulative_count += scc->count;
+          scc->node.Foreach([&](SCCInfo* ref) {
+            walk(ref);
+          });
+        }
+      });
+  walk(dominator);
+}
+
+bool LeakFolding::FoldLeaks() {
+  Allocator<LeakInfo> leak_allocator = allocator_;
+
+  // Find all leaked allocations and insert them into leak_map_ and leak_graph_
+  heap_walker_.ForEachAllocation(
+      [&](const Range& range, HeapWalker::AllocationInfo& allocation) {
+        if (!allocation.referenced_from_root) {
+          auto it = leak_map_.emplace(std::piecewise_construct,
+              std::forward_as_tuple(range),
+              std::forward_as_tuple(range, allocator_));
+          LeakInfo& leak = it.first->second;
+          leak_graph_.push_back(&leak.node);
+        }
+      });
+
+  // Find references between leaked allocations and connect them in leak_graph_
+  for (auto& it : leak_map_) {
+    LeakInfo& leak = it.second;
+    heap_walker_.ForEachPtrInRange(leak.range,
+        [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
+          if (!ptr_info->referenced_from_root) {
+            LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
+            leak.node.Edge(&ptr_leak->node);
+          }
+        });
+  }
+
+  // Convert the cyclic graph to a DAG by grouping strongly connected components
+  ComputeDAG();
+
+  // Compute dominators and cumulative sizes
+  for (auto& scc : leak_scc_) {
+    if (scc->node.references_in.size() == 0) {
+      scc->dominator = true;
+      AccumulateLeaks(scc.get());
+    }
+  }
+
+  return true;
+}
+
+bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked,
+    size_t* num_leaks_out, size_t* leak_bytes_out) {
+  size_t num_leaks = 0;
+  size_t leak_bytes = 0;
+  for (auto& it : leak_map_) {
+    const LeakInfo& leak = it.second;
+    num_leaks++;
+    leak_bytes += leak.range.size();
+  }
+
+  for (auto& it : leak_map_) {
+    const LeakInfo& leak = it.second;
+    if (leak.scc->dominator) {
+      leaked.emplace_back(Leak{leak.range,
+          leak.scc->cumulative_count - 1,
+          leak.scc->cumulative_size - leak.range.size()});
+    }
+  }
+
+  if (num_leaks_out) {
+    *num_leaks_out = num_leaks;
+  }
+  if (leak_bytes_out) {
+    *leak_bytes_out = leak_bytes;
+  }
+
+  return true;
+}
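
Note: AccumulateLeaks charges every SCC reachable from a dominator SCC (including the dominator itself) to that dominator's cumulative totals, using the accumulator field as a per-dominator visited marker so shared subtrees are counted once per dominator. A toy rendering with plain structs in place of SCCInfo (illustration only):

#include <cstddef>
#include <vector>

// Per-SCC bookkeeping, standing in for SCCInfo.
struct Scc {
  size_t size = 0;             // bytes owned by allocations in this SCC
  size_t cumulative_size = 0;  // filled in for dominators
  Scc* accumulator = nullptr;  // last dominator that counted this SCC
  std::vector<Scc*> refs;      // edges in the folded DAG
};

// Fold every SCC reachable from dominator into its cumulative totals,
// visiting each SCC at most once per dominator.
void Accumulate(Scc* dominator, Scc* scc) {
  if (scc->accumulator == dominator) return;  // already counted
  scc->accumulator = dominator;
  dominator->cumulative_size += scc->size;
  for (Scc* ref : scc->refs) Accumulate(dominator, ref);
}

int main() {
  Scc a, b, c;
  a.size = 8; b.size = 16; c.size = 24;
  a.refs = {&b};
  b.refs = {&c};
  Accumulate(&a, &a);  // a is a dominator: it has no incoming references
  return a.cumulative_size == 48 ? 0 : 1;  // 8 + 16 + 24
}
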
diff --git a/libmemunreachable/LeakFolding.h b/libmemunreachable/LeakFolding.h
new file mode 100644
index 000000000..732d3f281
--- /dev/null
+++ b/libmemunreachable/LeakFolding.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBMEMUNREACHABLE_LEAK_FOLDING_H_
+#define LIBMEMUNREACHABLE_LEAK_FOLDING_H_
+
+#include "HeapWalker.h"
+
+class LeakFolding {
+ public:
+  LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
+   : allocator_(allocator), heap_walker_(heap_walker),
+     leak_map_(allocator), leak_graph_(allocator), leak_scc_(allocator) {}
+
+  bool FoldLeaks();
+
+  struct Leak {
+    const Range range;
+    size_t referenced_count;
+    size_t referenced_size;
+  };
+
+  bool Leaked(allocator::vector<Leak>& leaked,
+      size_t* num_leaks_out, size_t* leak_bytes_out);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LeakFolding);
+  Allocator<void> allocator_;
+  HeapWalker& heap_walker_;
+
+  struct SCCInfo {
+   public:
+    Node<SCCInfo> node;
+
+    size_t count;
+    size_t size;
+
+    size_t cumulative_count;
+    size_t cumulative_size;
+
+    bool dominator;
+    SCCInfo* accumulator;
+
+    SCCInfo(Allocator<SCCInfo> allocator) : node(this, allocator),
+        count(0), size(0), cumulative_count(0), cumulative_size(0),
+        dominator(false), accumulator(nullptr) {}
+   private:
+    SCCInfo(SCCInfo&&) = delete;
+    DISALLOW_COPY_AND_ASSIGN(SCCInfo);
+  };
+
+  struct LeakInfo {
+   public:
+    Node<LeakInfo> node;
+
+    const Range range;
+
+    SCCInfo* scc;
+
+    LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
+        : node(this, allocator), range(range),
+          scc(nullptr) {}
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(LeakInfo);
+  };
+
+  void ComputeDAG();
+  void AccumulateLeaks(SCCInfo* dominator);
+
+  allocator::map<Range, LeakInfo, compare_range> leak_map_;
+  Graph<LeakInfo> leak_graph_;
+  allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
+};
+
+#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_
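
Note: together with HeapWalker, the intended call sequence is the one the new tests below exercise: register allocations, fold, then read back the dominating leaks. A condensed sketch of that sequence (assumes the Heap/allocator setup used by tests/LeakFolding_test.cpp; not a new API):

#include "HeapWalker.h"
#include "LeakFolding.h"

bool ReportFoldedLeaks(Heap& heap) {
  HeapWalker heap_walker(heap);
  // ... heap_walker.Allocation(begin, end) for each live allocation,
  // and heap_walker.Root(begin, end) for each root range ...

  LeakFolding folding(heap, heap_walker);
  if (!folding.FoldLeaks()) return false;

  allocator::vector<LeakFolding::Leak> leaked(heap);
  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  if (!folding.Leaked(leaked, &num_leaks, &leak_bytes)) return false;

  for (const LeakFolding::Leak& leak : leaked) {
    // leak.range is a dominating leak; referenced_count/referenced_size
    // summarize everything it (transitively) keeps alive.
    (void)leak;
  }
  return num_leaks > 0;
}
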
diff --git a/libmemunreachable/MemUnreachable.cpp b/libmemunreachable/MemUnreachable.cpp
index eca26eb6d..ac19a6615 100644
--- a/libmemunreachable/MemUnreachable.cpp
+++ b/libmemunreachable/MemUnreachable.cpp
@@ -21,12 +21,15 @@
 #include <functional>
 #include <iomanip>
 #include <mutex>
+#include <unordered_map>
 #include <sstream>
 #include <string>
 
 #include "Allocator.h"
 #include "HeapWalker.h"
+#include "Leak.h"
+#include "LeakFolding.h"
 #include "LeakPipe.h"
 #include "ProcessMappings.h"
 #include "PtracerThread.h"
@@ -117,32 +120,84 @@ bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
   return true;
 }
 
-bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
-    size_t* num_leaks, size_t* leak_bytes) {
+bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks,
+    size_t limit, size_t* num_leaks, size_t* leak_bytes) {
   ALOGI("sweeping process %d for unreachable memory", pid_);
   leaks.clear();
 
-  allocator::vector<Range> leaked{allocator_};
-  if (!heap_walker_.Leaked(leaked, limit, num_leaks, leak_bytes)) {
+  if (!heap_walker_.DetectLeaks()) {
     return false;
   }
 
-  for (auto it = leaked.begin(); it != leaked.end(); it++) {
-    Leak leak{};
-    leak.begin = it->begin;
-    leak.size = it->end - it->begin;;
-    memcpy(leak.contents, reinterpret_cast<void*>(it->begin),
-        std::min(leak.size, Leak::contents_length));
-    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->begin),
-        leak.backtrace_frames, leak.backtrace_length);
-    if (num_backtrace_frames > 0) {
-      leak.num_backtrace_frames = num_backtrace_frames;
-    }
-    leaks.emplace_back(leak);
-  }
+
+  allocator::vector<Range> leaked1{allocator_};
+  heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);
 
   ALOGI("sweeping done");
 
+  ALOGI("folding related leaks");
+
+  LeakFolding folding(allocator_, heap_walker_);
+  if (!folding.FoldLeaks()) {
+    return false;
+  }
+
+  allocator::vector<LeakFolding::Leak> leaked{allocator_};
+
+  if (!folding.Leaked(leaked, num_leaks, leak_bytes)) {
+    return false;
+  }
+
+  allocator::unordered_map<Leak::Backtrace, Leak*> backtrace_map{allocator_};
+
+  // Prevent reallocations of backing memory so we can store pointers into it
+  // in backtrace_map.
+  leaks.reserve(leaked.size());
+
+  for (auto& it: leaked) {
+    leaks.emplace_back();
+    Leak* leak = &leaks.back();
+
+    ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it.range.begin),
+        leak->backtrace.frames, leak->backtrace.max_frames);
+    if (num_backtrace_frames > 0) {
+      leak->backtrace.num_frames = num_backtrace_frames;
+
+      auto inserted = backtrace_map.emplace(leak->backtrace, leak);
+      if (!inserted.second) {
+        // Leak with same backtrace already exists, drop this one and
+        // increment similar counts on the existing one.
+        leaks.pop_back();
+        Leak* similar_leak = inserted.first->second;
+        similar_leak->similar_count++;
+        similar_leak->similar_size += it.range.size();
+        similar_leak->similar_referenced_count += it.referenced_count;
+        similar_leak->similar_referenced_size += it.referenced_size;
+        similar_leak->total_size += it.range.size();
+        similar_leak->total_size += it.referenced_size;
+        continue;
+      }
+    }
+
+    leak->begin = it.range.begin;
+    leak->size = it.range.size();
+    leak->referenced_count = it.referenced_count;
+    leak->referenced_size = it.referenced_size;
+    leak->total_size = leak->size + leak->referenced_size;
+    memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
+        std::min(leak->size, Leak::contents_length));
+  }
+
+  ALOGI("folding done");
+
+  std::sort(leaks.begin(), leaks.end(), [](const Leak& a, const Leak& b) {
+    return a.total_size > b.total_size;
+  });
+
+  if (leaks.size() > limit) {
+    leaks.resize(limit);
+  }
+
   return true;
 }
@@ -203,6 +258,11 @@ bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings
   return true;
 }
 
+template<typename T>
+static inline const char* plural(T val) {
+  return (val == 1) ? "" : "s";
+}
+
 bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
   int parent_pid = getpid();
   int parent_tid = gettid();
@@ -339,9 +399,8 @@ bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
   ALOGI("unreachable memory detection done");
   ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
-      info.leak_bytes, info.num_leaks, info.num_leaks == 1 ? "" : "s",
-      info.allocation_bytes, info.num_allocations, info.num_allocations == 1 ? "" : "s");
-
+      info.leak_bytes, info.num_leaks, plural(info.num_leaks),
+      info.allocation_bytes, info.num_allocations, plural(info.num_allocations));
   return true;
 }
@@ -353,6 +412,23 @@ std::string Leak::ToString(bool log_contents) const {
   oss << " bytes unreachable at ";
   oss << std::hex << begin;
   oss << std::endl;
+  if (referenced_count > 0) {
+    oss << std::dec;
+    oss << "   referencing " << referenced_size << " unreachable bytes";
+    oss << " in " << referenced_count << " allocation" << plural(referenced_count);
+    oss << std::endl;
+  }
+  if (similar_count > 0) {
+    oss << std::dec;
+    oss << "   and " << similar_size << " similar unreachable bytes";
+    oss << " in " << similar_count << " allocation" << plural(similar_count);
+    oss << std::endl;
+    if (similar_referenced_count > 0) {
+      oss << "     referencing " << similar_referenced_size << " unreachable bytes";
+      oss << " in " << similar_referenced_count << " allocation" << plural(similar_referenced_count);
+      oss << std::endl;
+    }
+  }
 
   if (log_contents) {
     const int bytes_per_line = 16;
@@ -361,7 +437,7 @@ std::string Leak::ToString(bool log_contents) const {
     if (bytes == size) {
       oss << "  contents:" << std::endl;
     } else {
-      oss << " first " << bytes << " bytes of contents:" << std::endl;
+      oss << "  first " << bytes << " bytes of contents:" << std::endl;
     }
 
     for (size_t i = 0; i < bytes; i += bytes_per_line) {
@@ -385,21 +461,41 @@ std::string Leak::ToString(bool log_contents) const {
       oss << std::endl;
     }
   }
-  if (num_backtrace_frames > 0) {
-    oss << backtrace_string(backtrace_frames, num_backtrace_frames);
+  if (backtrace.num_frames > 0) {
+    oss << backtrace_string(backtrace.frames, backtrace.num_frames);
   }
 
   return oss.str();
 }
 
+// Figure out the ABI based on defined macros.
+#if defined(__arm__)
+#define ABI_STRING "arm"
+#elif defined(__aarch64__)
+#define ABI_STRING "arm64"
+#elif defined(__mips__) && !defined(__LP64__)
+#define ABI_STRING "mips"
+#elif defined(__mips__) && defined(__LP64__)
+#define ABI_STRING "mips64"
+#elif defined(__i386__)
+#define ABI_STRING "x86"
+#elif defined(__x86_64__)
+#define ABI_STRING "x86_64"
+#else
+#error "Unsupported ABI"
+#endif
+
 std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
   std::ostringstream oss;
   oss << "  " << leak_bytes << " bytes in ";
-  oss << num_leaks << " unreachable allocation" << (num_leaks == 1 ? "" : "s");
+  oss << num_leaks << " unreachable allocation" << plural(num_leaks);
+  oss << std::endl;
+  oss << "  ABI: '" ABI_STRING "'" << std::endl;
   oss << std::endl;
 
   for (auto it = leaks.begin(); it != leaks.end(); it++) {
     oss << it->ToString(log_contents);
+    oss << std::endl;
   }
 
   return oss.str();
diff --git a/libmemunreachable/ScopedAlarm.h b/libmemunreachable/ScopedAlarm.h
index 019deea41..287f479a9 100644
--- a/libmemunreachable/ScopedAlarm.h
+++ b/libmemunreachable/ScopedAlarm.h
@@ -18,6 +18,7 @@
 #define LIBMEMUNREACHABLE_SCOPED_ALARM_H_
 
 #include <signal.h>
+#include <sys/time.h>
 
 #include <chrono>
 #include <functional>
diff --git a/libmemunreachable/Tarjan.h b/libmemunreachable/Tarjan.h
new file mode 100644
index 000000000..d7ecdb9ba
--- /dev/null
+++ b/libmemunreachable/Tarjan.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Based on system/update_engine/payload_generator/tarjan.cc
+
+#ifndef LIBMEMUNREACHABLE_TARJAN_H_
+#define LIBMEMUNREACHABLE_TARJAN_H_
+
+#include <algorithm>
+
+#include "Allocator.h"
+
+template<class T>
+class Node {
+ public:
+  allocator::set<Node<T>*> references_in;
+  allocator::set<Node<T>*> references_out;
+  size_t index;
+  size_t lowlink;
+
+  T* ptr;
+
+  Node(T* ptr, Allocator<Node> allocator) : references_in(allocator), references_out(allocator),
+    ptr(ptr) {};
+  Node(Node&& rhs) = default;
+  void Edge(Node<T>* ref) {
+    references_out.emplace(ref);
+    ref->references_in.emplace(this);
+  }
+  template<class F>
+  void Foreach(F&& f) {
+    for (auto& node: references_out) {
+      f(node->ptr);
+    }
+  }
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Node);
+};
+
+template<class T>
+using Graph = allocator::vector<Node<T>*>;
+
+template<class T>
+using SCC = allocator::vector<Node<T>*>;
+
+template<class T>
+using SCCList = allocator::vector<SCC<T>>;
+
+template<class T>
+class TarjanAlgorithm {
+ public:
+  TarjanAlgorithm(Allocator<void> allocator) : index_(0),
+    stack_(allocator), components_(allocator) {}
+
+  void Execute(Graph<T>& graph, SCCList<T>& out);
+ private:
+  static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
+  void Tarjan(Node<T>* vertex, Graph<T>& graph);
+
+  size_t index_;
+  allocator::vector<Node<T>*> stack_;
+  SCCList<T> components_;
+};
+
+template<class T>
+void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
+  stack_.clear();
+  components_.clear();
+  index_ = 0;
+  for (auto& it: graph) {
+    it->index = UNDEFINED_INDEX;
+    it->lowlink = UNDEFINED_INDEX;
+  }
+
+  for (auto& it: graph) {
+    if (it->index == UNDEFINED_INDEX) {
+      Tarjan(it, graph);
+    }
+  }
+  out.swap(components_);
+}
+
+template<class T>
+void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
+  assert(vertex->index == UNDEFINED_INDEX);
+  vertex->index = index_;
+  vertex->lowlink = index_;
+  index_++;
+  stack_.push_back(vertex);
+  for (auto& it: vertex->references_out) {
+    Node<T>* vertex_next = it;
+    if (vertex_next->index == UNDEFINED_INDEX) {
+      Tarjan(vertex_next, graph);
+      vertex->lowlink = std::min(vertex->lowlink, vertex_next->lowlink);
+    } else if (std::find(stack_.begin(), stack_.end(), vertex_next) != stack_.end()) {
+      vertex->lowlink = std::min(vertex->lowlink, vertex_next->index);
+    }
+  }
+  if (vertex->lowlink == vertex->index) {
+    SCC<T> component{components_.get_allocator()};
+    Node<T>* other_vertex;
+    do {
+      other_vertex = stack_.back();
+      stack_.pop_back();
+      component.push_back(other_vertex);
+    } while (other_vertex != vertex && !stack_.empty());
+
+    components_.emplace_back(component);
+  }
+}
+
+template<class T>
+void Tarjan(Graph<T>& graph, SCCList<T>& out) {
+  TarjanAlgorithm<T> tarjan{graph.get_allocator()};
+  tarjan.Execute(graph, out);
+}
+
+#endif // LIBMEMUNREACHABLE_TARJAN_H_
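
Note: for reference, the same algorithm over plain adjacency lists, without the Node/Allocator machinery: each vertex gets a DFS index, lowlink tracks the oldest vertex reachable through its subtree, and a vertex whose lowlink still equals its own index closes off one strongly connected component on the stack. Self-contained illustration, not part of the patch:

#include <algorithm>
#include <cstddef>
#include <vector>

static const size_t kUndefined = static_cast<size_t>(-1);

class TarjanDemo {
 public:
  explicit TarjanDemo(const std::vector<std::vector<size_t>>& adj)
      : adj_(adj), index_(adj.size(), kUndefined), lowlink_(adj.size()) {}

  std::vector<std::vector<size_t>> Run() {
    for (size_t v = 0; v < adj_.size(); v++) {
      if (index_[v] == kUndefined) Visit(v);
    }
    return sccs_;
  }

 private:
  void Visit(size_t v) {
    index_[v] = lowlink_[v] = next_index_++;
    stack_.push_back(v);
    for (size_t w : adj_[v]) {
      if (index_[w] == kUndefined) {
        Visit(w);
        lowlink_[v] = std::min(lowlink_[v], lowlink_[w]);
      } else if (std::find(stack_.begin(), stack_.end(), w) != stack_.end()) {
        lowlink_[v] = std::min(lowlink_[v], index_[w]);
      }
    }
    if (lowlink_[v] == index_[v]) {  // v is the root of an SCC
      std::vector<size_t> scc;
      size_t w;
      do {
        w = stack_.back();
        stack_.pop_back();
        scc.push_back(w);
      } while (w != v);
      sccs_.push_back(scc);
    }
  }

  const std::vector<std::vector<size_t>>& adj_;
  std::vector<size_t> index_, lowlink_, stack_;
  std::vector<std::vector<size_t>> sccs_;
  size_t next_index_ = 0;
};

int main() {
  // 0 -> 1 -> 2 -> 1: vertices 1 and 2 form a cycle, 0 is its own SCC.
  std::vector<std::vector<size_t>> adj = {{1}, {2}, {1}};
  return TarjanDemo(adj).Run().size() == 2 ? 0 : 1;
}
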
diff --git a/libmemunreachable/ThreadCapture.cpp b/libmemunreachable/ThreadCapture.cpp
index 635784020..e8a8392cc 100644
--- a/libmemunreachable/ThreadCapture.cpp
+++ b/libmemunreachable/ThreadCapture.cpp
@@ -86,7 +86,7 @@ class ThreadCaptureImpl {
   void PtraceDetach(pid_t tid, unsigned int signal);
   bool PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info);
 
-  allocator::map<unsigned int, pid_t> captured_threads_;
+  allocator::map<pid_t, unsigned int> captured_threads_;
   Allocator<ThreadCaptureImpl> allocator_;
   pid_t pid_;
   std::function<void()> inject_test_func_;
diff --git a/libmemunreachable/bionic.h b/libmemunreachable/bionic.h
index 92de24a81..83d07a8d0 100644
--- a/libmemunreachable/bionic.h
+++ b/libmemunreachable/bionic.h
@@ -18,6 +18,8 @@
 #define LIBMEMUNREACHABLE_BIONIC_H_
 
 #include <sys/cdefs.h>
+#include <stdint.h>
+#include <stdlib.h>
 
 __BEGIN_DECLS
diff --git a/libmemunreachable/include/memunreachable/memunreachable.h b/libmemunreachable/include/memunreachable/memunreachable.h
index f4f01ce99..9b227fd3b 100644
--- a/libmemunreachable/include/memunreachable/memunreachable.h
+++ b/libmemunreachable/include/memunreachable/memunreachable.h
@@ -27,11 +27,26 @@ struct Leak {
   uintptr_t begin;
   size_t size;
-  size_t num_backtrace_frames;
+
+  size_t referenced_count;
+  size_t referenced_size;
+
+  size_t similar_count;
+  size_t similar_size;
+  size_t similar_referenced_count;
+  size_t similar_referenced_size;
+
+  size_t total_size;
+
   static const size_t contents_length = 32;
   char contents[contents_length];
-  static const size_t backtrace_length = 16;
-  uintptr_t backtrace_frames[backtrace_length];
+
+  struct Backtrace {
+    size_t num_frames;
+
+    static const size_t max_frames = 16;
+    uintptr_t frames[max_frames];
+  } backtrace;
 
   std::string ToString(bool log_contents) const;
 };
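
Note: a caller sees the folded output through these structures: one Leak per unique backtrace, with referenced_* describing what that leak dominates and similar_* rolling up deduplicated leaks. A sketch of a consumer, assuming the GetUnreachableMemory(UnreachableMemoryInfo&, size_t) entry point and the UnreachableMemoryInfo::leaks vector declared in this header:

#include <cstdio>

#include <memunreachable/memunreachable.h>

void LogUnreachable() {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, 100 /* limit */)) {
    return;
  }
  for (const Leak& leak : info.leaks) {
    printf("%zu bytes at 0x%zx (+%zu bytes referenced, %zu similar), %zu frames\n",
           leak.size, static_cast<size_t>(leak.begin), leak.referenced_size,
           leak.similar_count, leak.backtrace.num_frames);
  }
}
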
disable_malloc; - fork(); - } - }, ""); -} diff --git a/libmemunreachable/tests/DisableMalloc_test.cpp b/libmemunreachable/tests/DisableMalloc_test.cpp new file mode 100644 index 000000000..ea5c22c88 --- /dev/null +++ b/libmemunreachable/tests/DisableMalloc_test.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include + +#include +#include + +using namespace std::chrono_literals; + +class DisableMallocTest : public ::testing::Test { + protected: + void alarm(std::chrono::microseconds us) { + std::chrono::seconds s = std::chrono::duration_cast(us); + itimerval t = itimerval(); + t.it_value.tv_sec = s.count(); + t.it_value.tv_usec = (us - s).count(); + setitimer(ITIMER_REAL, &t, NULL); + } +}; + +TEST_F(DisableMallocTest, reenable) { + ASSERT_EXIT({ + alarm(100ms); + void *ptr1 = malloc(128); + ASSERT_NE(ptr1, nullptr); + free(ptr1); + { + ScopedDisableMalloc disable_malloc; + } + void *ptr2 = malloc(128); + ASSERT_NE(ptr2, nullptr); + free(ptr2); + _exit(1); + }, ::testing::ExitedWithCode(1), ""); +} + +TEST_F(DisableMallocTest, deadlock_allocate) { + ASSERT_DEATH({ + void *ptr = malloc(128); + ASSERT_NE(ptr, nullptr); + free(ptr); + { + alarm(100ms); + ScopedDisableMalloc disable_malloc; + void* ptr = malloc(128); + ASSERT_NE(ptr, nullptr); + free(ptr); + } + }, ""); +} + +TEST_F(DisableMallocTest, deadlock_new) { + ASSERT_DEATH({ + char* ptr = new(char); + ASSERT_NE(ptr, nullptr); + delete(ptr); + { + alarm(100ms); + ScopedDisableMalloc disable_malloc; + char* ptr = new(char); + ASSERT_NE(ptr, nullptr); + delete(ptr); + } + }, ""); +} + +TEST_F(DisableMallocTest, deadlock_delete) { + ASSERT_DEATH({ + char* ptr = new(char); + ASSERT_NE(ptr, nullptr); + { + alarm(250ms); + ScopedDisableMalloc disable_malloc; + delete(ptr); + } + }, ""); +} + +TEST_F(DisableMallocTest, deadlock_free) { + ASSERT_DEATH({ + void *ptr = malloc(128); + ASSERT_NE(ptr, nullptr); + { + alarm(100ms); + ScopedDisableMalloc disable_malloc; + free(ptr); + } + }, ""); +} + +TEST_F(DisableMallocTest, deadlock_fork) { + ASSERT_DEATH({ + { + alarm(100ms); + ScopedDisableMalloc disable_malloc; + fork(); + } + }, ""); +} diff --git a/libmemunreachable/tests/HeapWalker_test.cpp b/libmemunreachable/tests/HeapWalker_test.cpp index 9921eb65e..c3e1c4d56 100644 --- a/libmemunreachable/tests/HeapWalker_test.cpp +++ b/libmemunreachable/tests/HeapWalker_test.cpp @@ -80,6 +80,8 @@ TEST_F(HeapWalkerTest, leak) { HeapWalker heap_walker(heap_); heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2)); + ASSERT_EQ(true, heap_walker.DetectLeaks()); + allocator::vector leaked(heap_); size_t num_leaks = 0; size_t leaked_bytes = 0; @@ -106,9 +108,11 @@ TEST_F(HeapWalkerTest, live) { heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2)); heap_walker.Root(buffer_begin(buffer1), buffer_end(buffer1)); + ASSERT_EQ(true, heap_walker.DetectLeaks()); + allocator::vector leaked(heap_); - size_t num_leaks = 
diff --git a/libmemunreachable/tests/HeapWalker_test.cpp b/libmemunreachable/tests/HeapWalker_test.cpp
index 9921eb65e..c3e1c4d56 100644
--- a/libmemunreachable/tests/HeapWalker_test.cpp
+++ b/libmemunreachable/tests/HeapWalker_test.cpp
@@ -80,6 +80,8 @@ TEST_F(HeapWalkerTest, leak) {
   HeapWalker heap_walker(heap_);
   heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
 
+  ASSERT_EQ(true, heap_walker.DetectLeaks());
+
   allocator::vector<Range> leaked(heap_);
   size_t num_leaks = 0;
   size_t leaked_bytes = 0;
@@ -106,9 +108,11 @@ TEST_F(HeapWalkerTest, live) {
       heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
       heap_walker.Root(buffer_begin(buffer1), buffer_end(buffer1));
 
+      ASSERT_EQ(true, heap_walker.DetectLeaks());
+
       allocator::vector<Range> leaked(heap_);
-      size_t num_leaks = SIZE_T_MAX;
-      size_t leaked_bytes = SIZE_T_MAX;
+      size_t num_leaks = SIZE_MAX;
+      size_t leaked_bytes = SIZE_MAX;
       ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
 
       EXPECT_EQ(0U, num_leaks);
@@ -132,9 +136,11 @@ TEST_F(HeapWalkerTest, unaligned) {
       heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
       heap_walker.Root(buffer_begin(buffer1) + i, buffer_end(buffer1) - j);
 
+      ASSERT_EQ(true, heap_walker.DetectLeaks());
+
      allocator::vector<Range> leaked(heap_);
-      size_t num_leaks = SIZE_T_MAX;
-      size_t leaked_bytes = SIZE_T_MAX;
+      size_t num_leaks = SIZE_MAX;
+      size_t leaked_bytes = SIZE_MAX;
       ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
 
       EXPECT_EQ(0U, num_leaks);
@@ -143,3 +149,26 @@ TEST_F(HeapWalkerTest, unaligned) {
     }
   }
 }
+
+TEST_F(HeapWalkerTest, cycle) {
+  void* buffer1;
+  void* buffer2;
+
+  buffer1 = &buffer2;
+  buffer2 = &buffer1;
+
+  HeapWalker heap_walker(heap_);
+  heap_walker.Allocation(buffer_begin(buffer1), buffer_end(buffer1));
+  heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
+
+  ASSERT_EQ(true, heap_walker.DetectLeaks());
+
+  allocator::vector<Range> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(2U, num_leaks);
+  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(2U, leaked.size());
+}
diff --git a/libmemunreachable/tests/HostMallocStub.cpp b/libmemunreachable/tests/HostMallocStub.cpp
new file mode 100644
index 000000000..a7e3f07d3
--- /dev/null
+++ b/libmemunreachable/tests/HostMallocStub.cpp
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bionic.h"
+
+void malloc_disable() {
+}
+
+void malloc_enable() {
+}
diff --git a/libmemunreachable/tests/LeakFolding_test.cpp b/libmemunreachable/tests/LeakFolding_test.cpp
new file mode 100644
index 000000000..879a3a023
--- /dev/null
+++ b/libmemunreachable/tests/LeakFolding_test.cpp
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "HeapWalker.h"
+#include "LeakFolding.h"
+
+#include <gtest/gtest.h>
+#include <ScopedDisableMalloc.h>
+#include "Allocator.h"
+
+class LeakFoldingTest : public ::testing::Test {
+ public:
+  LeakFoldingTest() : disable_malloc_(), heap_() {}
+
+  void TearDown() {
+    ASSERT_TRUE(heap_.empty());
+    if (!HasFailure()) {
+      ASSERT_FALSE(disable_malloc_.timed_out());
+    }
+  }
+
+ protected:
+  ScopedDisableMallocTimeout disable_malloc_;
+  Heap heap_;
+};
+
+#define buffer_begin(buffer) reinterpret_cast<uintptr_t>(&buffer[0])
+#define buffer_end(buffer) (reinterpret_cast<uintptr_t>(&buffer[0]) + sizeof(buffer))
+#define ALLOCATION(heap_walker, buffer) \
+  ASSERT_EQ(true, heap_walker.Allocation(buffer_begin(buffer), buffer_end(buffer)))
+
+TEST_F(LeakFoldingTest, one) {
+  void* buffer1[1] = {nullptr};
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(1U, num_leaks);
+  EXPECT_EQ(sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(1U, leaked.size());
+  EXPECT_EQ(0U, leaked[0].referenced_count);
+  EXPECT_EQ(0U, leaked[0].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, two) {
+  void* buffer1[1] = {nullptr};
+  void* buffer2[1] = {nullptr};
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(2U, num_leaks);
+  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(2U, leaked.size());
+  EXPECT_EQ(0U, leaked[0].referenced_count);
+  EXPECT_EQ(0U, leaked[0].referenced_size);
+  EXPECT_EQ(0U, leaked[1].referenced_count);
+  EXPECT_EQ(0U, leaked[1].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, dominator) {
+  void* buffer1[1];
+  void* buffer2[1] = {nullptr};
+
+  buffer1[0] = buffer2;
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(2U, num_leaks);
+  EXPECT_EQ(2*sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(1U, leaked.size());
+  EXPECT_EQ(1U, leaked[0].referenced_count);
+  EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, cycle) {
+  void* buffer1[1];
+  void* buffer2[1];
+  void* buffer3[1];
+
+  buffer1[0] = buffer2;
+  buffer2[0] = buffer3;
+  buffer3[0] = buffer2;
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+  ALLOCATION(heap_walker, buffer3);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(3U, num_leaks);
+  EXPECT_EQ(3*sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(1U, leaked.size());
+  EXPECT_EQ(2U, leaked[0].referenced_count);
+  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, dominator_cycle) {
+  void* buffer1[2] = {nullptr, nullptr};
+  void* buffer2[2];
+  void* buffer3[1] = {nullptr};
+
+  buffer1[0] = &buffer2;
+  buffer2[0] = &buffer1;
+  buffer2[1] = &buffer3;
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+  ALLOCATION(heap_walker, buffer3);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(3U, num_leaks);
+  EXPECT_EQ(5*sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(2U, leaked.size());
+
+  EXPECT_EQ(2U, leaked[0].referenced_count);
+  EXPECT_EQ(3*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(2U, leaked[1].referenced_count);
+  EXPECT_EQ(3*sizeof(uintptr_t), leaked[1].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, two_cycles) {
+  void* buffer1[1];
+  void* buffer2[1];
+  void* buffer3[1];
+  void* buffer4[1];
+  void* buffer5[1];
+  void* buffer6[1];
+
+  buffer1[0] = buffer3;
+  buffer2[0] = buffer5;
+  buffer3[0] = buffer4;
+  buffer4[0] = buffer3;
+  buffer5[0] = buffer6;
+  buffer6[0] = buffer5;
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+  ALLOCATION(heap_walker, buffer3);
+  ALLOCATION(heap_walker, buffer4);
+  ALLOCATION(heap_walker, buffer5);
+  ALLOCATION(heap_walker, buffer6);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(6U, num_leaks);
+  EXPECT_EQ(6*sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(2U, leaked.size());
+  EXPECT_EQ(2U, leaked[0].referenced_count);
+  EXPECT_EQ(2*sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(2U, leaked[1].referenced_count);
+  EXPECT_EQ(2*sizeof(uintptr_t), leaked[1].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, two_dominator_cycles) {
+  void* buffer1[1];
+  void* buffer2[1];
+  void* buffer3[1];
+  void* buffer4[1];
+
+  buffer1[0] = buffer2;
+  buffer2[0] = buffer1;
+  buffer3[0] = buffer4;
+  buffer4[0] = buffer3;
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+  ALLOCATION(heap_walker, buffer3);
+  ALLOCATION(heap_walker, buffer4);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(4U, num_leaks);
+  EXPECT_EQ(4*sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(4U, leaked.size());
+  EXPECT_EQ(1U, leaked[0].referenced_count);
+  EXPECT_EQ(sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(1U, leaked[1].referenced_count);
+  EXPECT_EQ(sizeof(uintptr_t), leaked[1].referenced_size);
+  EXPECT_EQ(1U, leaked[2].referenced_count);
+  EXPECT_EQ(sizeof(uintptr_t), leaked[2].referenced_size);
+  EXPECT_EQ(1U, leaked[3].referenced_count);
+  EXPECT_EQ(sizeof(uintptr_t), leaked[3].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, giant_dominator_cycle) {
+  const size_t n = 1000;
+  void* buffer[n];
+
+  HeapWalker heap_walker(heap_);
+
+  for (size_t i = 0; i < n; i++) {
+    ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
+        reinterpret_cast<uintptr_t>(&buffer[i+1])));
+  }
+
+  for (size_t i = 0; i < n - 1; i++) {
+    buffer[i] = &buffer[i+1];
+  }
+  buffer[n - 1] = &buffer[0];
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(n, num_leaks);
+  EXPECT_EQ(n * sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(1000U, leaked.size());
+  EXPECT_EQ(n - 1, leaked[0].referenced_count);
+  EXPECT_EQ((n - 1) * sizeof(uintptr_t), leaked[0].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, giant_cycle) {
+  const size_t n = 1000;
+  void* buffer[n];
+  void* buffer1[1];
+
+  HeapWalker heap_walker(heap_);
+
+  for (size_t i = 0; i < n - 1; i++) {
+    buffer[i] = &buffer[i+1];
+  }
+  buffer[n - 1] = &buffer[0];
+
+  buffer1[0] = &buffer[0];
+
+  for (size_t i = 0; i < n; i++) {
+    ASSERT_TRUE(heap_walker.Allocation(reinterpret_cast<uintptr_t>(&buffer[i]),
+        reinterpret_cast<uintptr_t>(&buffer[i+1])));
+  }
+
+  ALLOCATION(heap_walker, buffer1);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(n + 1, num_leaks);
+  EXPECT_EQ((n + 1) * sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(1U, leaked.size());
+  EXPECT_EQ(n, leaked[0].referenced_count);
+  EXPECT_EQ(n * sizeof(uintptr_t), leaked[0].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, multipath) {
+  void* buffer1[2];
+  void* buffer2[1];
+  void* buffer3[1];
+  void* buffer4[1] = {nullptr};
+
+  //    1
+  //   / \
+  //  v   v
+  //  2   3
+  //   \ /
+  //    v
+  //    4
+
+  buffer1[0] = &buffer2;
+  buffer1[1] = &buffer3;
+  buffer2[0] = &buffer4;
+  buffer3[0] = &buffer4;
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+  ALLOCATION(heap_walker, buffer3);
+  ALLOCATION(heap_walker, buffer4);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(4U, num_leaks);
+  EXPECT_EQ(5 * sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(1U, leaked.size());
+  EXPECT_EQ(3U, leaked[0].referenced_count);
+  EXPECT_EQ(3 * sizeof(uintptr_t), leaked[0].referenced_size);
+}
+
+TEST_F(LeakFoldingTest, multicycle) {
+  void* buffer1[2]{};
+  void* buffer2[2]{};
+  void* buffer3[2]{};
+  void* buffer4[2]{};
+
+  //    1
+  //   / ^
+  //  v   \
+  //  2 -> 3
+  //   \   ^
+  //    v /
+  //     4
+
+  buffer1[0] = &buffer2;
+  buffer2[0] = &buffer3;
+  buffer2[1] = &buffer4;
+  buffer3[0] = &buffer1;
+  buffer4[0] = &buffer3;
+
+  HeapWalker heap_walker(heap_);
+
+  ALLOCATION(heap_walker, buffer1);
+  ALLOCATION(heap_walker, buffer2);
+  ALLOCATION(heap_walker, buffer3);
+  ALLOCATION(heap_walker, buffer4);
+
+  LeakFolding folding(heap_, heap_walker);
+
+  ASSERT_TRUE(folding.FoldLeaks());
+
+  allocator::vector<LeakFolding::Leak> leaked(heap_);
+  size_t num_leaks = 0;
+  size_t leaked_bytes = 0;
+  ASSERT_EQ(true, folding.Leaked(leaked, &num_leaks, &leaked_bytes));
+
+  EXPECT_EQ(4U, num_leaks);
+  EXPECT_EQ(8 * sizeof(uintptr_t), leaked_bytes);
+  ASSERT_EQ(4U, leaked.size());
+  EXPECT_EQ(3U, leaked[0].referenced_count);
+  EXPECT_EQ(6 * sizeof(uintptr_t), leaked[0].referenced_size);
+  EXPECT_EQ(3U, leaked[1].referenced_count);
+  EXPECT_EQ(6 * sizeof(uintptr_t), leaked[1].referenced_size);
+  EXPECT_EQ(3U, leaked[2].referenced_count);
+  EXPECT_EQ(6 * sizeof(uintptr_t), leaked[2].referenced_size);
+  EXPECT_EQ(3U, leaked[3].referenced_count);
+  EXPECT_EQ(6 * sizeof(uintptr_t), leaked[3].referenced_size);
+}