Imprecise mark-and-sweep native memory leak detector

libmemunreachable uses an imprecise mark-and-sweep pass over all memory
allocated by jemalloc to find unreachable allocations.

Change-Id: Ia70bbf31f5b40ff71dab28cfd6cd06c5ef01a2d4
Author: Colin Cross  2016-01-14 15:35:40 -08:00
parent aae1eb2c4f
commit bcb4ed3eaa
30 changed files with 4084 additions and 0 deletions

Allocator.cpp
@@ -0,0 +1,480 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Header page:
//
// For the minimum allocation size (8 bytes), the 4032-byte free bitmap can
// track 4032*8 = 32256 allocations, covering 32256*8 = 258048 bytes, which is
// the 256KiB chunk minus the 4KiB header page.
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include "android-base/macros.h"
#include "anon_vma_naming.h"
#include "Allocator.h"
#include "LinkedList.h"
// runtime interfaces used:
// abort
// assert - fprintf + mmap
// mmap
// munmap
// prctl
constexpr size_t const_log2(size_t n, size_t p = 0) {
return (n <= 1) ? p : const_log2(n / 2, p + 1);
}
constexpr unsigned int div_round_up(unsigned int x, unsigned int y) {
return (x + y - 1) / y;
}
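// e.g. const_log2(65536) == 16, div_round_up(63, 32) == 2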
#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets = const_log2(kMaxBucketAllocationSize)
- const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize
/ kPageSize;
std::atomic<int> heap_count;
class Chunk;
class HeapImpl {
public:
HeapImpl();
~HeapImpl();
void* operator new(std::size_t count) noexcept;
void operator delete(void* ptr);
void* Alloc(size_t size);
void Free(void* ptr);
bool Empty();
void MoveToFullList(Chunk* chunk, int bucket_);
void MoveToFreeList(Chunk* chunk, int bucket_);
private:
DISALLOW_COPY_AND_ASSIGN(HeapImpl);
LinkedList<Chunk*> free_chunks_[kNumBuckets];
LinkedList<Chunk*> full_chunks_[kNumBuckets];
void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
void* MapAlloc(size_t size);
void MapFree(void* ptr);
void* AllocLocked(size_t size);
void FreeLocked(void* ptr);
struct MapAllocation {
void *ptr;
size_t size;
MapAllocation* next;
};
MapAllocation* map_allocation_list_;
std::mutex m_;
};
// Integer log 2, rounds down
static inline unsigned int log2(size_t n) {
return 8 * sizeof(unsigned long long) - __builtin_clzll(n) - 1;
}
static inline unsigned int size_to_bucket(size_t size) {
if (size < kMinBucketAllocationSize)
return 0; // all sizes up to 8 bytes fit in the smallest bucket
return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}
static inline size_t bucket_to_size(unsigned int bucket) {
return kMinBucketAllocationSize << bucket;
}
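// e.g. sizes 1-8 map to bucket 0 (8 bytes), 9-16 to bucket 1 (16 bytes),
// ..., 32769-65536 to bucket 13 (kMaxBucketAllocationSize)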
static void* MapAligned(size_t size, size_t align) {
const int prot = PROT_READ | PROT_WRITE;
const int flags = MAP_ANONYMOUS | MAP_PRIVATE;
size = (size + kPageSize - 1) & ~(kPageSize - 1);
// Over-allocate enough to align
size_t map_size = size + align - kPageSize;
if (map_size < size) {
return nullptr;
}
void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
if (ptr == MAP_FAILED) {
return nullptr;
}
size_t aligned_size = map_size;
void* aligned_ptr = ptr;
std::align(align, size, aligned_ptr, aligned_size);
// Trim beginning
if (aligned_ptr != ptr) {
ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr)
- reinterpret_cast<uintptr_t>(ptr);
munmap(ptr, extra);
map_size -= extra;
ptr = aligned_ptr;
}
// Trim end
if (map_size != size) {
assert(map_size > size);
assert(ptr != NULL);
munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size),
map_size - size);
}
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
reinterpret_cast<uintptr_t>(ptr), size, "leak_detector_malloc");
return ptr;
}
class Chunk {
public:
static void* operator new(std::size_t count) noexcept;
static void operator delete(void* ptr);
Chunk(HeapImpl* heap, int bucket);
~Chunk() {}
void *Alloc();
void Free(void* ptr);
void Purge();
bool Empty();
static Chunk* ptr_to_chunk(void* ptr) {
return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr)
& ~(kChunkSize - 1));
}
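// Bucket allocations never start at a chunk-aligned address (the Chunk
// header occupies it), while MapAlloc allocations are always chunk-aligned,
// so alignment alone distinguishes the two cases.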
static bool is_chunk(void* ptr) {
return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
}
unsigned int free_count() {
return free_count_;
}
HeapImpl* heap() {
return heap_;
}
LinkedList<Chunk*> node_; // linked list sorted by minimum free count
private:
DISALLOW_COPY_AND_ASSIGN(Chunk);
HeapImpl* heap_;
unsigned int bucket_;
unsigned int allocation_size_; // size of allocations in chunk, min 8 bytes
unsigned int max_allocations_; // maximum number of allocations in the chunk
unsigned int first_free_bitmap_; // index into bitmap for first non-full entry
unsigned int free_count_; // number of available allocations
unsigned int frees_since_purge_; // number of calls to Free since last Purge
// bitmap of pages that have been dirtied
uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];
// bitmap of free allocations.
uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];
char data_[0];
unsigned int ptr_to_n(void* ptr) {
ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr)
- reinterpret_cast<uintptr_t>(data_);
return offset / allocation_size_;
}
void* n_to_ptr(unsigned int n) {
return data_ + n * allocation_size_;
}
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
// Override new operator on chunk to use mmap to allocate kChunkSize
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
assert(count == sizeof(Chunk));
void* mem = MapAligned(kChunkSize, kChunkSize);
if (!mem) {
abort(); //throw std::bad_alloc;
}
return mem;
}
// Override new operator on chunk to use mmap to allocate kChunkSize
void Chunk::operator delete(void *ptr) {
assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
munmap(ptr, kChunkSize);
}
Chunk::Chunk(HeapImpl* heap, int bucket) :
node_(this), heap_(heap), bucket_(bucket), allocation_size_(
bucket_to_size(bucket)), max_allocations_(
kUsableChunkSize / allocation_size_), first_free_bitmap_(0), free_count_(
max_allocations_), frees_since_purge_(0) {
memset(dirty_pages_, 0, sizeof(dirty_pages_));
memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}
bool Chunk::Empty() {
return free_count_ == max_allocations_;
}
void* Chunk::Alloc() {
assert(free_count_ > 0);
unsigned int i = first_free_bitmap_;
while (free_bitmap_[i] == 0)
i++;
assert(i < ARRAY_SIZE(free_bitmap_));
unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
assert(free_bitmap_[i] & (1U << bit));
free_bitmap_[i] &= ~(1U << bit);
unsigned int n = i * 32 + bit;
assert(n < max_allocations_);
unsigned int page = n * allocation_size_ / kPageSize;
assert(page / 32 < ARRAY_SIZE(dirty_pages_));
dirty_pages_[page / 32] |= 1U << (page % 32);
free_count_--;
if (free_count_ == 0) {
heap_->MoveToFullList(this, bucket_);
}
return n_to_ptr(n);
}
void Chunk::Free(void* ptr) {
assert(is_chunk(ptr));
assert(ptr_to_chunk(ptr) == this);
unsigned int n = ptr_to_n(ptr);
unsigned int i = n / 32;
unsigned int bit = n % 32;
assert(i < ARRAY_SIZE(free_bitmap_));
assert(!(free_bitmap_[i] & (1U << bit)));
free_bitmap_[i] |= 1U << bit;
free_count_++;
if (i < first_free_bitmap_) {
first_free_bitmap_ = i;
}
if (free_count_ == 1) {
heap_->MoveToFreeList(this, bucket_);
} else {
// TODO(ccross): move down free list if necessary
}
if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
Purge();
}
}
void Chunk::Purge() {
frees_since_purge_ = 0;
//unsigned int allocsPerPage = kPageSize / allocation_size_;
}
// Override new operator on HeapImpl to use mmap to allocate a page
void* HeapImpl::operator new(std::size_t count __attribute__((unused)))
noexcept {
assert(count == sizeof(HeapImpl));
void* mem = MapAligned(kPageSize, kPageSize);
if (!mem) {
abort(); //throw std::bad_alloc;
}
heap_count++;
return mem;
}
void HeapImpl::operator delete(void *ptr) {
munmap(ptr, kPageSize);
}
HeapImpl::HeapImpl() :
free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {
}
bool HeapImpl::Empty() {
for (unsigned int i = 0; i < kNumBuckets; i++) {
for (LinkedList<Chunk*> *it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
if (!it->data()->Empty()) {
return false;
}
}
for (LinkedList<Chunk*> *it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
if (!it->data()->Empty()) {
return false;
}
}
}
return true;
}
HeapImpl::~HeapImpl() {
for (unsigned int i = 0; i < kNumBuckets; i++) {
while (!free_chunks_[i].empty()) {
Chunk *chunk = free_chunks_[i].next()->data();
chunk->node_.remove();
delete chunk;
}
while (!full_chunks_[i].empty()) {
Chunk *chunk = full_chunks_[i].next()->data();
chunk->node_.remove();
delete chunk;
}
}
}
void* HeapImpl::Alloc(size_t size) {
std::lock_guard<std::mutex> lk(m_);
return AllocLocked(size);
}
void* HeapImpl::AllocLocked(size_t size) {
if (__predict_false(size > kMaxBucketAllocationSize)) {
return MapAlloc(size);
}
int bucket = size_to_bucket(size);
if (__predict_false(free_chunks_[bucket].empty())) {
Chunk *chunk = new Chunk(this, bucket);
free_chunks_[bucket].insert(chunk->node_);
}
return free_chunks_[bucket].next()->data()->Alloc();
}
void HeapImpl::Free(void *ptr) {
std::lock_guard<std::mutex> lk(m_);
FreeLocked(ptr);
}
void HeapImpl::FreeLocked(void *ptr) {
if (!Chunk::is_chunk(ptr)) {
HeapImpl::MapFree(ptr);
} else {
Chunk* chunk = Chunk::ptr_to_chunk(ptr);
assert(chunk->heap() == this);
chunk->Free(ptr);
}
}
void* HeapImpl::MapAlloc(size_t size) {
size = (size + kPageSize - 1) & ~(kPageSize - 1);
MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(
sizeof(MapAllocation)));
void* ptr = MapAligned(size, kChunkSize);
if (!ptr) {
FreeLocked(allocation);
abort(); //throw std::bad_alloc;
}
allocation->ptr = ptr;
allocation->size = size;
allocation->next = map_allocation_list_;
map_allocation_list_ = allocation;
return ptr;
}
void HeapImpl::MapFree(void *ptr) {
MapAllocation **allocation = &map_allocation_list_;
while (*allocation && (*allocation)->ptr != ptr)
allocation = &(*allocation)->next;
assert(*allocation != nullptr);
munmap((*allocation)->ptr, (*allocation)->size);
FreeLocked(*allocation);
*allocation = (*allocation)->next;
}
void HeapImpl::MoveToFreeList(Chunk *chunk, int bucket) {
MoveToList(chunk, &free_chunks_[bucket]);
}
void HeapImpl::MoveToFullList(Chunk *chunk, int bucket) {
MoveToList(chunk, &full_chunks_[bucket]);
}
void HeapImpl::MoveToList(Chunk *chunk, LinkedList<Chunk*>* head) {
// Remove from old list
chunk->node_.remove();
LinkedList<Chunk*> *node = head;
// Insert into new list, sorted by lowest free count
while (node->next() != head && node->data() != nullptr
&& node->data()->free_count() < chunk->free_count())
node = node->next();
node->insert(chunk->node_);
}
Heap::Heap() {
// HeapImpl overloads the operator new in order to mmap itself instead of
// allocating with new.
// Can't use a shared_ptr to store the result because shared_ptr needs to
// allocate, and Allocator<T> is still being constructed.
impl_ = new HeapImpl();
owns_impl_ = true;
}
Heap::~Heap() {
if (owns_impl_) {
delete impl_;
}
}
void* Heap::allocate(size_t size) {
return impl_->Alloc(size);
}
void Heap::deallocate(void* ptr) {
impl_->Free(ptr);
}
void Heap::deallocate(HeapImpl*impl, void* ptr) {
impl->Free(ptr);
}
bool Heap::empty() {
return impl_->Empty();
}

Allocator.h
@@ -0,0 +1,224 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_ALLOCATOR_H_
#define LIBMEMUNREACHABLE_ALLOCATOR_H_
#include <atomic>
#include <cstddef>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <unordered_set>
#include <vector>
extern std::atomic<int> heap_count;
class HeapImpl;
template<typename T>
class Allocator;
// Non-templated class that wraps HeapImpl to keep the implementation out of
// the header file
class Heap {
public:
Heap();
~Heap();
// Copy constructor that does not take ownership of impl_
Heap(const Heap& other) : impl_(other.impl_), owns_impl_(false) {}
// Assignment disabled
Heap& operator=(const Heap&) = delete;
// Allocate size bytes
void* allocate(size_t size);
// Deallocate allocation returned by allocate
void deallocate(void*);
bool empty();
static void deallocate(HeapImpl* impl, void* ptr);
// Allocate a class of type T
template<class T>
T* allocate() {
return reinterpret_cast<T*>(allocate(sizeof(T)));
}
// Comparators, copied objects will be equal
bool operator ==(const Heap& other) const {
return impl_ == other.impl_;
}
bool operator !=(const Heap& other) const {
return !(*this == other);
}
// std::unique_ptr wrapper that allocates using allocate and deletes using
// deallocate
template<class T>
using unique_ptr = std::unique_ptr<T, std::function<void(void*)>>;
template<class T, class... Args>
unique_ptr<T> make_unique(Args&&... args) {
HeapImpl* impl = impl_;
return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...),
[impl](void* ptr) {
reinterpret_cast<T*>(ptr)->~T();
deallocate(impl, ptr);
});
}
// std::shared_ptr wrapper that allocates the object and its control block
// from the Heap (see make_shared below)
template<class T>
using shared_ptr = std::shared_ptr<T>;
template<class T, class... Args>
shared_ptr<T> make_shared(Args&&... args);
protected:
HeapImpl* impl_;
bool owns_impl_;
};
// STLAllocator implements the std allocator interface on top of a Heap
template<typename T>
class STLAllocator {
public:
using value_type = T;
~STLAllocator() {
}
// Construct an STLAllocator on top of a Heap
STLAllocator(const Heap& heap) :
heap_(heap) {
}
// Rebind an STLAllocator from another STLAllocator
template<typename U>
STLAllocator(const STLAllocator<U>& other) :
heap_(other.heap_) {
}
STLAllocator(const STLAllocator&) = default;
STLAllocator<T>& operator=(const STLAllocator<T>&) = default;
T* allocate(std::size_t n) {
return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T)));
}
void deallocate(T* ptr, std::size_t) {
heap_.deallocate(ptr);
}
template<typename U>
bool operator ==(const STLAllocator<U>& other) const {
return heap_ == other.heap_;
}
template<typename U>
inline bool operator !=(const STLAllocator<U>& other) const {
return !(*this == other);
}
template<typename U>
friend class STLAllocator;
protected:
Heap heap_;
};
// Allocator extends STLAllocator with some convenience methods for allocating
// a single object and for constructing unique_ptr and shared_ptr objects with
// appropriate deleters.
template<class T>
class Allocator : public STLAllocator<T> {
public:
~Allocator() {}
Allocator(const Heap& other) :
STLAllocator<T>(other) {
}
template<typename U>
Allocator(const STLAllocator<U>& other) :
STLAllocator<T>(other) {
}
Allocator(const Allocator&) = default;
Allocator<T>& operator=(const Allocator<T>&) = default;
using STLAllocator<T>::allocate;
using STLAllocator<T>::deallocate;
using STLAllocator<T>::heap_;
T* allocate() {
return STLAllocator<T>::allocate(1);
}
void deallocate(void* ptr) {
heap_.deallocate(ptr);
}
using shared_ptr = Heap::shared_ptr<T>;
template<class... Args>
shared_ptr make_shared(Args&& ...args) {
return heap_.template make_shared<T>(std::forward<Args>(args)...);
}
using unique_ptr = Heap::unique_ptr<T>;
template<class... Args>
unique_ptr make_unique(Args&& ...args) {
return heap_.template make_unique<T>(std::forward<Args>(args)...);
}
};
// Heap::make_shared is implemented outside the class definition so that it
// can pass Allocator<T> to std::allocate_shared.
template<class T, class... Args>
inline Heap::shared_ptr<T> Heap::make_shared(Args&&... args) {
return std::allocate_shared<T, Allocator<T>, Args...>(Allocator<T>(*this),
std::forward<Args>(args)...);
}
namespace allocator {
template<class T>
using vector = std::vector<T, Allocator<T>>;
template<class T>
using list = std::list<T, Allocator<T>>;
template<class T, class Key, class Compare = std::less<Key>>
using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
template<class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
template<class Key, class Compare = std::less<Key>>
using set = std::set<Key, Compare, Allocator<Key>>;
using string = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
}
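// Example (sketch) of a Heap-backed container:
//   Heap heap;
//   allocator::vector<int> v{Allocator<int>{heap}};
//   v.push_back(42);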
#endif

Android.mk
@@ -0,0 +1,43 @@
LOCAL_PATH := $(call my-dir)
memunreachable_srcs := \
Allocator.cpp \
HeapWalker.cpp \
LeakPipe.cpp \
LineBuffer.cpp \
MemUnreachable.cpp \
ProcessMappings.cpp \
PtracerThread.cpp \
ThreadCapture.cpp
memunreachable_test_srcs := \
tests/Allocator_test.cpp \
tests/HeapWalker_test.cpp \
tests/MemUnreachable_test.cpp \
tests/ThreadCapture_test.cpp
include $(CLEAR_VARS)
LOCAL_MODULE := libmemunreachable
LOCAL_SRC_FILES := $(memunreachable_srcs)
LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
LOCAL_SHARED_LIBRARIES := libbase liblog
LOCAL_STATIC_LIBRARIES := libc_malloc_debug_backtrace libc_logging
# Only need this for arm since libc++ uses its own unwind code that
# doesn't mix with the other default unwind code.
LOCAL_STATIC_LIBRARIES_arm := libunwind_llvm
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
LOCAL_CLANG := true
include $(BUILD_SHARED_LIBRARY)
include $(CLEAR_VARS)
LOCAL_MODULE := memunreachable_test
LOCAL_SRC_FILES := $(memunreachable_test_srcs)
LOCAL_CFLAGS := -std=c++14 -Wall -Wextra -Werror
LOCAL_CLANG := true
LOCAL_SHARED_LIBRARIES := libmemunreachable libbase liblog
include $(BUILD_NATIVE_TEST)

HeapWalker.cpp
@@ -0,0 +1,137 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include <algorithm>
#include <map>
#include <utility>
#include "Allocator.h"
#include "HeapWalker.h"
#include "log.h"
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
if (end == begin) {
end = begin + 1;
}
auto inserted = allocations_.insert(std::pair<Range, RangeInfo>(Range{begin, end}, RangeInfo{false, false}));
if (inserted.second) {
valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
allocation_bytes_ += end - begin;
return true;
} else {
Range overlap = inserted.first->first;
ALOGE("range %p-%p overlaps with existing range %p-%p",
reinterpret_cast<void*>(begin),
reinterpret_cast<void*>(end),
reinterpret_cast<void*>(overlap.begin),
reinterpret_cast<void*>(overlap.end));
return false;
}
}
void HeapWalker::Walk(const Range& range, bool RangeInfo::*flag) {
allocator::vector<Range> to_do(1, range, allocator_);
while (!to_do.empty()) {
Range range = to_do.back();
to_do.pop_back();
uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
// TODO(ccross): we might need to consider a pointer to the end of a buffer
// to be inside the buffer, which means the common case of a pointer to the
// beginning of a buffer may keep two ranges live.
for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
uintptr_t val = *reinterpret_cast<uintptr_t*>(i);
if (val >= valid_allocations_range_.begin && val < valid_allocations_range_.end) {
RangeMap::iterator it = allocations_.find(Range{val, val + 1});
if (it != allocations_.end()) {
if (!(it->second.*flag)) {
to_do.push_back(it->first);
it->second.*flag = true;
}
}
}
}
}
}
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
roots_.push_back(Range{begin, end});
}
void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}
size_t HeapWalker::Allocations() {
return allocations_.size();
}
size_t HeapWalker::AllocationBytes() {
return allocation_bytes_;
}
bool HeapWalker::DetectLeaks() {
for (auto it = roots_.begin(); it != roots_.end(); it++) {
Walk(*it, &RangeInfo::referenced_from_root);
}
Range vals;
vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
Walk(vals, &RangeInfo::referenced_from_root);
for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
if (!it->second.referenced_from_root) {
Walk(it->first, &RangeInfo::referenced_from_leak);
}
}
return true;
}
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit,
size_t* num_leaks_out, size_t* leak_bytes_out) {
DetectLeaks();
leaked.clear();
size_t num_leaks = 0;
size_t leak_bytes = 0;
for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
if (!it->second.referenced_from_root) {
num_leaks++;
leak_bytes += it->first.end - it->first.begin;
}
}
size_t n = 0;
for (auto it = allocations_.begin(); it != allocations_.end(); it++) {
if (!it->second.referenced_from_root) {
if (n++ < limit) {
leaked.push_back(it->first);
}
}
}
if (num_leaks_out) {
*num_leaks_out = num_leaks;
}
if (leak_bytes_out) {
*leak_bytes_out = leak_bytes;
}
return true;
}

HeapWalker.h
@@ -0,0 +1,75 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_HEAP_WALKER_H_
#define LIBMEMUNREACHABLE_HEAP_WALKER_H_
#include "android-base/macros.h"
#include "Allocator.h"
// A range [begin, end)
struct Range {
uintptr_t begin;
uintptr_t end;
};
// Comparator for Ranges that returns equivalence for overlapping ranges
struct compare_range {
bool operator()(const Range& a, const Range& b) const {
return a.end <= b.begin;
}
};
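// With this comparator, a map keyed by Range (like HeapWalker::allocations_)
// can be probed with find(Range{val, val + 1}) to locate the allocation whose
// [begin, end) contains val, as HeapWalker::Walk does.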
class HeapWalker {
public:
HeapWalker(Allocator<HeapWalker> allocator) : allocator_(allocator),
allocations_(allocator), allocation_bytes_(0),
roots_(allocator), root_vals_(allocator) {
valid_allocations_range_.end = 0;
valid_allocations_range_.begin = ~valid_allocations_range_.end;
}
~HeapWalker() {}
bool Allocation(uintptr_t begin, uintptr_t end);
void Root(uintptr_t begin, uintptr_t end);
void Root(const allocator::vector<uintptr_t>& vals);
bool DetectLeaks();
bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks,
size_t* leak_bytes);
size_t Allocations();
size_t AllocationBytes();
private:
struct RangeInfo {
bool referenced_from_root;
bool referenced_from_leak;
};
void Walk(const Range& range, bool RangeInfo::* flag);
DISALLOW_COPY_AND_ASSIGN(HeapWalker);
Allocator<HeapWalker> allocator_;
using RangeMap = allocator::map<RangeInfo, Range, compare_range>;
RangeMap allocations_;
size_t allocation_bytes_;
Range valid_allocations_range_;
allocator::vector<Range> roots_;
allocator::vector<uintptr_t> root_vals_;
};
#endif

LeakPipe.cpp
@@ -0,0 +1,89 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <string.h>
#include "LeakPipe.h"
#include "log.h"
bool LeakPipe::SendFd(int sock, int fd) {
struct msghdr hdr{};
struct iovec iov{};
unsigned int data = 0xfdfdfdfd;
alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
hdr.msg_iov = &iov;
hdr.msg_iovlen = 1;
iov.iov_base = &data;
iov.iov_len = sizeof(data);
hdr.msg_control = cmsgbuf;
hdr.msg_controllen = CMSG_LEN(sizeof(int));
struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
*(int*)CMSG_DATA(cmsg) = fd;
int ret = sendmsg(sock, &hdr, 0);
if (ret < 0) {
ALOGE("failed to send fd: %s", strerror(errno));
return false;
}
if (ret == 0) {
ALOGE("eof when sending fd");
return false;
}
return true;
}
int LeakPipe::ReceiveFd(int sock) {
struct msghdr hdr{};
struct iovec iov{};
unsigned int data;
alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
hdr.msg_iov = &iov;
hdr.msg_iovlen = 1;
iov.iov_base = &data;
iov.iov_len = sizeof(data);
hdr.msg_control = cmsgbuf;
hdr.msg_controllen = CMSG_LEN(sizeof(int));
int ret = recvmsg(sock, &hdr, 0);
if (ret < 0) {
ALOGE("failed to receive fd: %s", strerror(errno));
return -1;
}
if (ret == 0) {
ALOGE("eof when receiving fd");
return -1;
}
struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
if (cmsg == NULL || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
ALOGE("missing fd while receiving fd");
return -1;
}
return *(int*)CMSG_DATA(cmsg);
}

LeakPipe.h
@@ -0,0 +1,201 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LEAK_PIPE_H_
#define LIBMEMUNREACHABLE_LEAK_PIPE_H_
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <vector>
#include "android-base/macros.h"
#include "ScopedPipe.h"
#include "log.h"
// LeakPipe implements a pipe that can transfer vectors of simple objects
// between processes. The pipe is created in the sending process and
// transferred over a socketpair that was created before forking. This ensures
// that only the sending process can have the send side of the pipe open, so if
// the sending process dies the pipe will close.
class LeakPipe {
public:
LeakPipe() {
int ret = socketpair(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0, sv_);
if (ret < 0) {
LOG_ALWAYS_FATAL("failed to create socketpair: %s", strerror(errno));
}
}
~LeakPipe() {
Close();
}
void Close() {
close(sv_[0]);
close(sv_[1]);
sv_[0] = -1;
sv_[1] = -1;
}
bool OpenReceiver() {
int fd = ReceiveFd(sv_[0]);
if (fd < 0) {
return false;
}
receiver_.SetFd(fd);
return true;
}
bool OpenSender() {
ScopedPipe pipe;
if (!SendFd(sv_[1], pipe.Receiver())) {
return false;
}
pipe.ReleaseReceiver();
sender_.SetFd(pipe.ReleaseSender());
return true;
}
class LeakPipeBase {
public:
LeakPipeBase() : fd_(-1) {}
~LeakPipeBase() {
Close();
}
void SetFd(int fd) {
fd_ = fd;
}
void Close() {
close(fd_);
fd_ = -1;
}
protected:
int fd_;
private:
DISALLOW_COPY_AND_ASSIGN(LeakPipeBase);
};
class LeakPipeSender : public LeakPipeBase {
public:
using LeakPipeBase::LeakPipeBase;
template<typename T>
bool Send(const T& value) {
ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, &value, sizeof(T)));
if (ret < 0) {
ALOGE("failed to send value: %s", strerror(errno));
return false;
} else if (static_cast<size_t>(ret) != sizeof(T)) {
ALOGE("eof while writing value");
return false;
}
return true;
}
template<class T, class Alloc = std::allocator<T>>
bool SendVector(const std::vector<T, Alloc>& vector) {
size_t size = vector.size() * sizeof(T);
if (!Send(size)) {
return false;
}
ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, vector.data(), size));
if (ret < 0) {
ALOGE("failed to send vector: %s", strerror(errno));
return false;
} else if (static_cast<size_t>(ret) != size) {
ALOGE("eof while writing vector");
return false;
}
return true;
}
};
class LeakPipeReceiver : public LeakPipeBase {
public:
using LeakPipeBase::LeakPipeBase;
template<typename T>
bool Receive(T* value) {
ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, reinterpret_cast<void*>(value), sizeof(T)));
if (ret < 0) {
ALOGE("failed to receive value: %s", strerror(errno));
return false;
} else if (static_cast<size_t>(ret) != sizeof(T)) {
ALOGE("eof while receiving value");
return false;
}
return true;
}
template<class T, class Alloc = std::allocator<T>>
bool ReceiveVector(std::vector<T, Alloc>& vector) {
size_t size = 0;
if (!Receive(&size)) {
return false;
}
vector.resize(size / sizeof(T));
char* ptr = reinterpret_cast<char*>(vector.data());
while (size > 0) {
ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, ptr, size));
if (ret < 0) {
ALOGE("failed to send vector: %s", strerror(errno));
return false;
} else if (ret == 0) {
ALOGE("eof while reading vector");
return false;
}
size -= ret;
ptr += ret;
}
return true;
}
};
LeakPipeReceiver& Receiver() {
return receiver_;
}
LeakPipeSender& Sender() {
return sender_;
}
private:
LeakPipeReceiver receiver_;
LeakPipeSender sender_;
bool SendFd(int sock, int fd);
int ReceiveFd(int sock);
DISALLOW_COPY_AND_ASSIGN(LeakPipe);
int sv_[2];
};
#endif // LIBMEMUNREACHABLE_LEAK_PIPE_H_

LineBuffer.cpp
@@ -0,0 +1,62 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Copied from system/extras/memory_replay/LineBuffer.cpp
// TODO(ccross): find a way to share between libmemunreachable and memory_replay?
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include "LineBuffer.h"
LineBuffer::LineBuffer(int fd, char* buffer, size_t buffer_len) : fd_(fd), buffer_(buffer), buffer_len_(buffer_len) {
}
bool LineBuffer::GetLine(char** line, size_t* line_len) {
while (true) {
if (bytes_ > 0) {
char* newline = reinterpret_cast<char*>(memchr(buffer_ + start_, '\n', bytes_));
if (newline != nullptr) {
*newline = '\0';
*line = buffer_ + start_;
start_ = newline - buffer_ + 1;
bytes_ -= newline - *line + 1;
*line_len = newline - *line;
return true;
}
}
if (start_ > 0) {
// Didn't find anything, copy the current to the front of the buffer.
memmove(buffer_, buffer_ + start_, bytes_);
start_ = 0;
}
ssize_t bytes = TEMP_FAILURE_RETRY(read(fd_, buffer_ + bytes_, buffer_len_ - bytes_ - 1));
if (bytes <= 0) {
if (bytes_ > 0) {
// The read data might not contain a nul terminator, so add one.
buffer_[bytes_] = '\0';
*line = buffer_ + start_;
*line_len = bytes_;
bytes_ = 0;
start_ = 0;
return true;
}
return false;
}
bytes_ += bytes;
}
}

LineBuffer.h
@@ -0,0 +1,36 @@
/*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _LIBMEMUNREACHABLE_LINE_BUFFER_H
#define _LIBMEMUNREACHABLE_LINE_BUFFER_H
#include <stddef.h>
#include <stdint.h>
class LineBuffer {
public:
LineBuffer(int fd, char* buffer, size_t buffer_len);
bool GetLine(char** line, size_t* line_len);
private:
int fd_;
char* buffer_ = nullptr;
size_t buffer_len_ = 0;
size_t start_ = 0;
size_t bytes_ = 0;
};
#endif // _LIBMEMUNREACHABLE_LINE_BUFFER_H

LinkedList.h
@@ -0,0 +1,60 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LINKED_LIST_H_
#define LIBMEMUNREACHABLE_LINKED_LIST_H_
#include <assert.h>
template<class T>
class LinkedList {
public:
LinkedList() : next_(this), prev_(this), data_() {}
LinkedList(T data) : LinkedList() {
data_ = data;
}
~LinkedList() {}
void insert(LinkedList<T>& node) {
assert(node.empty());
node.next_ = this->next_;
node.next_->prev_ = &node;
this->next_ = &node;
node.prev_ = this;
}
void remove() {
this->next_->prev_ = this->prev_;
this->prev_->next_ = this->next_;
this->next_ = this;
this->prev_ = this;
}
T data() { return data_; }
bool empty() { return next_ == this && prev_ == this; }
LinkedList<T> *next() { return next_; }
private:
LinkedList<T> *next_;
LinkedList<T> *prev_;
T data_;
};
template<class T>
class LinkedListHead {
public:
LinkedListHead() : node_() {}
~LinkedListHead() {}
private:
LinkedList<T> node_;
};
#endif

MemUnreachable.cpp
@@ -0,0 +1,434 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <algorithm>
#include <functional>
#include <iomanip>
#include <mutex>
#include <string>
#include <sstream>
#include <backtrace.h>
#include <android-base/macros.h>
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"
#include "memunreachable/memunreachable.h"
#include "bionic.h"
#include "log.h"
const size_t Leak::contents_length;
using namespace std::chrono_literals;
class MemUnreachable {
public:
MemUnreachable(pid_t pid, Allocator<void> allocator) : pid_(pid), allocator_(allocator),
heap_walker_(allocator_) {}
bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
const allocator::vector<Mapping>& mappings);
bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
size_t* num_leaks, size_t* leak_bytes);
size_t Allocations() { return heap_walker_.Allocations(); }
size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }
private:
bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
allocator::vector<Mapping>& heap_mappings,
allocator::vector<Mapping>& anon_mappings,
allocator::vector<Mapping>& globals_mappings,
allocator::vector<Mapping>& stack_mappings);
DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
pid_t pid_;
Allocator<void> allocator_;
HeapWalker heap_walker_;
};
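// Bridges malloc_iterate's C callback to a std::function by smuggling the
// function object through the opaque void* argument.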
static void HeapIterate(const Mapping& heap_mapping,
const std::function<void(uintptr_t, size_t)>& func) {
malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
[](uintptr_t base, size_t size, void* arg) {
auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
(*f)(base, size);
}, const_cast<void*>(reinterpret_cast<const void*>(&func)));
}
bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
const allocator::vector<Mapping>& mappings) {
ALOGI("searching process %d for allocations", pid_);
allocator::vector<Mapping> heap_mappings{mappings};
allocator::vector<Mapping> anon_mappings{mappings};
allocator::vector<Mapping> globals_mappings{mappings};
allocator::vector<Mapping> stack_mappings{mappings};
if (!ClassifyMappings(mappings, heap_mappings, anon_mappings,
globals_mappings, stack_mappings)) {
return false;
}
for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
HeapIterate(*it, [&](uintptr_t base, size_t size) {
heap_walker_.Allocation(base, base + size);
});
}
for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
heap_walker_.Allocation(it->begin, it->end);
}
for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
heap_walker_.Root(it->begin, it->end);
}
for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
heap_walker_.Root(thread_it->stack.first, it->end);
}
}
heap_walker_.Root(thread_it->regs);
}
ALOGI("searching done");
return true;
}
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
size_t* num_leaks, size_t* leak_bytes) {
ALOGI("sweeping process %d for unreachable memory", pid_);
leaks.clear();
allocator::vector<Range> leaked{allocator_};
if (!heap_walker_.Leaked(leaked, limit, num_leaks, leak_bytes)) {
return false;
}
for (auto it = leaked.begin(); it != leaked.end(); it++) {
Leak leak{};
leak.begin = it->begin;
leak.size = it->end - it->begin;
memcpy(leak.contents, reinterpret_cast<void*>(it->begin),
std::min(leak.size, Leak::contents_length));
ssize_t num_backtrace_frames = malloc_backtrace(reinterpret_cast<void*>(it->begin),
leak.backtrace_frames, leak.backtrace_length);
if (num_backtrace_frames > 0) {
leak.num_backtrace_frames = num_backtrace_frames;
}
leaks.emplace_back(leak);
}
ALOGI("sweeping done");
return true;
}
static bool has_prefix(const allocator::string& s, const char* prefix) {
int ret = s.compare(0, strlen(prefix), prefix);
return ret == 0;
}
bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
allocator::vector<Mapping>& heap_mappings,
allocator::vector<Mapping>& anon_mappings,
allocator::vector<Mapping>& globals_mappings,
allocator::vector<Mapping>& stack_mappings)
{
heap_mappings.clear();
anon_mappings.clear();
globals_mappings.clear();
stack_mappings.clear();
allocator::string current_lib{allocator_};
for (auto it = mappings.begin(); it != mappings.end(); it++) {
if (it->execute) {
current_lib = it->name;
continue;
}
if (!it->read) {
continue;
}
const allocator::string mapping_name{it->name, allocator_};
if (mapping_name == "[anon:.bss]") {
// named .bss section
globals_mappings.emplace_back(*it);
} else if (mapping_name == current_lib) {
// .rodata or .data section
globals_mappings.emplace_back(*it);
} else if (mapping_name == "[anon:libc_malloc]") {
// named malloc mapping
heap_mappings.emplace_back(*it);
} else if (has_prefix(mapping_name, "/dev/ashmem/dalvik")) {
// named dalvik heap mapping
globals_mappings.emplace_back(*it);
} else if (has_prefix(mapping_name, "[stack")) {
// named stack mapping
stack_mappings.emplace_back(*it);
} else if (mapping_name.size() == 0) {
globals_mappings.emplace_back(*it);
} else if (has_prefix(mapping_name, "[anon:") && mapping_name != "[anon:leak_detector_malloc]") {
// TODO(ccross): it would be nice to treat named anonymous mappings as
// possible leaks, but naming something in a .bss or .data section makes
// it impossible to distinguish them from mmaped and then named mappings.
globals_mappings.emplace_back(*it);
}
}
return true;
}
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
int parent_pid = getpid();
int parent_tid = gettid();
Heap heap;
Semaphore continue_parent_sem;
LeakPipe pipe;
PtracerThread thread{[&]() -> int {
/////////////////////////////////////////////
// Collection thread
/////////////////////////////////////////////
ALOGI("collecting thread info for process %d...", parent_pid);
ThreadCapture thread_capture(parent_pid, heap);
allocator::vector<ThreadInfo> thread_info(heap);
allocator::vector<Mapping> mappings(heap);
// ptrace all the threads
if (!thread_capture.CaptureThreads()) {
return 1;
}
// collect register contents and stacks
if (!thread_capture.CapturedThreadInfo(thread_info)) {
return 1;
}
// snapshot /proc/pid/maps
if (!ProcessMappings(parent_pid, mappings)) {
return 1;
}
// malloc must be enabled to call fork, at_fork handlers take the same
// locks as ScopedDisableMalloc. All threads are paused in ptrace, so
// memory state is still consistent. Unfreeze the original thread so it
// can drop the malloc locks, it will block until the collection thread
// exits.
thread_capture.ReleaseThread(parent_tid);
continue_parent_sem.Post();
// fork a process to do the heap walking
int ret = fork();
if (ret < 0) {
return 1;
} else if (ret == 0) {
/////////////////////////////////////////////
// Heap walker process
/////////////////////////////////////////////
// Examine memory state in the child using the data collected above and
// the CoW snapshot of the process memory contents.
if (!pipe.OpenSender()) {
_exit(1);
}
MemUnreachable unreachable{parent_pid, heap};
if (!unreachable.CollectAllocations(thread_info, mappings)) {
_exit(2);
}
size_t num_allocations = unreachable.Allocations();
size_t allocation_bytes = unreachable.AllocationBytes();
allocator::vector<Leak> leaks{heap};
size_t num_leaks = 0;
size_t leak_bytes = 0;
bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);
ok = ok && pipe.Sender().Send(num_allocations);
ok = ok && pipe.Sender().Send(allocation_bytes);
ok = ok && pipe.Sender().Send(num_leaks);
ok = ok && pipe.Sender().Send(leak_bytes);
ok = ok && pipe.Sender().SendVector(leaks);
if (!ok) {
_exit(3);
}
_exit(0);
} else {
// Nothing left to do in the collection thread, return immediately,
// releasing all the captured threads.
ALOGI("collection thread done");
return 0;
}
}};
/////////////////////////////////////////////
// Original thread
/////////////////////////////////////////////
{
// Disable malloc to get a consistent view of memory
ScopedDisableMalloc disable_malloc;
// Start the collection thread
thread.Start();
// Wait for the collection thread to signal that it is ready to fork the
// heap walker process.
continue_parent_sem.Wait(100s);
// Re-enable malloc so the collection thread can fork.
}
// Wait for the collection thread to exit
int ret = thread.Join();
if (ret != 0) {
return false;
}
// Get a pipe from the heap walker process. Transferring a new pipe fd
// ensures no other forked processes can have it open, so when the heap
// walker process dies the remote side of the pipe will close.
if (!pipe.OpenReceiver()) {
return false;
}
bool ok = true;
ok = ok && pipe.Receiver().Receive(&info.num_allocations);
ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
ok = ok && pipe.Receiver().Receive(&info.num_leaks);
ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
if (!ok) {
return false;
}
ALOGI("unreachable memory detection done");
ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
info.leak_bytes, info.num_leaks, info.num_leaks == 1 ? "" : "s",
info.allocation_bytes, info.num_allocations, info.num_allocations == 1 ? "" : "s");
return true;
}
std::string Leak::ToString(bool log_contents) const {
std::ostringstream oss;
oss << " " << std::dec << size;
oss << " bytes at ";
oss << std::hex << begin;
oss << std::endl;
if (log_contents) {
const int bytes_per_line = 16;
const size_t bytes = std::min(size, contents_length);
if (bytes == size) {
oss << " contents:" << std::endl;
} else {
oss << " first " << bytes << " bytes of contents:" << std::endl;
}
for (size_t i = 0; i < bytes; i += bytes_per_line) {
oss << " " << std::hex << begin + i << ": ";
size_t j;
oss << std::setfill('0');
for (j = i; j < bytes && j < i + bytes_per_line; j++) {
// cast through unsigned char so negative bytes don't sign-extend
oss << std::setw(2) << static_cast<int>(static_cast<unsigned char>(contents[j])) << " ";
}
oss << std::setfill(' ');
for (; j < i + bytes_per_line; j++) {
oss << " ";
}
for (j = i; j < bytes && j < i + bytes_per_line; j++) {
char c = contents[j];
if (c < ' ' || c >= 0x7f) {
c = '.';
}
oss << c;
}
oss << std::endl;
}
}
if (num_backtrace_frames > 0) {
oss << backtrace_string(backtrace_frames, num_backtrace_frames);
}
return oss.str();
}
std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
std::ostringstream oss;
oss << " " << leak_bytes << " bytes in ";
oss << num_leaks << " unreachable allocation" << (num_leaks == 1 ? "" : "s");
oss << std::endl;
for (auto it = leaks.begin(); it != leaks.end(); it++) {
oss << it->ToString(log_contents);
}
return oss.str();
}
std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
UnreachableMemoryInfo info;
if (!GetUnreachableMemory(info, limit)) {
return "Failed to get unreachable memory";
}
return info.ToString(log_contents);
}
bool LogUnreachableMemory(bool log_contents, size_t limit) {
UnreachableMemoryInfo info;
if (!GetUnreachableMemory(info, limit)) {
return false;
}
for (auto it = info.leaks.begin(); it != info.leaks.end(); it++) {
ALOGE("%s", it->ToString(log_contents).c_str());
}
return true;
}
bool NoLeaks() {
UnreachableMemoryInfo info;
if (!GetUnreachableMemory(info, 0)) {
return false;
}
return info.num_leaks == 0;
}

ProcessMappings.cpp
@@ -0,0 +1,67 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <android-base/unique_fd.h>
#include "LineBuffer.h"
#include "ProcessMappings.h"
#include "log.h"
// The stack buffer is used first to build the /proc path and then reused as
// the line buffer while parsing.
bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings) {
char map_buffer[1024];
snprintf(map_buffer, sizeof(map_buffer), "/proc/%d/maps", pid);
int fd = open(map_buffer, O_RDONLY);
if (fd < 0) {
return false;
}
android::base::unique_fd fd_guard{fd};
LineBuffer line_buf(fd, map_buffer, sizeof(map_buffer));
char* line;
size_t line_len;
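// Each line of /proc/<pid>/maps looks like:
//   <begin>-<end> <perms> <offset> <dev> <inode>   <name>
// e.g. "7f531000-7f552000 rw-p 00000000 00:00 0   [anon:libc_malloc]"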
while (line_buf.GetLine(&line, &line_len)) {
int name_pos;
char perms[5];
Mapping mapping{};
if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*x:%*x %*d %n",
&mapping.begin, &mapping.end, perms, &name_pos) == 3) {
if (perms[0] == 'r') {
mapping.read = true;
}
if (perms[1] == 'w') {
mapping.write = true;
}
if (perms[2] == 'x') {
mapping.execute = true;
}
if (perms[3] == 'p') {
mapping.priv = true;
}
if ((size_t)name_pos < line_len) {
strlcpy(mapping.name, line + name_pos, sizeof(mapping.name));
}
mappings.emplace_back(mapping);
}
}
return true;
}

ProcessMappings.h
@@ -0,0 +1,36 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
#define LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
#include "Allocator.h"
struct Mapping {
uintptr_t begin;
uintptr_t end;
bool read;
bool write;
bool execute;
bool priv;
char name[96];
};
// Reads /proc/<pid>/maps and appends a Mapping for each readable line to
// mappings.
bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings);
#endif // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_

PtracerThread.cpp
@@ -0,0 +1,153 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <sched.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "android-base/macros.h"
#include "anon_vma_naming.h"
#include "log.h"
#include "PtracerThread.h"
class Stack {
public:
Stack(size_t size) : size_(size) {
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
page_size_ = sysconf(_SC_PAGE_SIZE);
size_ += page_size_*2; // guard pages
base_ = mmap(NULL, size_, prot, flags, -1, 0);
if (base_ == MAP_FAILED) {
base_ = NULL;
size_ = 0;
return;
}
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, base_, size_, "libmemunreachable stack");
mprotect(base_, page_size_, PROT_NONE);
mprotect(top(), page_size_, PROT_NONE);
};
~Stack() {
munmap(base_, size_);
};
void* top() {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base_) + size_ - page_size_);
};
private:
DISALLOW_COPY_AND_ASSIGN(Stack);
void *base_;
size_t size_;
size_t page_size_;
};
PtracerThread::PtracerThread(const std::function<int()>& func) :
child_pid_(0) {
stack_ = std::make_unique<Stack>(PTHREAD_STACK_MIN);
if (stack_->top() == nullptr) {
LOG_ALWAYS_FATAL("failed to mmap child stack: %s", strerror(errno));
}
func_ = std::function<int()>{[&, func]() -> int {
// In the child thread, lock and unlock the mutex to wait for the parent
// to finish setting up for the child thread
std::unique_lock<std::mutex> lk(m_);
lk.unlock();
_exit(func());
}};
}
PtracerThread::~PtracerThread() {
Kill();
Join();
ClearTracer();
stack_ = nullptr;
}
bool PtracerThread::Start() {
std::unique_lock<std::mutex> lk(m_);
// clone takes a plain function pointer, so use a captureless lambda as a
// trampoline that unwraps the std::function passed through arg
auto proxy = [](void *arg) -> int {
prctl(PR_SET_NAME, "libmemunreachable ptrace thread");
return (*reinterpret_cast<std::function<int()>*>(arg))();
};
child_pid_ = clone(proxy, stack_->top(),
CLONE_VM|CLONE_FS|CLONE_FILES/*|CLONE_UNTRACED*/,
reinterpret_cast<void*>(&func_));
if (child_pid_ < 0) {
ALOGE("failed to clone child: %s", strerror(errno));
return false;
}
SetTracer(child_pid_);
lk.unlock();
return true;
}
int PtracerThread::Join() {
if (child_pid_ == -1) {
return -1;
}
int status;
int ret = TEMP_FAILURE_RETRY(waitpid(child_pid_, &status, __WALL));
if (ret < 0) {
ALOGE("waitpid %d failed: %s", child_pid_, strerror(errno));
return -1;
}
child_pid_ = -1;
if (WIFEXITED(status)) {
return WEXITSTATUS(status);
} else if (WIFSIGNALED(status)) {
return -WTERMSIG(status);
} else {
ALOGE("unexpected status %x", status);
return -1;
}
}
void PtracerThread::Kill() {
if (child_pid_ == -1) {
return;
}
syscall(SYS_tkill, child_pid_, SIGKILL);
}
void PtracerThread::SetTracer(pid_t tracer_pid) {
prctl(PR_SET_PTRACER, tracer_pid);
}
void PtracerThread::ClearTracer() {
prctl(PR_SET_PTRACER, 0);
}

PtracerThread.h
@@ -0,0 +1,50 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_PTRACER_THREAD_H_
#define LIBMEMUNREACHABLE_PTRACER_THREAD_H_
#include <functional>
#include <mutex>
#include "android-base/macros.h"
#include "Allocator.h"
class Stack;
// PtracerThread is similar to std::thread, except that it creates a "thread"
// that can ptrace the other threads. The thread is actually a separate
// process, with its own thread group, but shares address space and fds with
// the parent.
class PtracerThread {
public:
PtracerThread(const std::function<int()>& func);
~PtracerThread();
bool Start();
int Join();
private:
void SetTracer(pid_t);
void ClearTracer();
void Kill();
DISALLOW_COPY_AND_ASSIGN(PtracerThread);
std::unique_ptr<Stack> stack_;
std::function<int()> func_;
std::mutex m_;
pid_t child_pid_;
};
#endif // LIBMEMUNREACHABLE_PTRACER_THREAD_H_

README.md
@@ -0,0 +1,71 @@
libmemunreachable
================
Introduction
--------------
libmemunreachable is a zero-overhead native memory leak detector. It uses an imprecise mark-and-sweep garbage collector pass over all native memory, reporting any unreachable blocks as leaks. It is similar to the [Heap Checker from tcmalloc](http://htmlpreview.github.io/?https://github.com/gperftools/gperftools/blob/master/doc/heap_checker.html), but with a few key differences to remove the overhead. Instead of instrumenting every call to malloc and free, it queries the allocator (jemalloc) for active allocations when leak detection is requested. In addition, it performs a very short stop-the-world data collection on the main process, and then forks a copy of the process to perform the mark-and-sweep, minimizing disruption to the original process.
In the default (zero-overhead) mode, the returned data on leaks is limited to the address, approximate (upper bound) size, and the first 32 bytes of the contents of the leaked allocation. If malloc_debug backtraces are enabled, they will be included in the leak information, but backtracing allocations requires significant overhead.
----------
Usage
-------
### C interface ###
#### `bool LogUnreachableMemory(bool log_contents, size_t limit)` ####
Writes a description of leaked memory to the log. A summary is always written, followed by details of up to `limit` leaks. If `log_contents` is `true`, details include up to 32 bytes of the contents of each leaked allocation.
Returns true if leak detection succeeded.
#### `bool NoLeaks()` ####
Returns `true` if no unreachable memory was found.
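For illustration, a minimal check built on the C interface might look like the sketch below; the wrapper function and its abort-on-leak policy are assumptions, not part of the library:
```cpp
#include <stdlib.h>

#include <memunreachable/memunreachable.h>

// Hypothetical helper: log up to 50 leaks, including contents, then abort
// if leak detection failed or any unreachable memory was found.
static void CheckForLeaksOrDie() {
  if (!LogUnreachableMemory(true /* log_contents */, 50 /* limit */)) {
    abort();  // the detection pass itself failed
  }
  if (!NoLeaks()) {
    abort();  // leaks were reported above
  }
}
```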
### C++ interface ###
#### `bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit = 100)` ####
Updates an `UnreachableMemoryInfo` object with information on leaks, including details on up to `limit` leaks. Returns true if leak detection succeeded.
#### `std::string GetUnreachableMemoryString(bool log_contents = false, size_t limit = 100)` ####
Returns a description of leaked memory. A summary is always written, followed by details of up to `limit` leaks. If `log_contents` is `true`, details include up to 32 bytes of the contents of each leaked allocation.
If leak detection fails, the returned string reports the failure instead.
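A corresponding sketch for the C++ interface, assuming the caller simply prints each leak (the `DumpLeaks` helper is illustrative):
```cpp
#include <cstdio>

#include <memunreachable/memunreachable.h>

static void DumpLeaks() {
  UnreachableMemoryInfo info;
  if (!GetUnreachableMemory(info, 25 /* limit */)) {
    return;  // collection failed; nothing to report
  }
  for (const Leak& leak : info.leaks) {
    // Leak::ToString formats the address, size, and optionally the contents.
    printf("%s\n", leak.ToString(false /* log_contents */).c_str());
  }
}
```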
Implementation
-------------------
The sequence of steps required to perform a leak detection pass is split across three processes: the original process, the collection process, and the sweeper process.
1. *Original process*: Leak detection is requested by calling `GetUnreachableMemory()`
2. Allocations are disabled using `malloc_disable()`
3. The collection process is spawned. The collection process is similar to a normal `fork()` child process, except that it shares the address space of the parent - any writes by the original process are visible to the collection process, and vice-versa.
4. *Collection process*: All threads in the original process are paused with `ptrace()`.
5. Register contents, active stack areas, and memory mapping information are collected.
6. *Original process*: Allocations are re-enabled using `malloc_enable()`, but all threads are still paused with `ptrace()`.
7. *Collection process*: The sweeper process is spawned using a normal `fork()`. The sweeper process has a copy of all memory from the original process, including all the data collected by the collection process.
8. The collection process releases all threads from `ptrace` and exits.
9. *Original process*: All threads continue; the thread that called `GetUnreachableMemory()` blocks, waiting for leak data over a pipe.
10. *Sweeper process*: A list of all active allocations is produced by examining the memory mappings and calling `malloc_iterate()` on any heap mappings.
11. A list of all roots is produced from globals (.data and .bss sections of binaries), and registers and stacks from each thread.
12. The mark-and-sweep pass is performed starting from roots.
13. Unmarked allocations are sent over the pipe back to the original process.
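The flow above can be condensed into a heavily simplified sketch. This is not the real sequencing code in `MemUnreachable.cpp`; `SweepAndSendLeaks` and `ReceiveLeaks` are illustrative stand-ins for the `HeapWalker` and `LeakPipe` logic, and all error handling is omitted:
```cpp
#include <unistd.h>

#include "Allocator.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "ThreadCapture.h"

static void SweepAndSendLeaks() { /* steps 10-13: walk, mark, report */ }
static bool ReceiveLeaks() { return true; /* step 9: read from the leak pipe */ }

bool DetectLeaksSketch() {
  Heap heap;
  ScopedDisableMalloc disable_malloc;       // step 2: freeze allocator state
  PtracerThread collection([&]() -> int {   // step 3: clone sharing the VM
    ThreadCapture capture(getpid(), heap);  // steps 4-5: pause all threads
    capture.CaptureThreads();               //           and snapshot registers
    // (the real code re-enables malloc in the original process here, step 6)
    if (fork() == 0) {                      // step 7: sweeper gets a snapshot
      SweepAndSendLeaks();
      _exit(0);
    }
    capture.ReleaseThreads();               // step 8
    return 0;
  });
  collection.Start();
  return collection.Join() == 0 && ReceiveLeaks();
}
```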
----------
Components
---------------
- `MemUnreachable.cpp`: Entry points, implements the sequencing described above.
- `PtracerThread.cpp`: Used to clone the collection process with shared address space.
- `ThreadCapture.cpp`: Pauses threads in the main process and collects register contents.
- `ProcessMappings.cpp`: Collects snapshots of `/proc/pid/maps`.
- `HeapWalker.cpp`: Performs the mark-and-sweep pass over active allocations.
- `LeakPipe.cpp`: Transfers data describing leaks from the sweeper process to the original process.
Heap allocator requirements
----------------------------------
libmemunreachable requires a small interface to the allocator in order to collect information about active allocations.
- `malloc_disable()`: prevent any thread from mutating internal allocator state.
- `malloc_enable()`: re-enable allocations in all threads.
- `malloc_iterate()`: call a callback on each active allocation in a given heap region.
- `malloc_backtrace()`: return the backtrace from when the allocation at the given address was allocated, if it was collected.
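As an illustration of how these calls compose, the sketch below gathers allocation ranges from one heap region into a preallocated buffer. The `Range` struct, the buffer size, and the function names are assumptions for illustration; note that the callback must not allocate, since the allocator is disabled while iterating:
```cpp
#include <stddef.h>
#include <stdint.h>

#include "bionic.h"

struct Range { uintptr_t begin; size_t size; };

// Preallocated output: the callback runs while malloc is disabled, so it
// must not allocate anything itself.
static Range g_ranges[4096];
static size_t g_num_ranges;

static void CollectAllocation(uintptr_t base, size_t size, void* /*arg*/) {
  if (g_num_ranges < sizeof(g_ranges) / sizeof(g_ranges[0])) {
    g_ranges[g_num_ranges++] = {base, size};
  }
}

static void CollectHeap(uintptr_t heap_base, size_t heap_size) {
  g_num_ranges = 0;
  malloc_disable();  // prevent all threads from mutating allocator state
  malloc_iterate(heap_base, heap_size, CollectAllocation, nullptr);
  malloc_enable();   // resume normal allocation
}
```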

View File

@ -0,0 +1,52 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SCOPED_ALARM_H_
#define LIBMEMUNREACHABLE_SCOPED_ALARM_H_
#include <signal.h>
#include <sys/time.h>
#include <chrono>
#include <functional>
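// Invokes func when the real-time timer expires. func_ is static, so at most
// one ScopedAlarm may be active in a process at a time.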
class ScopedAlarm {
public:
ScopedAlarm(std::chrono::microseconds us, std::function<void()> func) {
func_ = func;
struct sigaction oldact{};
struct sigaction act{};
act.sa_handler = [](int) {
ScopedAlarm::func_();
};
sigaction(SIGALRM, &act, &oldact);
std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
itimerval t = itimerval{};
t.it_value.tv_sec = s.count();
t.it_value.tv_usec = (us - s).count();
setitimer(ITIMER_REAL, &t, NULL);
}
~ScopedAlarm() {
itimerval t = itimerval{};
setitimer(ITIMER_REAL, &t, NULL);
struct sigaction act{};
act.sa_handler = SIG_DFL;
sigaction(SIGALRM, &act, NULL);
}
private:
static std::function<void()> func_;
};
#endif // LIBMEMUNREACHABLE_SCOPED_ALARM_H_

View File

@ -0,0 +1,113 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
#define LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
#include <memory>
#include "android-base/macros.h"
#include "bionic.h"
#include "log.h"
#include "ScopedAlarm.h"
class DisableMallocGuard {
public:
DisableMallocGuard() : disabled_(false) {}
~DisableMallocGuard() {
Enable();
}
void Disable() {
if (!disabled_) {
malloc_disable();
disabled_ = true;
}
}
void Enable() {
if (disabled_) {
malloc_enable();
disabled_ = false;
}
}
private:
DISALLOW_COPY_AND_ASSIGN(DisableMallocGuard);
bool disabled_;
};
// Any calls to malloc or free from this thread will deadlock as long as this
// object is in scope. Calls to malloc from other threads may succeed (for
// example if the allocation is satisfied out of the thread's tcache), or may
// block until the object is destroyed.
//
// Don't call fork() while malloc is disabled; fork() needs the same locks
// that are held here.
class ScopedDisableMalloc {
public:
ScopedDisableMalloc() {
disable_malloc_.Disable();
}
~ScopedDisableMalloc() {
disable_malloc_.Enable();
}
private:
DISALLOW_COPY_AND_ASSIGN(ScopedDisableMalloc);
DisableMallocGuard disable_malloc_;
};
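// Example usage (sketch):
//   {
//     ScopedDisableMalloc disable_malloc;
//     // inspect heap state here; do not allocate or fork
//   }  // malloc is usable again once the guard goes out of scope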
class ScopedDisableMallocTimeout {
public:
ScopedDisableMallocTimeout(std::chrono::milliseconds timeout = std::chrono::milliseconds(2000)) :
timeout_(timeout), timed_out_(false), disable_malloc_() {
Disable();
}
~ScopedDisableMallocTimeout() {
Enable();
}
bool timed_out() {
return timed_out_;
}
void Enable() {
disable_malloc_.Enable();
alarm_ = nullptr;
}
void Disable() {
// set up the alarm before disabling malloc so unique_ptr can be used
alarm_ = std::make_unique<ScopedAlarm>(timeout_, [&]() {
disable_malloc_.Enable();
timed_out_ = true;
});
disable_malloc_.Disable();
}
private:
DISALLOW_COPY_AND_ASSIGN(ScopedDisableMallocTimeout);
std::chrono::milliseconds timeout_;
bool timed_out_;
std::unique_ptr<ScopedAlarm> alarm_;
DisableMallocGuard disable_malloc_;
};
#endif // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_

View File

@ -0,0 +1,81 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SCOPED_PIPE_H_
#define LIBMEMUNREACHABLE_SCOPED_PIPE_H_
#include <fcntl.h>
#include <unistd.h>
#include "log.h"
class ScopedPipe {
public:
ScopedPipe() : pipefd_{-1, -1} {
int ret = pipe2(pipefd_, O_CLOEXEC);
if (ret < 0) {
LOG_ALWAYS_FATAL("failed to open pipe");
}
}
~ScopedPipe() {
Close();
}
ScopedPipe(ScopedPipe&& other) {
SetReceiver(other.ReleaseReceiver());
SetSender(other.ReleaseSender());
}
ScopedPipe& operator=(ScopedPipe&& other) {
// Close any fds this pipe already owns before adopting the other pipe's.
Close();
SetReceiver(other.ReleaseReceiver());
SetSender(other.ReleaseSender());
return *this;
}
void CloseReceiver() {
close(ReleaseReceiver());
}
void CloseSender() {
close(ReleaseSender());
}
void Close() {
CloseReceiver();
CloseSender();
}
int Receiver() { return pipefd_[0]; }
int Sender() { return pipefd_[1]; }
int ReleaseReceiver() {
int ret = Receiver();
SetReceiver(-1);
return ret;
}
int ReleaseSender() {
int ret = Sender();
SetSender(-1);
return ret;
}
private:
void SetReceiver(int fd) { pipefd_[0] = fd; }
void SetSender(int fd) { pipefd_[1] = fd; }
int pipefd_[2];
};
#endif // LIBMEMUNREACHABLE_SCOPED_PIPE_H_

View File

@ -0,0 +1,56 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SEMAPHORE_H_
#define LIBMEMUNREACHABLE_SEMAPHORE_H_
#include <chrono>
#include <condition_variable>
#include <mutex>
#include "android-base/macros.h"
class Semaphore {
public:
Semaphore(int count = 0) : count_(count) {}
~Semaphore() = default;
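// Waits up to ms for the count to become positive and consumes one count on
// success. A timeout is not reported to the caller, so the wakeup is a hint.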
void Wait(std::chrono::milliseconds ms) {
std::unique_lock<std::mutex> lk(m_);
cv_.wait_for(lk, ms, [&]{
if (count_ > 0) {
count_--;
return true;
}
return false;
});
}
void Post() {
{
std::lock_guard<std::mutex> lk(m_);
count_++;
}
cv_.notify_one();
}
private:
DISALLOW_COPY_AND_ASSIGN(Semaphore);
int count_;
std::mutex m_;
std::condition_variable cv_;
};
#endif // LIBMEMUNREACHABLE_SEMAPHORE_H_

View File

@ -0,0 +1,370 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ThreadCapture.h"
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <map>
#include <memory>
#include <set>
#include <vector>
#include <android-base/unique_fd.h>
#include "Allocator.h"
#include "log.h"
// bionic interfaces used:
// atoi
// strlcat
// writev
// bionic interfaces reimplemented to avoid allocation:
// getdents64
// Convert a pid > 0 to a string. sprintf might allocate, so we can't use it.
// Returns a pointer somewhere in buf to a null terminated string, or NULL
// on error.
static char *pid_to_str(char *buf, size_t len, pid_t pid) {
if (pid <= 0) {
return nullptr;
}
char *ptr = buf + len - 1;
*ptr = 0;
while (pid > 0) {
ptr--;
if (ptr < buf) {
return nullptr;
}
*ptr = '0' + (pid % 10);
pid /= 10;
}
return ptr;
}
class ThreadCaptureImpl {
public:
ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator);
~ThreadCaptureImpl() {}
bool ListThreads(TidList& tids);
bool CaptureThreads();
bool ReleaseThreads();
bool ReleaseThread(pid_t tid);
bool CapturedThreadInfo(ThreadInfoList& threads);
void InjectTestFunc(std::function<void(pid_t)>&& f) { inject_test_func_ = f; }
private:
int CaptureThread(pid_t tid);
bool ReleaseThread(pid_t tid, unsigned int signal);
int PtraceAttach(pid_t tid);
void PtraceDetach(pid_t tid, unsigned int signal);
bool PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info);
allocator::map<unsigned int, pid_t> captured_threads_;
Allocator<ThreadCaptureImpl> allocator_;
pid_t pid_;
std::function<void(pid_t)> inject_test_func_;
};
ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator) :
captured_threads_(allocator), allocator_(allocator), pid_(pid) {
}
bool ThreadCaptureImpl::ListThreads(TidList& tids) {
tids.clear();
char pid_buf[11];
char path[256] = "/proc/";
char* pid_str = pid_to_str(pid_buf, sizeof(pid_buf), pid_);
if (!pid_str) {
return false;
}
strlcat(path, pid_str, sizeof(path));
strlcat(path, "/task", sizeof(path));
int fd = open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY);
if (fd < 0) {
ALOGE("failed to open %s: %s", path, strerror(errno));
return false;
}
android::base::unique_fd fd_guard{fd};
struct linux_dirent64 {
uint64_t d_ino;
int64_t d_off;
uint16_t d_reclen;
char d_type;
char d_name[];
} __attribute__((packed));
char dirent_buf[4096];
ssize_t nread;
do {
nread = syscall(SYS_getdents64, fd, dirent_buf, sizeof(dirent_buf));
if (nread < 0) {
ALOGE("failed to get directory entries from %s: %s", path, strerror(errno));
return false;
} else if (nread > 0) {
ssize_t off = 0;
while (off < nread) {
linux_dirent64* dirent = reinterpret_cast<linux_dirent64*>(dirent_buf + off);
off += dirent->d_reclen;
pid_t tid = atoi(dirent->d_name);
if (tid <= 0) {
continue;
}
tids.push_back(tid);
}
}
} while (nread != 0);
return true;
}
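// Attaches to every thread of the target process, looping until no new
// threads appear, so threads spawned during the capture are also paused.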
bool ThreadCaptureImpl::CaptureThreads() {
TidList tids{allocator_};
bool found_new_thread;
do {
if (!ListThreads(tids)) {
ReleaseThreads();
return false;
}
found_new_thread = false;
for (auto it = tids.begin(); it != tids.end(); it++) {
auto captured = captured_threads_.find(*it);
if (captured == captured_threads_.end()) {
if (CaptureThread(*it) < 0) {
ReleaseThreads();
return false;
}
found_new_thread = true;
}
}
} while (found_new_thread);
return true;
}
// Detaches from a thread, delivering signal if nonzero; logs on error
void ThreadCaptureImpl::PtraceDetach(pid_t tid, unsigned int signal) {
void* sig_ptr = reinterpret_cast<void*>(static_cast<uintptr_t>(signal));
if (ptrace(PTRACE_DETACH, tid, NULL, sig_ptr) < 0 && errno != ESRCH) {
ALOGE("failed to detach from thread %d of process %d: %s", tid, pid_,
strerror(errno));
}
}
// Attaches to and pauses thread.
// Returns 1 on attach, 0 on tid not found, -1 and logs on error
int ThreadCaptureImpl::PtraceAttach(pid_t tid) {
int ret = ptrace(PTRACE_SEIZE, tid, NULL, NULL);
if (ret < 0) {
ALOGE("failed to attach to thread %d of process %d: %s", tid, pid_,
strerror(errno));
return -1;
}
if (inject_test_func_) {
inject_test_func_(tid);
}
if (ptrace(PTRACE_INTERRUPT, tid, 0, 0) < 0) {
if (errno == ESRCH) {
return 0;
} else {
ALOGE("failed to interrupt thread %d of process %d: %s", tid, pid_,
strerror(errno));
PtraceDetach(tid, 0);
return -1;
}
}
return 1;
}
bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
thread_info.tid = tid;
const unsigned int max_num_regs = 128; // larger than number of registers on any device
uintptr_t regs[max_num_regs];
struct iovec iovec;
iovec.iov_base = &regs;
iovec.iov_len = sizeof(regs);
if (ptrace(PTRACE_GETREGSET, tid, reinterpret_cast<void*>(NT_PRSTATUS), &iovec)) {
ALOGE("ptrace getregset for thread %d of process %d failed: %s",
tid, pid_, strerror(errno));
return false;
}
unsigned int num_regs = iovec.iov_len / sizeof(uintptr_t);
thread_info.regs.assign(&regs[0], &regs[num_regs]);
const int sp =
#if defined(__x86_64__)
offsetof(struct pt_regs, rsp) / sizeof(uintptr_t)
#elif defined(__i386__)
offsetof(struct pt_regs, esp) / sizeof(uintptr_t)
#elif defined(__arm__)
offsetof(struct pt_regs, ARM_sp) / sizeof(uintptr_t)
#elif defined(__aarch64__)
offsetof(struct user_pt_regs, sp) / sizeof(uintptr_t)
#elif defined(__mips__) || defined(__mips64__)
offsetof(struct pt_regs, regs[29]) / sizeof(uintptr_t)
#else
#error Unrecognized architecture
#endif
;
// TODO(ccross): use /proc/tid/status or /proc/pid/maps to get start_stack
thread_info.stack = std::pair<uintptr_t, uintptr_t>(regs[sp], 0);
return true;
}
int ThreadCaptureImpl::CaptureThread(pid_t tid) {
int ret = PtraceAttach(tid);
if (ret <= 0) {
return ret;
}
int status = 0;
if (TEMP_FAILURE_RETRY(waitpid(tid, &status, __WALL)) < 0) {
ALOGE("failed to wait for pause of thread %d of process %d: %s", tid, pid_,
strerror(errno));
PtraceDetach(tid, 0);
return -1;
}
if (!WIFSTOPPED(status)) {
ALOGE("thread %d of process %d was not paused after waitpid, killed?",
tid, pid_);
return 0;
}
unsigned int resume_signal = 0;
unsigned int signal = WSTOPSIG(status);
if ((status >> 16) == PTRACE_EVENT_STOP) {
switch (signal) {
case SIGSTOP:
case SIGTSTP:
case SIGTTIN:
case SIGTTOU:
// group-stop signals
break;
case SIGTRAP:
// normal ptrace interrupt stop
break;
default:
ALOGE("unexpected signal %d with PTRACE_EVENT_STOP for thread %d of process %d",
signal, tid, pid_);
return -1;
}
} else {
// signal-delivery-stop
resume_signal = signal;
}
captured_threads_[tid] = resume_signal;
return 1;
}
bool ThreadCaptureImpl::ReleaseThread(pid_t tid) {
auto it = captured_threads_.find(tid);
if (it == captured_threads_.end()) {
return false;
}
return ReleaseThread(it->first, it->second);
}
bool ThreadCaptureImpl::ReleaseThread(pid_t tid, unsigned int signal) {
PtraceDetach(tid, signal);
return true;
}
bool ThreadCaptureImpl::ReleaseThreads() {
bool ret = true;
for (auto it = captured_threads_.begin(); it != captured_threads_.end(); ) {
if (ReleaseThread(it->first, it->second)) {
it = captured_threads_.erase(it);
} else {
it++;
ret = false;
}
}
return ret;
}
bool ThreadCaptureImpl::CapturedThreadInfo(ThreadInfoList& threads) {
threads.clear();
for (auto it = captured_threads_.begin(); it != captured_threads_.end(); it++) {
ThreadInfo t{0, allocator::vector<uintptr_t>(allocator_), std::pair<uintptr_t, uintptr_t>(0, 0)};
if (!PtraceThreadInfo(it->first, t)) {
return false;
}
threads.push_back(t);
}
return true;
}
ThreadCapture::ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator) {
Allocator<ThreadCaptureImpl> impl_allocator = allocator;
impl_ = impl_allocator.make_unique(pid, impl_allocator);
}
ThreadCapture::~ThreadCapture() {}
bool ThreadCapture::ListThreads(TidList& tids) {
return impl_->ListThreads(tids);
}
bool ThreadCapture::CaptureThreads() {
return impl_->CaptureThreads();
}
bool ThreadCapture::ReleaseThreads() {
return impl_->ReleaseThreads();
}
bool ThreadCapture::ReleaseThread(pid_t tid) {
return impl_->ReleaseThread(tid);
}
bool ThreadCapture::CapturedThreadInfo(ThreadInfoList& threads) {
return impl_->CapturedThreadInfo(threads);
}
void ThreadCapture::InjectTestFunc(std::function<void(pid_t)>&& f) {
impl_->InjectTestFunc(std::forward<std::function<void(pid_t)>>(f));
}

View File

@ -0,0 +1,54 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_THREAD_CAPTURE_H_
#define LIBMEMUNREACHABLE_THREAD_CAPTURE_H_
#include <utility>
#include "Allocator.h"
struct ThreadInfo {
pid_t tid;
allocator::vector<uintptr_t> regs;
std::pair<uintptr_t, uintptr_t> stack;
};
using TidList = allocator::vector<pid_t>;
using ThreadInfoList = allocator::vector<ThreadInfo>;
class ThreadCaptureImpl;
class ThreadCapture {
public:
ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator);
~ThreadCapture();
bool ListThreads(TidList& tids);
bool CaptureThreads();
bool ReleaseThreads();
bool ReleaseThread(pid_t tid);
bool CapturedThreadInfo(ThreadInfoList& threads);
void InjectTestFunc(std::function<void(pid_t)>&& f);
private:
ThreadCapture(const ThreadCapture&) = delete;
void operator=(const ThreadCapture&) = delete;
Allocator<ThreadCaptureImpl>::unique_ptr impl_;
};
#endif // LIBMEMUNREACHABLE_THREAD_CAPTURE_H_

View File

@ -0,0 +1,25 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_
#define LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_
#include <sys/prctl.h>
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif // LIBMEMUNREACHABLE_ANON_VMA_NAMING_H_

View File

@ -0,0 +1,33 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_BIONIC_H_
#define LIBMEMUNREACHABLE_BIONIC_H_
#include <sys/cdefs.h>
__BEGIN_DECLS
/* Exported from bionic */
extern void malloc_disable();
extern void malloc_enable();
extern int malloc_iterate(uintptr_t base, size_t size,
void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
extern ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);
__END_DECLS
#endif // LIBMEMUNREACHABLE_BIONIC_H_

View File

@ -0,0 +1,71 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
#define LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
#include <sys/cdefs.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifdef __cplusplus
#include <vector>
#include <string>
struct Leak {
uintptr_t begin;
size_t size;
size_t num_backtrace_frames;
static const size_t contents_length = 32;
char contents[contents_length];
static const size_t backtrace_length = 16;
uintptr_t backtrace_frames[backtrace_length];
std::string ToString(bool log_contents) const;
};
struct UnreachableMemoryInfo {
std::vector<Leak> leaks;
size_t num_leaks;
size_t leak_bytes;
size_t num_allocations;
size_t allocation_bytes;
UnreachableMemoryInfo() {}
~UnreachableMemoryInfo() {
// Clear the memory that holds the leaks, otherwise the next attempt to
// detect leaks may find the old data (for example in the jemalloc tcache)
// and consider all the leaks to be referenced.
memset(leaks.data(), 0, leaks.capacity() * sizeof(Leak));
}
std::string ToString(bool log_contents) const;
};
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit = 100);
std::string GetUnreachableMemoryString(bool log_contents = false, size_t limit = 100);
#endif
__BEGIN_DECLS
bool LogUnreachableMemory(bool log_contents, size_t limit);
bool NoLeaks();
__END_DECLS
#endif // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_

libmemunreachable/log.h
View File

@ -0,0 +1,24 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LOG_H_
#define LIBMEMUNREACHABLE_LOG_H_
#define LOG_TAG "libmemunreachable"
#include <log/log.h>
#endif // LIBMEMUNREACHABLE_LOG_H_

View File

@ -0,0 +1,273 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <Allocator.h>
#include <sys/time.h>
#include <chrono>
#include <functional>
#include <list>
#include <vector>
#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
std::function<void()> ScopedAlarm::func_;
using namespace std::chrono_literals;
class AllocatorTest : public testing::Test {
protected:
AllocatorTest() : heap(), disable_malloc_() {}
virtual void SetUp() {
heap_count = 0;
}
virtual void TearDown() {
ASSERT_EQ(heap_count, 0);
ASSERT_TRUE(heap.empty());
ASSERT_FALSE(disable_malloc_.timed_out());
}
Heap heap;
private:
ScopedDisableMallocTimeout disable_malloc_;
};
TEST_F(AllocatorTest, simple) {
Allocator<char[100]> allocator(heap);
void *ptr = allocator.allocate();
ASSERT_TRUE(ptr != NULL);
allocator.deallocate(ptr);
}
TEST_F(AllocatorTest, multiple) {
Allocator<char[100]> allocator(heap);
void *ptr1 = allocator.allocate();
ASSERT_TRUE(ptr1 != NULL);
void *ptr2 = allocator.allocate();
ASSERT_TRUE(ptr2 != NULL);
ASSERT_NE(ptr1, ptr2);
allocator.deallocate(ptr1);
void *ptr3 = allocator.allocate();
ASSERT_EQ(ptr1, ptr3);
allocator.deallocate(ptr3);
allocator.deallocate(ptr2);
}
TEST_F(AllocatorTest, many) {
const int num = 4096;
const int size = 128;
Allocator<char[size]> allocator(heap);
void *ptr[num];
for (int i = 0; i < num; i++) {
ptr[i] = allocator.allocate();
memset(ptr[i], 0xaa, size);
*(reinterpret_cast<unsigned char*>(ptr[i])) = i;
}
for (int i = 0; i < num; i++) {
for (int j = 0; j < num; j++) {
if (i != j) {
ASSERT_NE(ptr[i], ptr[j]);
}
}
}
for (int i = 0; i < num; i++) {
ASSERT_EQ(*(reinterpret_cast<unsigned char*>(ptr[i])), i & 0xFF);
allocator.deallocate(ptr[i]);
}
}
TEST_F(AllocatorTest, large) {
const size_t size = 1024 * 1024;
Allocator<char[size]> allocator(heap);
void *ptr = allocator.allocate();
memset(ptr, 0xaa, size);
allocator.deallocate(ptr);
}
TEST_F(AllocatorTest, many_large) {
const int num = 128;
const int size = 1024 * 1024;
Allocator<char[size]> allocator(heap);
void *ptr[num];
for (int i = 0; i < num; i++) {
ptr[i] = allocator.allocate();
memset(ptr[i], 0xaa, size);
*(reinterpret_cast<unsigned char*>(ptr[i])) = i;
}
for (int i = 0; i < num; i++) {
ASSERT_EQ(*(reinterpret_cast<unsigned char*>(ptr[i])), i & 0xFF);
allocator.deallocate(ptr[i]);
}
}
TEST_F(AllocatorTest, copy) {
Allocator<char[100]> a(heap);
Allocator<char[200]> b = a;
Allocator<char[300]> c(b);
Allocator<char[100]> d(a);
Allocator<char[100]> e(heap);
ASSERT_EQ(a, b);
ASSERT_EQ(a, c);
ASSERT_EQ(a, d);
ASSERT_EQ(a, e);
void* ptr1 = a.allocate();
void* ptr2 = b.allocate();
void* ptr3 = c.allocate();
void* ptr4 = d.allocate();
b.deallocate(ptr1);
d.deallocate(ptr2);
a.deallocate(ptr3);
c.deallocate(ptr4);
}
TEST_F(AllocatorTest, stl_vector) {
auto v = allocator::vector<int>(Allocator<int>(heap));
for (int i = 0; i < 1024; i++) {
v.push_back(i);
}
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(v[i], i);
}
v.clear();
}
TEST_F(AllocatorTest, stl_list) {
auto v = allocator::list<int>(Allocator<int>(heap));
for (int i = 0; i < 1024; i++) {
v.push_back(i);
}
int i = 0;
for (auto iter = v.begin(); iter != v.end(); iter++, i++) {
ASSERT_EQ(*iter, i);
}
v.clear();
}
TEST_F(AllocatorTest, shared) {
Allocator<int> allocator(heap);
Allocator<int>::shared_ptr ptr = allocator.make_shared(0);
{
auto ptr2 = ptr;
}
ASSERT_NE(ptr, nullptr);
}
TEST_F(AllocatorTest, unique) {
Allocator<int> allocator(heap);
Allocator<int>::unique_ptr ptr = allocator.make_unique(0);
ASSERT_NE(ptr, nullptr);
}
class DisableMallocTest : public ::testing::Test {
protected:
void alarm(std::chrono::microseconds us) {
std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
itimerval t = itimerval();
t.it_value.tv_sec = s.count();
t.it_value.tv_usec = (us - s).count();
setitimer(ITIMER_REAL, &t, NULL);
}
};
TEST_F(DisableMallocTest, reenable) {
ASSERT_EXIT({
alarm(100ms);
void *ptr1 = malloc(128);
ASSERT_NE(ptr1, nullptr);
free(ptr1);
{
ScopedDisableMalloc disable_malloc;
}
void *ptr2 = malloc(128);
ASSERT_NE(ptr2, nullptr);
free(ptr2);
_exit(1);
}, ::testing::ExitedWithCode(1), "");
}
TEST_F(DisableMallocTest, deadlock_allocate) {
ASSERT_DEATH({
void *ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
free(ptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
void* ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
free(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_new) {
ASSERT_DEATH({
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
delete(ptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
delete(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_delete) {
ASSERT_DEATH({
char* ptr = new(char);
ASSERT_NE(ptr, nullptr);
{
alarm(250ms);
ScopedDisableMalloc disable_malloc;
delete(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_free) {
ASSERT_DEATH({
void *ptr = malloc(128);
ASSERT_NE(ptr, nullptr);
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
free(ptr);
}
}, "");
}
TEST_F(DisableMallocTest, deadlock_fork) {
ASSERT_DEATH({
{
alarm(100ms);
ScopedDisableMalloc disable_malloc;
fork();
}
}, "");
}

View File

@ -0,0 +1,145 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HeapWalker.h"
#include <gtest/gtest.h>
#include <ScopedDisableMalloc.h>
#include "Allocator.h"
class HeapWalkerTest : public ::testing::Test {
public:
HeapWalkerTest() : disable_malloc_(), heap_() {}
void TearDown() {
ASSERT_TRUE(heap_.empty());
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc_.timed_out());
}
}
protected:
ScopedDisableMallocTimeout disable_malloc_;
Heap heap_;
};
TEST_F(HeapWalkerTest, allocation) {
HeapWalker heap_walker(heap_);
ASSERT_TRUE(heap_walker.Allocation(3, 4));
ASSERT_TRUE(heap_walker.Allocation(2, 3));
ASSERT_TRUE(heap_walker.Allocation(4, 5));
ASSERT_TRUE(heap_walker.Allocation(6, 7));
ASSERT_TRUE(heap_walker.Allocation(0, 1));
}
TEST_F(HeapWalkerTest, overlap) {
HeapWalker heap_walker(heap_);
ASSERT_TRUE(heap_walker.Allocation(2, 3));
ASSERT_TRUE(heap_walker.Allocation(3, 4));
ASSERT_FALSE(heap_walker.Allocation(2, 3));
ASSERT_FALSE(heap_walker.Allocation(1, 3));
ASSERT_FALSE(heap_walker.Allocation(1, 4));
ASSERT_FALSE(heap_walker.Allocation(1, 5));
ASSERT_FALSE(heap_walker.Allocation(3, 4));
ASSERT_FALSE(heap_walker.Allocation(3, 5));
ASSERT_TRUE(heap_walker.Allocation(4, 5));
ASSERT_TRUE(heap_walker.Allocation(1, 2));
}
TEST_F(HeapWalkerTest, zero) {
HeapWalker heap_walker(heap_);
ASSERT_TRUE(heap_walker.Allocation(2, 2));
ASSERT_FALSE(heap_walker.Allocation(2, 2));
ASSERT_TRUE(heap_walker.Allocation(3, 3));
ASSERT_TRUE(heap_walker.Allocation(1, 1));
ASSERT_FALSE(heap_walker.Allocation(2, 3));
}
#define buffer_begin(buffer) reinterpret_cast<uintptr_t>(buffer)
#define buffer_end(buffer) (reinterpret_cast<uintptr_t>(buffer) + sizeof(buffer))
TEST_F(HeapWalkerTest, leak) {
void* buffer1[16]{};
char buffer2[16]{};
buffer1[0] = &buffer2[0] - sizeof(void*);
buffer1[1] = &buffer2[15] + sizeof(void*);
HeapWalker heap_walker(heap_);
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
allocator::vector<Range> leaked(heap_);
size_t num_leaks = 0;
size_t leaked_bytes = 0;
ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(1U, num_leaks);
EXPECT_EQ(16U, leaked_bytes);
ASSERT_EQ(1U, leaked.size());
EXPECT_EQ(buffer_begin(buffer2), leaked[0].begin);
EXPECT_EQ(buffer_end(buffer2), leaked[0].end);
}
TEST_F(HeapWalkerTest, live) {
const int from_buffer_entries = 4;
const int to_buffer_bytes = 16;
for (int i = 0; i < from_buffer_entries; i++) {
for (int j = 0; j < to_buffer_bytes; j++) {
void* buffer1[from_buffer_entries]{};
char buffer2[to_buffer_bytes]{};
buffer1[i] = &buffer2[j];
HeapWalker heap_walker(heap_);
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
heap_walker.Root(buffer_begin(buffer1), buffer_end(buffer1));
allocator::vector<Range> leaked(heap_);
size_t num_leaks = SIZE_T_MAX;
size_t leaked_bytes = SIZE_T_MAX;
ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(0U, num_leaks);
EXPECT_EQ(0U, leaked_bytes);
EXPECT_EQ(0U, leaked.size());
}
}
}
TEST_F(HeapWalkerTest, unaligned) {
const int from_buffer_entries = 4;
const int to_buffer_bytes = 16;
void* buffer1[from_buffer_entries]{};
char buffer2[to_buffer_bytes]{};
buffer1[1] = &buffer2;
for (unsigned int i = 0; i < sizeof(uintptr_t); i++) {
for (unsigned int j = 0; j < sizeof(uintptr_t); j++) {
HeapWalker heap_walker(heap_);
heap_walker.Allocation(buffer_begin(buffer2), buffer_end(buffer2));
heap_walker.Root(buffer_begin(buffer1) + i, buffer_end(buffer1) - j);
allocator::vector<Range> leaked(heap_);
size_t num_leaks = SIZE_T_MAX;
size_t leaked_bytes = SIZE_T_MAX;
ASSERT_EQ(true, heap_walker.Leaked(leaked, 100, &num_leaks, &leaked_bytes));
EXPECT_EQ(0U, num_leaks);
EXPECT_EQ(0U, leaked_bytes);
EXPECT_EQ(0U, leaked.size());
}
}
}

View File

@ -0,0 +1,218 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <gtest/gtest.h>
#include <memunreachable/memunreachable.h>
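// Global pointer: visible to the leak detector as a root in the .bss section.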
void* ptr;
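// Owns an allocation while hiding the only reference to it: the pointer is
// stored bitwise-inverted, so the mark pass cannot see it as reachable.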
class HiddenPointer {
public:
HiddenPointer(size_t size = 256) {
Set(malloc(size));
}
~HiddenPointer() {
Free();
}
void* Get() {
return reinterpret_cast<void*>(~ptr_);
}
void Free() {
free(Get());
Set(nullptr);
}
private:
void Set(void* ptr) {
ptr_ = ~reinterpret_cast<uintptr_t>(ptr);
}
volatile uintptr_t ptr_;
};
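// Pass the pointer to a zero-length write so the compiler must materialize
// it, making it briefly visible as a live reference during collection.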
static void Ref(void* ptr) {
write(0, ptr, 0);
}
TEST(MemunreachableTest, clean) {
UnreachableMemoryInfo info;
ASSERT_TRUE(LogUnreachableMemory(true, 100));
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
TEST(MemunreachableTest, stack) {
HiddenPointer hidden_ptr;
{
void* ptr = hidden_ptr.Get();
Ref(ptr);
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
Ref(ptr);
}
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(1U, info.leaks.size());
}
hidden_ptr.Free();
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
}
TEST(MemunreachableTest, global) {
HiddenPointer hidden_ptr;
ptr = hidden_ptr.Get();
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
ptr = NULL;
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(1U, info.leaks.size());
}
hidden_ptr.Free();
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
}
TEST(MemunreachableTest, tls) {
HiddenPointer hidden_ptr;
pthread_key_t key;
pthread_key_create(&key, NULL);
pthread_setspecific(key, hidden_ptr.Get());
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
pthread_setspecific(key, nullptr);
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(1U, info.leaks.size());
}
hidden_ptr.Free();
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
pthread_key_delete(key);
}
TEST(MemunreachableTest, twice) {
HiddenPointer hidden_ptr;
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(1U, info.leaks.size());
}
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(1U, info.leaks.size());
}
hidden_ptr.Free();
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
}
TEST(MemunreachableTest, log) {
HiddenPointer hidden_ptr;
ASSERT_TRUE(LogUnreachableMemory(true, 100));
hidden_ptr.Free();
{
UnreachableMemoryInfo info;
ASSERT_TRUE(GetUnreachableMemory(info));
ASSERT_EQ(0U, info.leaks.size());
}
}
TEST(MemunreachableTest, notdumpable) {
ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0));
HiddenPointer hidden_ptr;
ASSERT_TRUE(LogUnreachableMemory(true, 100));
ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 1));
}
TEST(MemunreachableTest, leak_lots) {
std::vector<HiddenPointer> hidden_ptrs;
hidden_ptrs.resize(1024);
ASSERT_TRUE(LogUnreachableMemory(true, 100));
}

View File

@ -0,0 +1,351 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ThreadCapture.h"
#include <fcntl.h>
#include <pthread.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <algorithm>
#include <functional>
#include <memory>
#include <thread>
#include <gtest/gtest.h>
#include <android-base/unique_fd.h>
#include "Allocator.h"
#include "ScopedDisableMalloc.h"
#include "ScopedPipe.h"
using namespace std::chrono_literals;
class ThreadListTest : public ::testing::TestWithParam<int> {
public:
ThreadListTest() : stop_(false) {}
~ThreadListTest() {
// pthread_join may return before the entry in /proc/pid/task/ is gone,
// loop until ListThreads only finds the main thread so the next test
// doesn't fail.
WaitForThreads();
}
virtual void TearDown() {
ASSERT_TRUE(heap.empty());
}
protected:
template<class Function>
void StartThreads(unsigned int threads, Function&& func) {
threads_.reserve(threads);
tids_.reserve(threads);
for (unsigned int i = 0; i < threads; i++) {
threads_.emplace_back([&, i, threads, this]() {
{
std::lock_guard<std::mutex> lk(m_);
tids_.push_back(gettid());
if (tids_.size() == threads) {
cv_start_.notify_one();
}
}
func();
{
std::unique_lock<std::mutex> lk(m_);
cv_stop_.wait(lk, [&] {return stop_;});
}
});
}
{
std::unique_lock<std::mutex> lk(m_);
cv_start_.wait(lk, [&]{ return tids_.size() == threads; });
}
}
void StopThreads() {
{
std::lock_guard<std::mutex> lk(m_);
stop_ = true;
}
cv_stop_.notify_all();
for (auto i = threads_.begin(); i != threads_.end(); i++) {
i->join();
}
threads_.clear();
tids_.clear();
}
std::vector<pid_t>& tids() {
return tids_;
}
Heap heap;
private:
void WaitForThreads() {
auto tids = TidList{heap};
ThreadCapture thread_capture{getpid(), heap};
for (unsigned int i = 0; i < 100; i++) {
EXPECT_TRUE(thread_capture.ListThreads(tids));
if (tids.size() == 1) {
break;
}
std::this_thread::sleep_for(10ms);
}
EXPECT_EQ(1U, tids.size());
}
std::mutex m_;
std::condition_variable cv_start_;
std::condition_variable cv_stop_;
bool stop_;
std::vector<pid_t> tids_;
std::vector<std::thread> threads_;
};
TEST_F(ThreadListTest, list_one) {
ScopedDisableMallocTimeout disable_malloc;
ThreadCapture thread_capture(getpid(), heap);
auto expected_tids = allocator::vector<pid_t>(1, getpid(), heap);
auto list_tids = allocator::vector<pid_t>(heap);
ASSERT_TRUE(thread_capture.ListThreads(list_tids));
ASSERT_EQ(expected_tids, list_tids);
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc.timed_out());
}
}
TEST_P(ThreadListTest, list_some) {
const unsigned int threads = GetParam() - 1;
StartThreads(threads, [](){});
std::vector<pid_t> expected_tids = tids();
expected_tids.push_back(getpid());
auto list_tids = allocator::vector<pid_t>(heap);
{
ScopedDisableMallocTimeout disable_malloc;
ThreadCapture thread_capture(getpid(), heap);
ASSERT_TRUE(thread_capture.ListThreads(list_tids));
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc.timed_out());
}
}
StopThreads();
std::sort(list_tids.begin(), list_tids.end());
std::sort(expected_tids.begin(), expected_tids.end());
ASSERT_EQ(expected_tids.size(), list_tids.size());
EXPECT_TRUE(std::equal(expected_tids.begin(), expected_tids.end(), list_tids.begin()));
}
INSTANTIATE_TEST_CASE_P(ThreadListTest, ThreadListTest, ::testing::Values(1, 2, 10, 1024));
class ThreadCaptureTest : public ThreadListTest {
public:
ThreadCaptureTest() {}
~ThreadCaptureTest() {}
void Fork(std::function<void()>&& child_init,
std::function<void()>&& child_cleanup,
std::function<void(pid_t)>&& parent) {
ScopedPipe start_pipe;
ScopedPipe stop_pipe;
int pid = fork();
if (pid == 0) {
// child
child_init();
EXPECT_EQ(1, TEMP_FAILURE_RETRY(write(start_pipe.Sender(), "+", 1))) << strerror(errno);
char buf;
EXPECT_EQ(1, TEMP_FAILURE_RETRY(read(stop_pipe.Receiver(), &buf, 1))) << strerror(errno);
child_cleanup();
_exit(0);
} else {
// parent
ASSERT_GT(pid, 0);
char buf;
ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(start_pipe.Receiver(), &buf, 1))) << strerror(errno);
parent(pid);
ASSERT_EQ(1, TEMP_FAILURE_RETRY(write(stop_pipe.Sender(), "+", 1))) << strerror(errno);
siginfo_t info{};
ASSERT_EQ(0, TEMP_FAILURE_RETRY(waitid(P_PID, pid, &info, WEXITED))) << strerror(errno);
}
}
};
TEST_P(ThreadCaptureTest, capture_some) {
const unsigned int threads = GetParam();
Fork([&](){
// child init
StartThreads(threads - 1, [](){});
},
[&](){
// child cleanup
StopThreads();
},
[&](pid_t child){
// parent
ASSERT_GT(child, 0);
{
ScopedDisableMallocTimeout disable_malloc;
ThreadCapture thread_capture(child, heap);
auto list_tids = allocator::vector<pid_t>(heap);
ASSERT_TRUE(thread_capture.ListThreads(list_tids));
ASSERT_EQ(threads, list_tids.size());
ASSERT_TRUE(thread_capture.CaptureThreads());
auto thread_info = allocator::vector<ThreadInfo>(heap);
ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
ASSERT_EQ(threads, thread_info.size());
ASSERT_TRUE(thread_capture.ReleaseThreads());
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc.timed_out());
}
}
});
}
INSTANTIATE_TEST_CASE_P(ThreadCaptureTest, ThreadCaptureTest, ::testing::Values(1, 2, 10, 1024));
TEST_F(ThreadCaptureTest, capture_kill) {
int ret = fork();
if (ret == 0) {
// child
sleep(10);
} else {
// parent
ASSERT_GT(ret, 0);
{
ScopedDisableMallocTimeout disable_malloc;
ThreadCapture thread_capture(ret, heap);
thread_capture.InjectTestFunc([&](pid_t tid){
syscall(SYS_tgkill, ret, tid, SIGKILL);
usleep(10000);
});
auto list_tids = allocator::vector<pid_t>(heap);
ASSERT_TRUE(thread_capture.ListThreads(list_tids));
ASSERT_EQ(1U, list_tids.size());
ASSERT_FALSE(thread_capture.CaptureThreads());
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc.timed_out());
}
}
}
}
TEST_F(ThreadCaptureTest, capture_signal) {
const int sig = SIGUSR1;
ScopedPipe pipe;
// For signal handler
static ScopedPipe* g_pipe;
Fork([&](){
// child init
pipe.CloseReceiver();
g_pipe = &pipe;
struct sigaction act{};
act.sa_handler = [](int){
char buf = '+';
write(g_pipe->Sender(), &buf, 1);
g_pipe->CloseSender();
};
sigaction(sig, &act, NULL);
sigset_t set;
sigemptyset(&set);
sigaddset(&set, sig);
pthread_sigmask(SIG_UNBLOCK, &set, NULL);
},
[&](){
// child cleanup
g_pipe = nullptr;
pipe.Close();
},
[&](pid_t child){
// parent
ASSERT_GT(child, 0);
pipe.CloseSender();
{
ScopedDisableMallocTimeout disable_malloc;
ThreadCapture thread_capture(child, heap);
thread_capture.InjectTestFunc([&](pid_t tid){
syscall(SYS_tgkill, child, tid, sig);
usleep(10000);
});
auto list_tids = allocator::vector<pid_t>(heap);
ASSERT_TRUE(thread_capture.ListThreads(list_tids));
ASSERT_EQ(1U, list_tids.size());
ASSERT_TRUE(thread_capture.CaptureThreads());
auto thread_info = allocator::vector<ThreadInfo>(heap);
ASSERT_TRUE(thread_capture.CapturedThreadInfo(thread_info));
ASSERT_EQ(1U, thread_info.size());
ASSERT_TRUE(thread_capture.ReleaseThreads());
usleep(100000);
char buf;
ASSERT_EQ(1, TEMP_FAILURE_RETRY(read(pipe.Receiver(), &buf, 1)));
ASSERT_EQ(buf, '+');
if (!HasFailure()) {
ASSERT_FALSE(disable_malloc.timed_out());
}
}
});
}