Merge "Demand read load bias for a map."

Christopher Ferris 2017-12-05 23:36:44 +00:00 committed by Gerrit Code Review
commit 1ae6d14f93
15 changed files with 346 additions and 37 deletions


@@ -417,7 +417,7 @@ static void dump_all_maps(Backtrace* backtrace, BacktraceMap* map, log_t* log, p
"memory map (%zu entr%s):",
map->size(), map->size() == 1 ? "y" : "ies");
if (print_fault_address_marker) {
if (map->begin() != map->end() && addr < map->begin()->start) {
if (map->begin() != map->end() && addr < (*map->begin())->start) {
_LOG(log, logtype::MAPS, "\n--->Fault address falls at %s before any mapped regions\n",
get_addr_string(addr).c_str());
print_fault_address_marker = false;
@@ -429,49 +429,50 @@ static void dump_all_maps(Backtrace* backtrace, BacktraceMap* map, log_t* log, p
}
std::string line;
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
for (auto it = map->begin(); it != map->end(); ++it) {
const backtrace_map_t* entry = *it;
line = " ";
if (print_fault_address_marker) {
if (addr < it->start) {
if (addr < entry->start) {
_LOG(log, logtype::MAPS, "--->Fault address falls at %s between mapped regions\n",
get_addr_string(addr).c_str());
print_fault_address_marker = false;
} else if (addr >= it->start && addr < it->end) {
} else if (addr >= entry->start && addr < entry->end) {
line = "--->";
print_fault_address_marker = false;
}
}
line += get_addr_string(it->start) + '-' + get_addr_string(it->end - 1) + ' ';
if (it->flags & PROT_READ) {
line += get_addr_string(entry->start) + '-' + get_addr_string(entry->end - 1) + ' ';
if (entry->flags & PROT_READ) {
line += 'r';
} else {
line += '-';
}
if (it->flags & PROT_WRITE) {
if (entry->flags & PROT_WRITE) {
line += 'w';
} else {
line += '-';
}
if (it->flags & PROT_EXEC) {
if (entry->flags & PROT_EXEC) {
line += 'x';
} else {
line += '-';
}
line += StringPrintf(" %8" PRIxPTR " %8" PRIxPTR, it->offset, it->end - it->start);
line += StringPrintf(" %8" PRIxPTR " %8" PRIxPTR, entry->offset, entry->end - entry->start);
bool space_needed = true;
if (it->name.length() > 0) {
if (entry->name.length() > 0) {
space_needed = false;
line += " " + it->name;
line += " " + entry->name;
std::string build_id;
if ((it->flags & PROT_READ) && elf_get_build_id(backtrace, it->start, &build_id)) {
if ((entry->flags & PROT_READ) && elf_get_build_id(backtrace, entry->start, &build_id)) {
line += " (BuildId: " + build_id + ")";
}
}
if (it->load_bias != 0) {
if (entry->load_bias != 0) {
if (space_needed) {
line += ' ';
}
line += StringPrintf(" (load bias 0x%" PRIxPTR ")", it->load_bias);
line += StringPrintf(" (load bias 0x%" PRIxPTR ")", entry->load_bias);
}
_LOG(log, logtype::MAPS, "%s\n", line.c_str());
}
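For reference, the loop above now binds each entry through the pointer the iterator yields instead of dereferencing the iterator directly. Below is a minimal standalone sketch of the same permission-string formatting this hunk performs, using a hypothetical map_entry struct in place of backtrace_map_t:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <string>
#include <sys/mman.h>  // PROT_READ, PROT_WRITE, PROT_EXEC

// Hypothetical stand-in for backtrace_map_t; only the fields used here.
struct map_entry {
  uintptr_t start;
  uintptr_t end;
  int flags;
  std::string name;
};

// Build the "rwx"/"---" permission string the tombstone map dump prints.
static std::string perm_string(int flags) {
  std::string perms;
  perms += (flags & PROT_READ) ? 'r' : '-';
  perms += (flags & PROT_WRITE) ? 'w' : '-';
  perms += (flags & PROT_EXEC) ? 'x' : '-';
  return perms;
}

int main() {
  map_entry entry{0x1000, 0x2000, PROT_READ | PROT_EXEC, "libexample.so"};
  printf("%" PRIxPTR "-%" PRIxPTR " %s %s\n", entry.start, entry.end - 1,
         perm_string(entry.flags).c_str(), entry.name.c_str());
  return 0;
}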


@@ -40,9 +40,10 @@ BacktraceMap::~BacktraceMap() {
void BacktraceMap::FillIn(uintptr_t addr, backtrace_map_t* map) {
ScopedBacktraceMapIteratorLock lock(this);
for (BacktraceMap::const_iterator it = begin(); it != end(); ++it) {
if (addr >= it->start && addr < it->end) {
*map = *it;
for (auto it = begin(); it != end(); ++it) {
const backtrace_map_t* entry = *it;
if (addr >= entry->start && addr < entry->end) {
*map = *entry;
return;
}
}
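FillIn still performs a linear scan for the entry whose half-open [start, end) range contains the address; it simply reads through the pointer now. A minimal standalone version of that containment search, with a hypothetical range struct rather than the libbacktrace types:

#include <cstdint>
#include <vector>

struct range { uintptr_t start; uintptr_t end; };  // half-open [start, end)

// Return the entry containing addr, or nullptr if no range covers it.
static const range* find_range(const std::vector<range>& ranges, uintptr_t addr) {
  for (const range& r : ranges) {
    if (addr >= r.start && addr < r.end) {
      return &r;
    }
  }
  return nullptr;
}

int main() {
  std::vector<range> maps{{0x1000, 0x2000}, {0x3000, 0x5000}};
  const range* hit = find_range(maps, 0x3010);
  return hit != nullptr ? 0 : 1;  // exits 0: 0x3010 falls inside 0x3000-0x5000
}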


@@ -71,8 +71,19 @@ void UnwindStackMap::FillIn(uintptr_t addr, backtrace_map_t* map) {
if (map_info == nullptr) {
return;
}
unwindstack::Elf* elf = map_info->GetElf(process_memory_, true);
map->load_bias = elf->GetLoadBias();
map->load_bias = map_info->GetLoadBias(process_memory_);
}
uint64_t UnwindStackMap::GetLoadBias(size_t index) {
if (index >= stack_maps_->Total()) {
return 0;
}
unwindstack::MapInfo* map_info = stack_maps_->Get(index);
if (map_info == nullptr) {
return 0;
}
return map_info->GetLoadBias(process_memory_);
}
std::string UnwindStackMap::GetFunctionName(uintptr_t pc, uintptr_t* offset) {
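The base class gains a GetLoadBias(index) hook that defaults to 0 (see the BacktraceMap.h hunk below); this unwindstack-backed map overrides it and falls back to 0 for out-of-range or missing entries. A rough sketch of that override pattern, with placeholder class names standing in for BacktraceMap and UnwindStackMap:

#include <cstdint>
#include <utility>
#include <vector>

// The generic map answers 0; a backend that can actually read ELF data
// supplies the real value on demand.
class GenericMap {
 public:
  virtual ~GenericMap() = default;
  virtual uint64_t GetLoadBias(size_t /* index */) { return 0; }
};

class ElfBackedMap : public GenericMap {
 public:
  explicit ElfBackedMap(std::vector<uint64_t> biases) : biases_(std::move(biases)) {}
  uint64_t GetLoadBias(size_t index) override {
    if (index >= biases_.size()) {
      return 0;  // defensive default, mirroring the bounds check above
    }
    return biases_[index];
  }
 private:
  std::vector<uint64_t> biases_;  // stands in for the unwindstack::Maps lookup
};

int main() {
  ElfBackedMap map({0x0, 0xe000});
  return map.GetLoadBias(1) == 0xe000 ? 0 : 1;
}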


@@ -42,6 +42,8 @@ class UnwindStackMap : public BacktraceMap {
const std::shared_ptr<unwindstack::Memory>& process_memory() { return process_memory_; }
protected:
uint64_t GetLoadBias(size_t index) override;
std::unique_ptr<unwindstack::Maps> stack_maps_;
std::shared_ptr<unwindstack::Memory> process_memory_;
};


@@ -171,10 +171,12 @@ TEST(libbacktrace, DISABLED_generate_offline_testdata) {
testdata += android::base::StringPrintf("pid: %d tid: %d\n", getpid(), arg.tid);
// 2. Dump maps
for (auto it = map->begin(); it != map->end(); ++it) {
testdata += android::base::StringPrintf(
"map: start: %" PRIxPTR " end: %" PRIxPTR " offset: %" PRIxPTR " load_bias: %" PRIxPTR
" flags: %d name: %s\n",
it->start, it->end, it->offset, it->load_bias, it->flags, it->name.c_str());
const backtrace_map_t* entry = *it;
testdata +=
android::base::StringPrintf("map: start: %" PRIxPTR " end: %" PRIxPTR " offset: %" PRIxPTR
" load_bias: %" PRIxPTR " flags: %d name: %s\n",
entry->start, entry->end, entry->offset, entry->load_bias,
entry->flags, entry->name.c_str());
}
// 3. Dump registers
testdata += android::base::StringPrintf("registers: %zu ", sizeof(arg.unw_context));


@@ -857,6 +857,34 @@ struct map_test_t {
static bool map_sort(map_test_t i, map_test_t j) { return i.start < j.start; }
static std::string GetTestMapsAsString(const std::vector<map_test_t>& maps) {
if (maps.size() == 0) {
return "No test map entries\n";
}
std::string map_txt;
for (auto map : maps) {
map_txt += android::base::StringPrintf("%" PRIxPTR "-%" PRIxPTR "\n", map.start, map.end);
}
return map_txt;
}
static std::string GetMapsAsString(BacktraceMap* maps) {
if (maps->size() == 0) {
return "No map entries\n";
}
std::string map_txt;
for (const backtrace_map_t* map : *maps) {
map_txt += android::base::StringPrintf(
"%" PRIxPTR "-%" PRIxPTR " flags: 0x%x offset: 0x%" PRIxPTR " load_bias: 0x%" PRIxPTR,
map->start, map->end, map->flags, map->offset, map->load_bias);
if (!map->name.empty()) {
map_txt += ' ' + map->name;
}
map_txt += '\n';
}
return map_txt;
}
static void VerifyMap(pid_t pid) {
char buffer[4096];
snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);
@@ -875,12 +903,20 @@ static void VerifyMap(pid_t pid) {
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));
// Basic test that verifies that the map is in the expected order.
ScopedBacktraceMapIteratorLock lock(map.get());
std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
ASSERT_TRUE(test_it != test_maps.end());
ASSERT_EQ(test_it->start, it->start);
ASSERT_EQ(test_it->end, it->end);
auto test_it = test_maps.begin();
for (auto it = map->begin(); it != map->end(); ++it) {
ASSERT_TRUE(test_it != test_maps.end()) << "Mismatch in number of maps, expected test maps:\n"
<< GetTestMapsAsString(test_maps) << "Actual maps:\n"
<< GetMapsAsString(map.get());
ASSERT_EQ(test_it->start, (*it)->start) << "Mismatch in map data, expected test maps:\n"
<< GetTestMapsAsString(test_maps) << "Actual maps:\n"
<< GetMapsAsString(map.get());
ASSERT_EQ(test_it->end, (*it)->end) << "Mismatch in map data, expected test maps:\n"
<< GetTestMapsAsString(test_maps) << "Actual maps:\n"
<< GetMapsAsString(map.get());
// Make sure the load bias gets set to a value.
ASSERT_NE(static_cast<uint64_t>(-1), (*it)->load_bias) << "Found uninitialized load_bias\n"
<< GetMapsAsString(map.get());
++test_it;
}
ASSERT_TRUE(test_it == test_maps.end());
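VerifyMap builds its expected list by parsing /proc/<pid>/maps, then walks the BacktraceMap in lockstep and now also checks that load_bias was actually filled in (the value -1 marks an entry whose bias has not been read yet). A standalone sketch of the address-range parsing half, reading the current process's own maps:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <vector>

struct map_test_t { uintptr_t start; uintptr_t end; };

int main() {
  std::vector<map_test_t> maps;
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == nullptr) {
    return 1;
  }
  char line[4096];
  while (fgets(line, sizeof(line), fp) != nullptr) {
    map_test_t entry;
    // Each line starts with "start-end perms offset dev inode path".
    if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR, &entry.start, &entry.end) == 2) {
      maps.push_back(entry);
    }
  }
  fclose(fp);
  printf("parsed %zu map entries\n", maps.size());
  return maps.empty() ? 1 : 0;
}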


@@ -30,6 +30,7 @@
#endif
#include <deque>
#include <iterator>
#include <string>
#include <vector>
@@ -61,6 +62,49 @@ public:
virtual ~BacktraceMap();
class iterator : public std::iterator<std::bidirectional_iterator_tag, backtrace_map_t*> {
public:
iterator(BacktraceMap* map, size_t index) : map_(map), index_(index) {}
iterator& operator++() {
index_++;
return *this;
}
iterator& operator++(int increment) {
index_ += increment;
return *this;
}
iterator& operator--() {
index_--;
return *this;
}
iterator& operator--(int decrement) {
index_ -= decrement;
return *this;
}
bool operator==(const iterator& rhs) { return this->index_ == rhs.index_; }
bool operator!=(const iterator& rhs) { return this->index_ != rhs.index_; }
const backtrace_map_t* operator*() {
if (index_ >= map_->size()) {
return nullptr;
}
backtrace_map_t* map = &map_->maps_[index_];
if (map->load_bias == static_cast<uintptr_t>(-1)) {
map->load_bias = map_->GetLoadBias(index_);
}
return map;
}
private:
BacktraceMap* map_ = nullptr;
size_t index_ = 0;
};
iterator begin() { return iterator(this, 0); }
iterator end() { return iterator(this, maps_.size()); }
// Fill in the map data structure for the given address.
virtual void FillIn(uintptr_t addr, backtrace_map_t* map);
@@ -89,14 +133,6 @@ public:
virtual void LockIterator() {}
virtual void UnlockIterator() {}
typedef std::deque<backtrace_map_t>::iterator iterator;
iterator begin() { return maps_.begin(); }
iterator end() { return maps_.end(); }
typedef std::deque<backtrace_map_t>::const_iterator const_iterator;
const_iterator begin() const { return maps_.begin(); }
const_iterator end() const { return maps_.end(); }
size_t size() const { return maps_.size(); }
virtual bool Build();
@@ -114,6 +150,8 @@ public:
protected:
BacktraceMap(pid_t pid);
virtual uint64_t GetLoadBias(size_t /* index */) { return 0; }
virtual bool ParseLine(const char* line, backtrace_map_t* map);
pid_t pid_;
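The new iterator above is the heart of the change: entries start with load_bias set to the sentinel uintptr_t(-1), and the first dereference that sees the sentinel asks the subclass for the real value, so the ELF data is only touched when someone actually looks at that entry. A self-contained analogue of the lazy-fill-on-dereference pattern, with toy types rather than the libbacktrace classes:

#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>

struct entry_t {
  uintptr_t start;
  uintptr_t load_bias;  // static_cast<uintptr_t>(-1) means "not read yet"
};

class LazyMap {
 public:
  class iterator {
   public:
    iterator(LazyMap* map, size_t index) : map_(map), index_(index) {}
    iterator& operator++() { ++index_; return *this; }
    bool operator!=(const iterator& rhs) const { return index_ != rhs.index_; }
    const entry_t* operator*() {
      entry_t* entry = &map_->entries_[index_];
      if (entry->load_bias == static_cast<uintptr_t>(-1)) {
        entry->load_bias = map_->ComputeLoadBias(index_);  // demand read on first touch
      }
      return entry;
    }
   private:
    LazyMap* map_;
    size_t index_;
  };

  iterator begin() { return iterator(this, 0); }
  iterator end() { return iterator(this, entries_.size()); }
  void Add(uintptr_t start) { entries_.push_back({start, static_cast<uintptr_t>(-1)}); }

 private:
  // Stand-in for the expensive ELF read; a real map would parse program headers here.
  uintptr_t ComputeLoadBias(size_t index) { return entries_[index].start & 0xf000; }
  std::deque<entry_t> entries_;
};

int main() {
  LazyMap map;
  map.Add(0x1234);
  map.Add(0xe567);
  for (auto it = map.begin(); it != map.end(); ++it) {
    std::cout << std::hex << (*it)->start << " load_bias " << (*it)->load_bias << "\n";
  }
  return 0;
}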


@@ -118,6 +118,7 @@ cc_test {
"tests/ElfTestUtils.cpp",
"tests/LogFake.cpp",
"tests/MapInfoGetElfTest.cpp",
"tests/MapInfoGetLoadBiasTest.cpp",
"tests/MapsTest.cpp",
"tests/MemoryBufferTest.cpp",
"tests/MemoryFake.cpp",


@@ -215,4 +215,22 @@ ElfInterface* Elf::CreateInterfaceFromMemory(Memory* memory) {
return interface.release();
}
uint64_t Elf::GetLoadBias(Memory* memory) {
if (!IsValidElf(memory)) {
return 0;
}
uint8_t class_type;
if (!memory->Read(EI_CLASS, &class_type, 1)) {
return 0;
}
if (class_type == ELFCLASS32) {
return ElfInterface::GetLoadBias<Elf32_Ehdr, Elf32_Phdr>(memory);
} else if (class_type == ELFCLASS64) {
return ElfInterface::GetLoadBias<Elf64_Ehdr, Elf64_Phdr>(memory);
}
return 0;
}
} // namespace unwindstack
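Elf::GetLoadBias only needs the e_ident class byte to decide between the 32-bit and 64-bit program-header layouts. A standalone sketch of that dispatch, reading the identification bytes from an ELF file on disk (the real code reads them through a Memory object):

#include <elf.h>
#include <cstdio>
#include <cstring>

// Report whether a file is a 32-bit or 64-bit ELF, based on e_ident[EI_CLASS].
int main(int argc, char** argv) {
  const char* path = argc > 1 ? argv[1] : "/proc/self/exe";
  FILE* fp = fopen(path, "rb");
  if (fp == nullptr) {
    return 1;
  }
  unsigned char ident[EI_NIDENT];
  if (fread(ident, 1, sizeof(ident), fp) != sizeof(ident) ||
      memcmp(ident, ELFMAG, SELFMAG) != 0) {
    fclose(fp);
    return 1;
  }
  fclose(fp);
  if (ident[EI_CLASS] == ELFCLASS32) {
    printf("32-bit ELF: would use Elf32_Ehdr/Elf32_Phdr\n");
  } else if (ident[EI_CLASS] == ELFCLASS64) {
    printf("64-bit ELF: would use Elf64_Ehdr/Elf64_Phdr\n");
  } else {
    printf("unknown ELF class %d\n", ident[EI_CLASS]);
  }
  return 0;
}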


@@ -147,6 +147,26 @@ bool ElfInterface::ReadAllHeaders(uint64_t* load_bias) {
return true;
}
template <typename EhdrType, typename PhdrType>
uint64_t ElfInterface::GetLoadBias(Memory* memory) {
EhdrType ehdr;
if (!memory->Read(0, &ehdr, sizeof(ehdr))) {
return 0;
}
uint64_t offset = ehdr.e_phoff;
for (size_t i = 0; i < ehdr.e_phnum; i++, offset += ehdr.e_phentsize) {
PhdrType phdr;
if (!memory->Read(offset, &phdr, sizeof(phdr))) {
return 0;
}
if (phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
return phdr.p_vaddr;
}
}
return 0;
}
template <typename EhdrType, typename PhdrType>
bool ElfInterface::ReadProgramHeaders(const EhdrType& ehdr, uint64_t* load_bias) {
uint64_t offset = ehdr.e_phoff;
@@ -421,4 +441,7 @@ template bool ElfInterface::GetFunctionNameWithTemplate<Elf64_Sym>(uint64_t, uin
template void ElfInterface::GetMaxSizeWithTemplate<Elf32_Ehdr>(Memory*, uint64_t*);
template void ElfInterface::GetMaxSizeWithTemplate<Elf64_Ehdr>(Memory*, uint64_t*);
template uint64_t ElfInterface::GetLoadBias<Elf32_Ehdr, Elf32_Phdr>(Memory*);
template uint64_t ElfInterface::GetLoadBias<Elf64_Ehdr, Elf64_Phdr>(Memory*);
} // namespace unwindstack
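The load bias here is defined as the p_vaddr of the first PT_LOAD program header whose p_offset is 0, i.e. the link-time virtual address of the segment that begins at file offset 0. A standalone, 64-bit-only sketch of the same scan over an on-disk ELF (the real code templates over Elf32/Elf64 and reads target memory rather than a file):

#include <elf.h>
#include <cinttypes>
#include <cstdio>

// Compute the load bias of a 64-bit ELF file: p_vaddr of the first PT_LOAD
// program header with p_offset == 0, or 0 if none is found.
static uint64_t GetLoadBias64(FILE* fp) {
  Elf64_Ehdr ehdr;
  if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
    return 0;
  }
  for (size_t i = 0; i < ehdr.e_phnum; i++) {
    Elf64_Phdr phdr;
    if (fseek(fp, ehdr.e_phoff + i * ehdr.e_phentsize, SEEK_SET) != 0 ||
        fread(&phdr, sizeof(phdr), 1, fp) != 1) {
      return 0;
    }
    if (phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
      return phdr.p_vaddr;
    }
  }
  return 0;
}

int main(int argc, char** argv) {
  const char* path = argc > 1 ? argv[1] : "/proc/self/exe";
  FILE* fp = fopen(path, "rb");
  if (fp == nullptr) {
    return 1;
  }
  printf("load bias of %s: 0x%" PRIx64 "\n", path, GetLoadBias64(fp));
  fclose(fp);
  return 0;
}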


@@ -121,4 +121,23 @@ Elf* MapInfo::GetElf(const std::shared_ptr<Memory>& process_memory, bool init_gn
return elf;
}
uint64_t MapInfo::GetLoadBias(const std::shared_ptr<Memory>& process_memory) {
{
// Make sure no other thread is trying to add the elf to this map.
std::lock_guard<std::mutex> guard(mutex_);
if (elf != nullptr) {
if (elf->valid()) {
return elf->GetLoadBias();
} else {
return 0;
}
}
}
// Call lightweight static function that will only read enough of the
// elf data to get the load bias.
std::unique_ptr<Memory> memory(CreateMemory(process_memory));
return Elf::GetLoadBias(memory.get());
}
} // namespace unwindstack
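MapInfo::GetLoadBias first checks, under the mutex, whether a fully decoded Elf object already exists and can answer from its cached value; only otherwise does it fall back to the lightweight static read, and on that path it does not build or cache an Elf object. A small sketch of that "use the cache if it already exists, otherwise compute cheaply" shape, with placeholder names:

#include <cstdint>
#include <memory>
#include <mutex>
#include <utility>

// Stand-in for a fully parsed ELF that already knows its load bias.
struct ParsedElf {
  uint64_t load_bias;
  bool valid;
};

class MapEntry {
 public:
  uint64_t GetLoadBias() {
    {
      // Only consult the expensive object if some other caller already built it.
      std::lock_guard<std::mutex> guard(mutex_);
      if (elf_ != nullptr) {
        return elf_->valid ? elf_->load_bias : 0;
      }
    }
    // Otherwise do a cheap, read-only computation; do not create or cache elf_.
    return CheapLoadBiasRead();
  }

  void SetElf(std::unique_ptr<ParsedElf> elf) {
    std::lock_guard<std::mutex> guard(mutex_);
    elf_ = std::move(elf);
  }

 private:
  uint64_t CheapLoadBiasRead() { return 0x1000; }  // placeholder for the phdr-only scan

  std::mutex mutex_;
  std::unique_ptr<ParsedElf> elf_;
};

int main() {
  MapEntry entry;
  uint64_t before = entry.GetLoadBias();  // cheap path
  entry.SetElf(std::unique_ptr<ParsedElf>(new ParsedElf{0x1000, true}));
  uint64_t after = entry.GetLoadBias();   // cached path
  return (before == 0x1000 && after == 0x1000) ? 0 : 1;
}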


@@ -74,6 +74,8 @@ class Elf {
static void GetInfo(Memory* memory, bool* valid, uint64_t* size);
static uint64_t GetLoadBias(Memory* memory);
protected:
bool valid_ = false;
uint64_t load_bias_ = 0;


@@ -82,6 +82,9 @@ class ElfInterface {
DwarfSection* eh_frame() { return eh_frame_.get(); }
DwarfSection* debug_frame() { return debug_frame_.get(); }
template <typename EhdrType, typename PhdrType>
static uint64_t GetLoadBias(Memory* memory);
protected:
template <typename AddressType>
void InitHeadersWithTemplate();


@@ -51,6 +51,8 @@ struct MapInfo {
// This function guarantees it will never return nullptr.
Elf* GetElf(const std::shared_ptr<Memory>& process_memory, bool init_gnu_debugdata = false);
uint64_t GetLoadBias(const std::shared_ptr<Memory>& process_memory);
private:
MapInfo(const MapInfo&) = delete;
void operator=(const MapInfo&) = delete;


@@ -0,0 +1,150 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <elf.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <unistd.h>
#include <atomic>
#include <memory>
#include <thread>
#include <vector>
#include <android-base/file.h>
#include <android-base/test_utils.h>
#include <gtest/gtest.h>
#include <unwindstack/Elf.h>
#include <unwindstack/MapInfo.h>
#include <unwindstack/Maps.h>
#include <unwindstack/Memory.h>
#include "ElfFake.h"
#include "ElfTestUtils.h"
#include "MemoryFake.h"
namespace unwindstack {
class MapInfoGetLoadBiasTest : public ::testing::Test {
protected:
void SetUp() override {
memory_ = new MemoryFake;
process_memory_.reset(memory_);
elf_ = new ElfFake(new MemoryFake);
elf_container_.reset(elf_);
map_info_.reset(new MapInfo(0x1000, 0x20000, 0, PROT_READ | PROT_WRITE, ""));
}
void MultipleThreadTest(uint64_t expected_load_bias);
std::shared_ptr<Memory> process_memory_;
MemoryFake* memory_;
ElfFake* elf_;
std::unique_ptr<ElfFake> elf_container_;
std::unique_ptr<MapInfo> map_info_;
};
TEST_F(MapInfoGetLoadBiasTest, no_elf_and_no_valid_elf_in_memory) {
MapInfo info(0x1000, 0x2000, 0, PROT_READ, "");
EXPECT_EQ(0U, info.GetLoadBias(process_memory_));
}
TEST_F(MapInfoGetLoadBiasTest, elf_exists) {
map_info_->elf = elf_container_.release();
elf_->FakeSetLoadBias(0);
EXPECT_EQ(0U, map_info_->GetLoadBias(process_memory_));
elf_->FakeSetLoadBias(0x1000);
EXPECT_EQ(0x1000U, map_info_->GetLoadBias(process_memory_));
}
void MapInfoGetLoadBiasTest::MultipleThreadTest(uint64_t expected_load_bias) {
static constexpr size_t kNumConcurrentThreads = 100;
uint64_t load_bias_values[kNumConcurrentThreads];
std::vector<std::thread*> threads;
std::atomic_bool wait;
wait = true;
// Create all of the threads and have them do the GetLoadBias at the same time
// to make it likely that a race will occur.
for (size_t i = 0; i < kNumConcurrentThreads; i++) {
std::thread* thread = new std::thread([i, this, &wait, &load_bias_values]() {
while (wait)
;
load_bias_values[i] = map_info_->GetLoadBias(process_memory_);
});
threads.push_back(thread);
}
// Set them all going and wait for the threads to finish.
wait = false;
for (auto thread : threads) {
thread->join();
delete thread;
}
// Now verify that all of the threads saw the same, valid load bias value.
for (size_t i = 0; i < kNumConcurrentThreads; i++) {
EXPECT_EQ(expected_load_bias, load_bias_values[i]) << "Thread " << i << " mismatched.";
}
}
TEST_F(MapInfoGetLoadBiasTest, multiple_thread_elf_exists) {
map_info_->elf = elf_container_.release();
elf_->FakeSetLoadBias(0x1000);
MultipleThreadTest(0x1000);
}
static void InitElfData(MemoryFake* memory, uint64_t offset) {
Elf32_Ehdr ehdr;
TestInitEhdr(&ehdr, ELFCLASS32, EM_ARM);
ehdr.e_phoff = 0x5000;
ehdr.e_phnum = 2;
ehdr.e_phentsize = sizeof(Elf32_Phdr);
memory->SetMemory(offset, &ehdr, sizeof(ehdr));
Elf32_Phdr phdr;
memset(&phdr, 0, sizeof(phdr));
phdr.p_type = PT_NULL;
memory->SetMemory(offset + 0x5000, &phdr, sizeof(phdr));
phdr.p_type = PT_LOAD;
phdr.p_offset = 0;
phdr.p_vaddr = 0xe000;
memory->SetMemory(offset + 0x5000 + sizeof(phdr), &phdr, sizeof(phdr));
}
TEST_F(MapInfoGetLoadBiasTest, elf_exists_in_memory) {
InitElfData(memory_, map_info_->start);
EXPECT_EQ(0xe000U, map_info_->GetLoadBias(process_memory_));
}
TEST_F(MapInfoGetLoadBiasTest, multiple_thread_elf_exists_in_memory) {
InitElfData(memory_, map_info_->start);
MultipleThreadTest(0xe000);
}
} // namespace unwindstack
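The MultipleThreadTest harness above, which parks many threads on an atomic flag, flips it, and then checks that every thread observed the same answer, is a general way to smoke-test that a lazily computed value is race-free. A standalone sketch of that pattern around a call_once-protected value (not the gtest fixture above; names are illustrative):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Lazily computed value, safe to read from many threads at once.
class LazyBias {
 public:
  uint64_t Get() {
    std::call_once(once_, [this]() { value_ = 0xe000; });  // stands in for the ELF read
    return value_;
  }
 private:
  std::once_flag once_;
  uint64_t value_ = 0;
};

int main() {
  constexpr size_t kNumConcurrentThreads = 100;
  LazyBias bias;
  uint64_t results[kNumConcurrentThreads];
  std::atomic_bool wait(true);

  // Create all the threads first so they start as close to simultaneously as possible.
  std::vector<std::thread> threads;
  for (size_t i = 0; i < kNumConcurrentThreads; i++) {
    threads.emplace_back([i, &bias, &wait, &results]() {
      while (wait) {
      }
      results[i] = bias.Get();
    });
  }

  // Release every thread at once, then verify they all saw the same value.
  wait = false;
  for (auto& thread : threads) {
    thread.join();
  }
  size_t mismatches = 0;
  for (size_t i = 0; i < kNumConcurrentThreads; i++) {
    if (results[i] != 0xe000) {
      mismatches++;
    }
  }
  printf("%zu mismatches\n", mismatches);
  return mismatches == 0 ? 0 : 1;
}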