am e05ac541: am 9d779bf1: Merge "Upgrade to dlmalloc 2.8.5."

* commit 'e05ac5415e861dee6a3f175c8066ff577736ba1f':
  Upgrade to dlmalloc 2.8.5.
This commit is contained in:
Brian Carlstrom 2012-08-20 17:04:24 -07:00 committed by Android Git Automerger
commit 1768707951
6 changed files with 80 additions and 448 deletions

View File

@@ -1,128 +0,0 @@
/*
* Copyright (C) 2006 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* A wrapper file for dlmalloc.h that defines prototypes for the
* mspace_*() functions, which provide an interface for creating
* multiple heaps.
*/
#ifndef MSPACE_H_
#define MSPACE_H_

/* NOTE: the configuration macros below must agree with the ones used by
 * the matching mspace.c, which compiles dlmalloc with these settings.
 */

/* It's a pain getting the mallinfo stuff to work
 * with Linux, OSX, and klibc, so just turn it off
 * for now.
 * TODO: make mallinfo work
 */
#define NO_MALLINFO 1

/* Allow setting the maximum heap footprint.
 */
#define USE_MAX_ALLOWED_FOOTPRINT 1

#define USE_CONTIGUOUS_MSPACES 1
#if USE_CONTIGUOUS_MSPACES
/* Force dlmalloc to serve every request through MORECORE() so that the
 * memory backing a contiguous mspace can be kept in one mapping. */
#define HAVE_MMAP 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
#endif

#define MSPACES 1
#define ONLY_MSPACES 1
#include "../../../../bionic/libc/bionic/dlmalloc.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
  mspace_usable_size(mspace msp, const void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. mspace_usable_size can be more useful in
  debugging and assertions, for example:

  p = mspace_malloc(msp, n);
  assert(mspace_usable_size(msp, p) >= 256);
*/
size_t mspace_usable_size(mspace, const void*);

#if USE_CONTIGUOUS_MSPACES
/*
  Similar to create_mspace(), but the underlying memory is
  guaranteed to be contiguous. No more than max_capacity
  bytes is ever allocated to the mspace.
*/
mspace create_contiguous_mspace(size_t starting_capacity, size_t max_capacity,
int locked);

/*
  Identical to create_contiguous_mspace, but labels the mapping 'mspace/name'
  instead of 'mspace'
*/
mspace create_contiguous_mspace_with_name(size_t starting_capacity,
size_t max_capacity, int locked, const char *name);

/*
  Identical to create_contiguous_mspace, but uses previously mapped memory.
  base must be page-aligned and span max_capacity bytes.
*/
mspace create_contiguous_mspace_with_base(size_t starting_capacity,
size_t max_capacity, int locked, void *base);

/*
  Unmaps the memory backing msp.  Returns 0 on success.
*/
size_t destroy_contiguous_mspace(mspace msp);

/*
  Returns the position of the "break" within the given mspace.
*/
void *contiguous_mspace_sbrk0(mspace msp);
#endif

/*
  Call the handler for each block in the specified mspace.
  chunkptr and chunklen refer to the heap-level chunk including
  the chunk overhead, and userptr and userlen refer to the
  user-usable part of the chunk. If the chunk is free, userptr
  will be NULL and userlen will be 0. userlen is not guaranteed
  to be the same value passed into malloc() for a given chunk;
  it is >= the requested size.
*/
void mspace_walk_heap(mspace msp,
void(*handler)(const void *chunkptr, size_t chunklen,
const void *userptr, size_t userlen, void *arg), void *harg);

/*
  mspace_walk_free_pages(handler, harg)

  Calls the provided handler on each free region in the specified
  mspace. The memory between start and end are guaranteed not to
  contain any important data, so the handler is free to alter the
  contents in any way. This can be used to advise the OS that large
  free regions may be swapped out.

  The value in harg will be passed to each call of the handler.
*/
void mspace_walk_free_pages(mspace msp,
void(*handler)(void *start, void *end, void *arg), void *harg);

#ifdef __cplusplus
}; /* end of extern "C" */
#endif

#endif /* MSPACE_H_ */

View File

@@ -75,7 +75,6 @@ ifeq ($(WINDOWS_HOST_ONLY),1)
else
commonSources += \
abort_socket.c \
mspace.c \
selector.c \
tztime.c \
multiuser.c \

View File

@@ -14,16 +14,22 @@
* limitations under the License.
*/
/* No-op stubs for functions defined in system/bionic/bionic/dlmalloc.c.
#include "../../../bionic/libc/bionic/dlmalloc.h"
#include "cutils/log.h"
/*
* Stubs for functions defined in bionic/libc/bionic/dlmalloc.c. These
* are used in host builds, as the host libc will not contain these
* functions.
*/
void dlmalloc_walk_free_pages()
void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
void* arg)
{
ALOGW("Called host unimplemented stub: dlmalloc_inspect_all");
}
void dlmalloc_walk_heap()
{
}
void dlmalloc_trim()
int dlmalloc_trim(size_t unused)
{
ALOGW("Called host unimplemented stub: dlmalloc_trim");
return 0;
}

View File

@@ -1,286 +0,0 @@
/* Copyright 2006 The Android Open Source Project */
/* A wrapper file for dlmalloc.c that compiles in the
* mspace_*() functions, which provide an interface for
* creating multiple heaps.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <cutils/ashmem.h>
/* It's a pain getting the mallinfo stuff to work
* with Linux, OSX, and klibc, so just turn it off
* for now.
* TODO: make mallinfo work
*/
#define NO_MALLINFO 1
/* Allow setting the maximum heap footprint.
*/
#define USE_MAX_ALLOWED_FOOTPRINT 1
/* Don't try to trim memory.
* TODO: support this.
*/
#define MORECORE_CANNOT_TRIM 1
/* Use mmap()d anonymous memory to guarantee
* that an mspace is contiguous.
*
* create_mspace() won't work right if this is
* defined, so hide the definition of it and
* break any users at build time.
*/
#define USE_CONTIGUOUS_MSPACES 1
#if USE_CONTIGUOUS_MSPACES
/* This combination of settings forces sys_alloc()
* to always use MORECORE(). It won't expect the
* results to be contiguous, but we'll guarantee
* that they are.
*/
#define HAVE_MMAP 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
/* m is always the appropriate local when MORECORE() is called. */
#define MORECORE(S) contiguous_mspace_morecore(m, S)
#define create_mspace HIDDEN_create_mspace_HIDDEN
#define destroy_mspace HIDDEN_destroy_mspace_HIDDEN
typedef struct malloc_state *mstate0;
static void *contiguous_mspace_morecore(mstate0 m, ssize_t nb);
#endif
#define MSPACES 1
#define ONLY_MSPACES 1
#include "../../../bionic/libc/bionic/dlmalloc.c"
#ifndef PAGESIZE
/* dlmalloc keeps the runtime page size in mparams; init_mparams() must
 * have been called before this is read. */
#define PAGESIZE mparams.page_size
#endif

/* Rounds p up to the next multiple of alignment, which must be a
 * power of two. */
#define ALIGN_UP(p, alignment) \
(((uintptr_t)(p) + (alignment)-1) & ~((alignment)-1))
/* Reimplementation of dlmalloc_usable_size(), which is not compiled in
 * when ONLY_MSPACES is set.  The mspace argument is ignored; it exists
 * only so the signature is consistent with the rest of the mspace_*()
 * functions.
 */
size_t mspace_usable_size(mspace _unused, const void* mem) {
  size_t usable = 0;
  if (mem != 0) {
    const mchunkptr chunk = mem2chunk(mem);
    if (cinuse(chunk))
      usable = chunksize(chunk) - overhead_for(chunk);
  }
  return usable;
}
#if USE_CONTIGUOUS_MSPACES
#include <sys/mman.h>
#include <limits.h>
/* Sentinel stored in mspace_contig_state::magic so code handed an
 * arbitrary mspace can verify it really is a contiguous one. */
#define CONTIG_STATE_MAGIC 0xf00dd00d

/* Bookkeeping for one contiguous mspace.  It sits at the very start of
 * the page-aligned mapping; the mspace itself lives right after it in
 * the same first page (create_contiguous_mspace_with_base() asserts
 * this), which is what lets contiguous_mspace_morecore() recover this
 * struct by masking an mstate pointer down to a page boundary. */
struct mspace_contig_state {
unsigned int magic; /* CONTIG_STATE_MAGIC when valid */
char *brk;          /* current break: end of the R/W region handed to dlmalloc */
char *top;          /* end of the whole reserved mapping (base + max_capacity) */
mspace m;           /* the mspace managed inside this mapping */
};
/* MORECORE() callback used by dlmalloc's sys_alloc() for contiguous
 * mspaces (see the MORECORE(S) define above).  Grows the usable part of
 * the reserved mapping sbrk-style by flipping page protections; it never
 * shrinks (trim is asserted away below).
 *
 * m points into the first page of the mapping, so masking its address
 * down to a page boundary recovers the mspace_contig_state header that
 * create_contiguous_mspace_with_base() stored there.
 *
 * Returns the previous break on success (dlmalloc treats the new memory
 * as starting there), or CMFAIL on failure.
 */
static void *contiguous_mspace_morecore(mstate m, ssize_t nb) {
  struct mspace_contig_state *cs;
  char *oldbrk;
  const unsigned int pagesize = PAGESIZE;

  /* Recover and sanity-check the bookkeeping header at the start of the
   * mapping's first page. */
  cs = (struct mspace_contig_state *)((uintptr_t)m & ~(pagesize-1));
  assert(cs->magic == CONTIG_STATE_MAGIC);
  assert(cs->m == m);
  assert(nb >= 0); //xxx deal with the trim case

  oldbrk = cs->brk;
  if (nb > 0) {
    /* Break to the first page boundary that satisfies the request.
     */
    char *newbrk = (char *)ALIGN_UP(oldbrk + nb, pagesize);
    if (newbrk > cs->top)
      return CMFAIL;

    /* Update the protection on the underlying memory.
     * Pages we've given to dlmalloc are read/write, and
     * pages we haven't are not accessible (read or write
     * will cause a seg fault).
     */
    if (mprotect(cs, newbrk - (char *)cs, PROT_READ | PROT_WRITE) < 0)
      return CMFAIL;
    if (newbrk != cs->top) {
      if (mprotect(newbrk, cs->top - newbrk, PROT_NONE) < 0)
        return CMFAIL;
    }

    cs->brk = newbrk;

    /* Make sure that dlmalloc will merge this block with the
     * initial block that was passed to create_mspace_with_base().
     * We don't care about extern vs. non-extern, so just clear it.
     */
    m->seg.sflags &= ~EXTERN_BIT;
  }
  return oldbrk;
}
/* Builds a contiguous mspace on top of caller-supplied memory.
 * base must be page-aligned and span max_capacity bytes, and
 * max_capacity itself must be page-aligned (both asserted below).
 *
 * Layout afterwards: the mspace_contig_state header occupies the start
 * of the first page, the mspace follows it in that same page, and all
 * pages beyond the current break are PROT_NONE until
 * contiguous_mspace_morecore() hands them out.
 *
 * Returns the new mspace, or 0 on failure.
 */
mspace create_contiguous_mspace_with_base(size_t starting_capacity,
    size_t max_capacity, int locked, void *base) {
  struct mspace_contig_state *cs;
  unsigned int pagesize;
  mstate m;

  init_mparams();
  pagesize = PAGESIZE;
  assert(starting_capacity <= max_capacity);
  assert(((uintptr_t)base & (pagesize-1)) == 0);
  assert(((uintptr_t)max_capacity & (pagesize-1)) == 0);
  starting_capacity = (size_t)ALIGN_UP(starting_capacity, pagesize);

  /* Make the first page read/write. dlmalloc needs to use that page.
   */
  if (mprotect(base, starting_capacity, PROT_READ | PROT_WRITE) < 0) {
    goto error;
  }

  /* Create the mspace, pointing to the memory given.
   */
  m = create_mspace_with_base((char *)base + sizeof(*cs), starting_capacity,
      locked);
  if (m == (mspace)0) {
    goto error;
  }
  /* Make sure that m is in the same page as base.
   */
  assert(((uintptr_t)m & (uintptr_t)~(pagesize-1)) == (uintptr_t)base);
  /* Use some space for the information that our MORECORE needs.
   */
  cs = (struct mspace_contig_state *)base;

  /* Find out exactly how much of the memory the mspace
   * is using.
   */
  cs->brk = m->seg.base + m->seg.size;
  cs->top = (char *)base + max_capacity;
  assert((char *)base <= cs->brk);
  assert(cs->brk <= cs->top);

  /* Prevent access to the memory we haven't handed out yet.
   */
  if (cs->brk != cs->top) {
    /* mprotect() requires page-aligned arguments, but it's possible
     * for cs->brk not to be page-aligned at this point.
     */
    char *prot_brk = (char *)ALIGN_UP(cs->brk, pagesize);
    if ((mprotect(base, prot_brk - (char *)base, PROT_READ | PROT_WRITE) < 0) ||
        (mprotect(prot_brk, cs->top - prot_brk, PROT_NONE) < 0)) {
      goto error;
    }
  }

  cs->m = m;
  cs->magic = CONTIG_STATE_MAGIC;

  return (mspace)m;

error:
  return (mspace)0;
}
/* Creates a contiguous mspace backed by a freshly created ashmem region.
 * The region is labelled "mspace/<name>" when name is non-NULL, or just
 * "mspace" otherwise (truncated to ASHMEM_NAME_LEN).  The full
 * max_capacity (rounded up to a whole page) of address space is
 * reserved up front; physical pages are committed as they are touched.
 *
 * Returns the new mspace, or 0 on failure.
 *
 * Fix: dropped the local `ret`, which was declared but never used.
 */
mspace create_contiguous_mspace_with_name(size_t starting_capacity,
    size_t max_capacity, int locked, char const *name) {
  int fd;
  char buf[ASHMEM_NAME_LEN] = "mspace";
  void *base;
  unsigned int pagesize;
  mstate m;

  if (starting_capacity > max_capacity)
    return (mspace)0;

  init_mparams();
  pagesize = PAGESIZE;

  /* Create the anonymous memory that will back the mspace.
   * This reserves all of the virtual address space we could
   * ever need. Physical pages will be mapped as the memory
   * is touched.
   *
   * Align max_capacity to a whole page.
   */
  max_capacity = (size_t)ALIGN_UP(max_capacity, pagesize);
  if (name)
    snprintf(buf, sizeof(buf), "mspace/%s", name);
  fd = ashmem_create_region(buf, max_capacity);
  if (fd < 0)
    return (mspace)0;

  base = mmap(NULL, max_capacity, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  close(fd);  /* The mapping keeps the region alive; the fd is no longer needed. */
  if (base == MAP_FAILED)
    return (mspace)0;

  /* Make sure that base is at the beginning of a page.
   */
  assert(((uintptr_t)base & (pagesize-1)) == 0);

  m = create_contiguous_mspace_with_base(starting_capacity, max_capacity,
      locked, base);
  if (m == 0) {
    /* Creation failed; release the reservation so it doesn't leak. */
    munmap(base, max_capacity);
  }
  return m;
}
/* Convenience wrapper: creates a contiguous mspace backed by an ashmem
 * region carrying the default "mspace" label. */
mspace create_contiguous_mspace(size_t starting_capacity,
    size_t max_capacity, int locked) {
  const char *default_name = NULL;
  return create_contiguous_mspace_with_name(starting_capacity, max_capacity,
                                            locked, default_name);
}
/* Unmaps the entire mapping behind a contiguous mspace: header, mspace,
 * and any still-reserved pages.  msp must have come from one of the
 * create_contiguous_mspace*() functions.
 *
 * Returns 0 on success (or on a bad-magic argument, after raising
 * USAGE_ERROR_ACTION); on munmap() failure, returns the length that
 * could not be unmapped.
 */
size_t destroy_contiguous_mspace(mspace msp) {
  mstate ms = (mstate)msp;

  if (ok_magic(ms)) {
    struct mspace_contig_state *cs;
    size_t length;
    const unsigned int pagesize = PAGESIZE;

    /* The bookkeeping header lives at the start of the page containing
     * the mspace (see create_contiguous_mspace_with_base()). */
    cs = (struct mspace_contig_state *)((uintptr_t)ms & ~(pagesize-1));
    assert(cs->magic == CONTIG_STATE_MAGIC);
    assert(cs->m == ms);
    length = cs->top - (char *)cs;
    if (munmap((char *)cs, length) != 0)
      return length;
  }
  else {
    USAGE_ERROR_ACTION(ms, ms);
  }
  return 0;
}
/* Returns the current "break" of the given contiguous mspace, i.e. the
 * end of the memory that has been handed to dlmalloc so far.  msp must
 * have come from one of the create_contiguous_mspace*() functions. */
void *contiguous_mspace_sbrk0(mspace msp) {
  const unsigned int pagesize = PAGESIZE;
  struct mspace_contig_state *cs =
      (struct mspace_contig_state *)((uintptr_t)msp & ~(pagesize-1));
  assert(cs->magic == CONTIG_STATE_MAGIC);
  assert(cs->m == (mstate)msp);
  return cs->brk;
}
#endif

View File

@@ -23,10 +23,13 @@
#include <sys/mman.h>
#include <cutils/log.h>
#include <cutils/ashmem.h>
#include <cutils/atomic.h>
#include "codeflinger/CodeCache.h"
#define LOG_TAG "CodeCache"
namespace android {
// ----------------------------------------------------------------------------
@@ -38,12 +41,72 @@ namespace android {
// ----------------------------------------------------------------------------
// A dlmalloc mspace is used to manage the code cache over a mmaped region.
#define HAVE_MMAP 0
#define HAVE_MREMAP 0
#define HAVE_MORECORE 0
#define MALLOC_ALIGNMENT 16
#define MSPACES 1
#define NO_MALLINFO 1
#define ONLY_MSPACES 1
// Custom heap error handling.
#define PROCEED_ON_ERROR 0
static void heap_error(const char* msg, const char* function, void* p);
#define CORRUPTION_ERROR_ACTION(m) \
heap_error("HEAP MEMORY CORRUPTION", __FUNCTION__, NULL)
#define USAGE_ERROR_ACTION(m,p) \
heap_error("ARGUMENT IS INVALID HEAP ADDRESS", __FUNCTION__, p)
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wempty-body"
#include "../../../../bionic/libc/upstream-dlmalloc/malloc.c"
#pragma GCC diagnostic warning "-Wstrict-aliasing"
#pragma GCC diagnostic warning "-Wempty-body"
// Fatal handler wired into dlmalloc through the CORRUPTION_ERROR_ACTION /
// USAGE_ERROR_ACTION macros above: logs the failure, then deliberately
// writes through the 0xdeadbaad sentinel address to crash the process so
// the resulting dump captures the memory around p.
static void heap_error(const char* msg, const char* function, void* p) {
    ALOG(LOG_FATAL, LOG_TAG, "@@@ ABORTING: CODE FLINGER: %s IN %s addr=%p",
         msg, function, p);
    /* So that we can get a memory dump around p */
    *((int **) 0xdeadbaad) = (int *) p;
}
// ----------------------------------------------------------------------------
static void* gExecutableStore = NULL;
static mspace gMspace = NULL;
const size_t kMaxCodeCacheCapacity = 1024 * 1024;
// Lazily creates the executable code store (an ashmem-backed RWX mapping)
// and the dlmalloc mspace that manages it, capped at
// kMaxCodeCacheCapacity.  Any setup failure is fatal.
// NOTE(review): no locking around the lazy init — assumes first use is
// serialized by the caller; verify against CodeCache's locking.
//
// Fix: mmap() reports failure by returning MAP_FAILED, not NULL, so the
// previous `gExecutableStore == NULL` check could never detect a failed
// mapping.
static mspace getMspace()
{
    if (gExecutableStore == NULL) {
        int fd = ashmem_create_region("CodeFlinger code cache",
                                      kMaxCodeCacheCapacity);
        LOG_ALWAYS_FATAL_IF(fd < 0,
                            "Creating code cache, ashmem_create_region "
                            "failed with error '%s'", strerror(errno));

        gExecutableStore = mmap(NULL, kMaxCodeCacheCapacity,
                                PROT_READ | PROT_WRITE | PROT_EXEC,
                                MAP_PRIVATE, fd, 0);
        LOG_ALWAYS_FATAL_IF(gExecutableStore == MAP_FAILED,
                            "Creating code cache, mmap failed with error "
                            "'%s'", strerror(errno));
        close(fd);  // The mapping keeps the region alive.

        gMspace = create_mspace_with_base(gExecutableStore, kMaxCodeCacheCapacity,
                                          /*locked=*/ false);
        // Never let the heap grow past the store we mapped for it.
        mspace_set_footprint_limit(gMspace, kMaxCodeCacheCapacity);
    }
    return gMspace;
}
// Allocates `size` bytes for this Assembly out of the shared executable
// store (see getMspace()) and starts the reference count at 1.  The code
// cache has a fixed capacity, so allocation failure is treated as fatal.
Assembly::Assembly(size_t size)
    : mCount(1), mSize(0)
{
    mBase = (uint32_t*)mspace_malloc(getMspace(), size);
    LOG_ALWAYS_FATAL_IF(mBase == NULL,
                        "Failed to create Assembly of size %zd in executable "
                        "store of size %zd", size, kMaxCodeCacheCapacity);
    mSize = size;
    ensureMbaseExecutable();
}
Assembly::~Assembly()
@@ -77,31 +140,13 @@ uint32_t* Assembly::base() const
// Resizes this Assembly's buffer via mspace_realloc(), which may move the
// block; mBase is updated to the (possibly new) location.  Failure to
// reallocate is fatal, since the code cache has a fixed capacity.
// Returns the value of size() after the update.
ssize_t Assembly::resize(size_t newSize)
{
    mBase = (uint32_t*)mspace_realloc(getMspace(), mBase, newSize);
    LOG_ALWAYS_FATAL_IF(mBase == NULL,
                        "Failed to resize Assembly to %zd in code cache "
                        "of size %zd", newSize, kMaxCodeCacheCapacity);
    mSize = newSize;
    ensureMbaseExecutable();
    return size();
}
// Returns the lazily-created mspace shared by all Assembly allocations:
// a contiguous mspace starting at 2 KiB that may grow up to 1 MiB.
// Created on first call via the function-local static.
mspace Assembly::getMspace()
{
    static mspace msp = create_contiguous_mspace(2 * 1024, 1024 * 1024, /*locked=*/ false);
    return msp;
}
// Marks the pages spanned by [mBase, mBase + mSize) readable, writable,
// and executable so generated code can run.  mprotect() requires a
// page-aligned start, so mBase is rounded down to a page boundary and
// the length extended to compensate.  On failure the buffer is freed
// and mBase is set to NULL; callers check mBase afterwards (see the
// fatal checks at the call sites).
void Assembly::ensureMbaseExecutable()
{
    long pagesize = sysconf(_SC_PAGESIZE);
    long pagemask = ~(pagesize - 1); // assumes pagesize is a power of 2

    uint32_t* pageStart = (uint32_t*) (((uintptr_t) mBase) & pagemask);
    size_t adjustedLength = (mBase - pageStart) * sizeof(uint32_t) + mSize;
    if (mBase && mprotect(pageStart, adjustedLength, PROT_READ | PROT_WRITE | PROT_EXEC) != 0) {
        mspace_free(getMspace(), mBase);
        mBase = NULL;
    }
}
// ----------------------------------------------------------------------------
CodeCache::CodeCache(size_t size)

View File

@@ -22,7 +22,6 @@
#include <stdint.h>
#include <pthread.h>
#include <sys/types.h>
#include <cutils/mspace.h>
#include "tinyutils/KeyedVector.h"
#include "tinyutils/smartpointer.h"
@@ -68,9 +67,6 @@ public:
typedef void weakref_type;
private:
static mspace getMspace();
void ensureMbaseExecutable();
mutable int32_t mCount;
uint32_t* mBase;
size_t mSize;