mirror of https://gitee.com/openkylin/qemu.git
Remove PAGE_RESERVED
The usermode PAGE_RESERVED code is not required by the current mmap implementation, and is already broken when guest_base != 0. Unfortunately the BSD emulation still uses the old mmap implementation, so we can't rip it out altogether.

Signed-off-by: Paul Brook <paul@codesourcery.com>
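For background on the breakage the message mentions: user-mode QEMU records host-side allocations in the guest page table by translating host pointers with h2g(), and that translation is offset by guest_base. The sketch below uses simplified stand-ins for h2g()/h2g_valid() (the helper bodies and constants are assumptions for illustration, not QEMU's exact cpu-all.h macros) to show how a reservation recorded through h2g() stops lining up once guest_base is nonzero:

#include <stdio.h>

/* Simplified stand-ins for QEMU's h2g()/h2g_valid().  The real macros
   live in cpu-all.h; the guest-range constant here is an assumption. */
static unsigned long guest_base;
#define GUEST_ADDR_MAX 0xffffffffUL        /* pretend 32-bit guest */

static int h2g_valid(unsigned long host_addr)
{
    return host_addr - guest_base <= GUEST_ADDR_MAX;
}

static unsigned long h2g(unsigned long host_addr)
{
    return host_addr - guest_base;         /* host -> guest address */
}

int main(void)
{
    unsigned long host_page = 0x10000000UL; /* a host-side allocation */

    /* guest_base == 0: the reservation lands where expected */
    guest_base = 0;
    printf("guest_base=0x0:        reserve guest page 0x%lx\n",
           h2g(host_page));

    /* guest_base != 0: the same host page now translates differently,
       so a PAGE_RESERVED bit recorded earlier no longer corresponds to
       the host allocation it was meant to protect (or h2g_valid()
       fails and nothing is recorded at all) */
    guest_base = 0x40000000UL;
    printf("guest_base=0x40000000: valid=%d, would reserve 0x%lx\n",
           h2g_valid(host_page), h2g(host_page));
    return 0;
}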
parent 048d179f20
commit 2e9a5713f0
cpu-all.h
@@ -742,7 +742,10 @@ extern unsigned long qemu_host_page_mask;
 /* original state of the write flag (used when tracking self-modifying
    code */
 #define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away.  */
 #define PAGE_RESERVED  0x0020
+#endif
 
 #if defined(CONFIG_USER_ONLY)
 void page_dump(FILE *f);
exec.c (31 changed lines: 2 additions, 29 deletions)
@@ -288,7 +288,7 @@ static void page_init(void)
         qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
 
-#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
     {
 #ifdef HAVE_KINFO_GETVMMAP
         struct kinfo_vmentry *freep;
@@ -324,11 +324,7 @@ static void page_init(void)
 
         last_brk = (unsigned long)sbrk(0);
 
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
         f = fopen("/compat/linux/proc/self/maps", "r");
-#else
-        f = fopen("/proc/self/maps", "r");
-#endif
         if (f) {
             mmap_lock();
 
@@ -365,24 +361,11 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     int i;
 
 #if defined(CONFIG_USER_ONLY)
-    /* We can't use qemu_malloc because it may recurse into a locked mutex.
-       Neither can we record the new pages we reserve while allocating a
-       given page because that may recurse into an unallocated page table
-       entry.  Stuff the allocations we do make into a queue and process
-       them after having completed one entire page table allocation. */
-
-    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
-    int reserve_idx = 0;
-
+    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
 # define ALLOC(P, SIZE)                                 \
     do {                                                \
         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
-        if (h2g_valid(P)) {                             \
-            reserve[reserve_idx] = h2g(P);              \
-            reserve[reserve_idx + 1] = SIZE;            \
-            reserve_idx += 2;                           \
-        }                                               \
     } while (0)
 #else
 # define ALLOC(P, SIZE) \
@@ -417,16 +400,6 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     }
 
 #undef ALLOC
-#if defined(CONFIG_USER_ONLY)
-    for (i = 0; i < reserve_idx; i += 2) {
-        unsigned long addr = reserve[i];
-        unsigned long len = reserve[i + 1];
-
-        page_set_flags(addr & TARGET_PAGE_MASK,
-                       TARGET_PAGE_ALIGN(addr + len),
-                       PAGE_RESERVED);
-    }
-#endif
 
     return pd + (index & (L2_SIZE - 1));
 }
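With the reservation queue gone, the user-mode ALLOC() in page_find_alloc() reduces to a bare anonymous mmap. A minimal compilable sketch of the macro as it reads after this commit (the macro body is copied from the hunk above; the demo main() and the page size are our own additions):

#include <stdio.h>
#include <sys/mman.h>

/* user-mode ALLOC() after this commit: a plain anonymous mmap,
   with no PAGE_RESERVED bookkeeping */
#define ALLOC(P, SIZE)                                  \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)

int main(void)
{
    void *pd;
    ALLOC(pd, 4096);                 /* one page-table node */
    printf("allocated at %p\n", pd); /* MAP_FAILED on error */
    return pd == MAP_FAILED;
}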
linux-user/elfload.c
@@ -2159,12 +2159,6 @@ static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
 {
     struct mm_struct *mm = (struct mm_struct *)priv;
 
-    /*
-     * Don't dump anything that qemu has reserved for internal use.
-     */
-    if (flags & PAGE_RESERVED)
-        return (0);
-
     vma_add_mapping(mm, start, end, flags);
     return (0);
 }
linux-user/mmap.c
@@ -85,14 +85,6 @@ void *qemu_vmalloc(size_t size)
     /* Use map and mark the pages as used.  */
     p = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
-    if (h2g_valid(p)) {
-        /* Allocated region overlaps guest address space.  This may recurse. */
-        abi_ulong addr = h2g(p);
-        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
-                       PAGE_RESERVED);
-    }
-
     mmap_unlock();
     return p;
 }
@@ -484,9 +476,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         }
         start = h2g(host_start);
     } else {
-        int flg;
-        target_ulong addr;
-
         if (start & ~TARGET_PAGE_MASK) {
             errno = EINVAL;
             goto fail;
@@ -504,14 +493,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
             goto fail;
         }
 
-        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
-            flg = page_get_flags(addr);
-            if (flg & PAGE_RESERVED) {
-                errno = ENXIO;
-                goto fail;
-            }
-        }
-
         /* worst case: we cannot map the file because the offset is not
            aligned, so we read it */
         if (!(flags & MAP_ANONYMOUS) &&