linux-user: Fix shmat emulation by honoring host SHMLBA

For those hosts with SHMLBA > getpagesize, we don't automatically
select a guest address that is compatible with the host.  Fix this
by boosting the alignment of guest_base and by adding an extra
alignment argument to mmap_find_vma.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20190519201953.20161-13-richard.henderson@linaro.org>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
Author: Richard Henderson <richard.henderson@linaro.org>, 2019-05-19 13:19:52 -07:00
Committed-by: Laurent Vivier
Commit: 30ab9ef296 (parent: abcac736c1)
4 changed files with 52 additions and 44 deletions
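
The changes below all lean on two power-of-two alignment idioms: "x & -align"
rounds x down to a multiple of align, and ROUND_UP(x, align) rounds it up.  A
minimal standalone sketch (not part of the patch; ROUND_UP is modeled on
QEMU's macro and, like the patch, assumes align is a power of two):

    #include <assert.h>
    #include <stdio.h>

    /* Modeled on QEMU's ROUND_UP(); correct only for power-of-two d.  */
    #define ROUND_UP(n, d) (((n) + (d) - 1) & -(d))

    int main(void)
    {
        unsigned long align = 0x4000;   /* e.g. SHMLBA on many arm hosts */
        unsigned long addr = 0x12345678;

        unsigned long down = addr & -align;          /* 0x12344000 */
        unsigned long up = ROUND_UP(addr, align);    /* 0x12348000 */

        assert((down & (align - 1)) == 0);
        assert((up & (align - 1)) == 0);
        printf("%#lx -> down %#lx, up %#lx\n", addr, down, up);
        return 0;
    }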

--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -3,6 +3,7 @@
 #include <sys/param.h>
 
 #include <sys/resource.h>
+#include <sys/shm.h>
 
 #include "qemu.h"
 #include "disas/disas.h"
@@ -2012,6 +2013,8 @@ unsigned long init_guest_space(unsigned long host_start,
                                unsigned long guest_start,
                                bool fixed)
 {
+    /* In order to use host shmat, we must be able to honor SHMLBA.  */
+    unsigned long align = MAX(SHMLBA, qemu_host_page_size);
     unsigned long current_start, aligned_start;
     int flags;
@@ -2029,7 +2032,7 @@ unsigned long init_guest_space(unsigned long host_start,
     }
 
     /* Setup the initial flags and start address.  */
-    current_start = host_start & qemu_host_page_mask;
+    current_start = host_start & -align;
     flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
     if (fixed) {
         flags |= MAP_FIXED;
@@ -2065,8 +2068,8 @@ unsigned long init_guest_space(unsigned long host_start,
             return (unsigned long)-1;
         }
         munmap((void *)real_start, host_full_size);
-        if (real_start & ~qemu_host_page_mask) {
-            /* The same thing again, but with an extra qemu_host_page_size
+        if (real_start & (align - 1)) {
+            /* The same thing again, but with extra
              * so that we can shift around alignment.
              */
             unsigned long real_size = host_full_size + qemu_host_page_size;
@@ -2079,7 +2082,7 @@ unsigned long init_guest_space(unsigned long host_start,
                 return (unsigned long)-1;
             }
             munmap((void *)real_start, real_size);
-            real_start = HOST_PAGE_ALIGN(real_start);
+            real_start = ROUND_UP(real_start, align);
         }
         current_start = real_start;
     }
@@ -2106,7 +2109,7 @@ unsigned long init_guest_space(unsigned long host_start,
     }
 
     /* Ensure the address is properly aligned.  */
-    if (real_start & ~qemu_host_page_mask) {
+    if (real_start & (align - 1)) {
         /* Ideally, we adjust like
          *
          *    pages: [  ][  ][  ][  ][  ]
@@ -2134,7 +2137,7 @@ unsigned long init_guest_space(unsigned long host_start,
         if (real_start == (unsigned long)-1) {
             return (unsigned long)-1;
         }
-        aligned_start = HOST_PAGE_ALIGN(real_start);
+        aligned_start = ROUND_UP(real_start, align);
     } else {
         aligned_start = real_start;
     }
@@ -2171,7 +2174,7 @@ unsigned long init_guest_space(unsigned long host_start,
          * because of trouble with ARM commpage setup.
          */
         munmap((void *)real_start, real_size);
-        current_start += qemu_host_page_size;
+        current_start += align;
         if (host_start == current_start) {
             /* Theoretically possible if host doesn't have any suitably
              * aligned areas.  Normally the first mmap will fail.
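The retry path above reserves align extra bytes so that an aligned sub-range
can always be carved out of whatever the kernel returns.  In isolation, the
over-allocate-and-trim technique looks like this sketch (illustrative only:
the name alloc_aligned and the trimming are mine; init_guest_space instead
unmaps the whole oversized region and retries at the rounded-up address):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Reserve size bytes starting at an align-aligned address, where align
     * is a power of two, by mapping size + align bytes and trimming the
     * misaligned head and tail.  Returns NULL on failure. */
    static void *alloc_aligned(size_t size, size_t align)
    {
        size_t full = size + align;
        uint8_t *p = mmap(NULL, full, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
        if (p == MAP_FAILED) {
            return NULL;
        }
        uint8_t *aligned = (uint8_t *)(((uintptr_t)p + align - 1)
                                       & -(uintptr_t)align);
        if (aligned > p) {
            munmap(p, aligned - p);                       /* unused head */
        }
        if (aligned + size < p + full) {
            munmap(aligned + size, (p + full) - (aligned + size)); /* tail */
        }
        return aligned;
    }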

--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -202,49 +202,52 @@ unsigned long last_brk;
 
 /* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
    of guest address space.  */
-static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
+static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
+                                        abi_ulong align)
 {
-    abi_ulong addr;
-    abi_ulong end_addr;
+    abi_ulong addr, end_addr, incr = qemu_host_page_size;
     int prot;
-    int looped = 0;
+    bool looped = false;
 
     if (size > reserved_va) {
         return (abi_ulong)-1;
     }
 
-    size = HOST_PAGE_ALIGN(size);
+    /* Note that start and size have already been aligned by mmap_find_vma. */
+
     end_addr = start + size;
-    if (end_addr > reserved_va) {
-        end_addr = reserved_va;
+    if (start > reserved_va - size) {
+        /* Start at the top of the address space.  */
+        end_addr = ((reserved_va - size) & -align) + size;
+        looped = true;
     }
-    addr = end_addr - qemu_host_page_size;
 
+    /* Search downward from END_ADDR, checking to see if a page is in use.  */
+    addr = end_addr;
     while (1) {
+        addr -= incr;
         if (addr > end_addr) {
             if (looped) {
+                /* Failure.  The entire address space has been searched.  */
                 return (abi_ulong)-1;
             }
-            end_addr = reserved_va;
-            addr = end_addr - qemu_host_page_size;
-            looped = 1;
-            continue;
-        }
-        prot = page_get_flags(addr);
-        if (prot) {
-            end_addr = addr;
-        }
-        if (addr && addr + size == end_addr) {
-            break;
-        }
-        addr -= qemu_host_page_size;
-    }
-
-    if (start == mmap_next_start) {
-        mmap_next_start = addr;
+            /* Re-start at the top of the address space.  */
+            addr = end_addr = ((reserved_va - size) & -align) + size;
+            looped = true;
+        } else {
+            prot = page_get_flags(addr);
+            if (prot) {
+                /* Page in use.  Restart below this page.  */
+                addr = end_addr = ((addr - size) & -align) + size;
+            } else if (addr && addr + size == end_addr) {
+                /* Success!  All pages between ADDR and END_ADDR are free.  */
+                if (start == mmap_next_start) {
+                    mmap_next_start = addr;
+                }
+                return addr;
+            }
+        }
     }
-
-    return addr;
 }
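
The restart expression reads more easily with concrete numbers: take
size = 0x3000, align = 0x4000, and suppose the scan finds a busy page at
addr = 0x2a000.  Then ((addr - size) & -align) + size
= (0x27000 & -0x4000) + 0x3000 = 0x24000 + 0x3000 = 0x27000, so the next
candidate window is [0x24000, 0x27000): the highest align-aligned start whose
window fits entirely below the busy page.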
@@ -253,7 +256,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
  * It must be called with mmap_lock() held.
  * Return -1 if error.
  */
-abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
+abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
 {
     void *ptr, *prev;
     abi_ulong addr;
@@ -265,11 +268,12 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
     } else {
         start &= qemu_host_page_mask;
     }
+    start = ROUND_UP(start, align);
     size = HOST_PAGE_ALIGN(size);
 
     if (reserved_va) {
-        return mmap_find_vma_reserved(start, size);
+        return mmap_find_vma_reserved(start, size, align);
     }
 
     addr = start;
@@ -299,7 +303,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
         if (h2g_valid(ptr + size - 1)) {
             addr = h2g(ptr);
 
-            if ((addr & ~TARGET_PAGE_MASK) == 0) {
+            if ((addr & (align - 1)) == 0) {
                 /* Success.  */
                 if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                     mmap_next_start = addr + size;
@@ -313,12 +317,12 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
             /* Assume the result that the kernel gave us is the
                first with enough free space, so start again at the
                next higher target page.  */
-            addr = TARGET_PAGE_ALIGN(addr);
+            addr = ROUND_UP(addr, align);
             break;
         case 1:
             /* Sometimes the kernel decides to perform the allocation
                at the top end of memory instead.  */
-            addr &= TARGET_PAGE_MASK;
+            addr &= -align;
             break;
         case 2:
             /* Start over at low memory.  */
@@ -416,7 +420,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
     if (!(flags & MAP_FIXED)) {
         host_len = len + offset - host_offset;
         host_len = HOST_PAGE_ALIGN(host_len);
-        start = mmap_find_vma(real_start, host_len);
+        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
         if (start == (abi_ulong)-1) {
             errno = ENOMEM;
             goto fail;
@@ -710,7 +714,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
     } else if (flags & MREMAP_MAYMOVE) {
         abi_ulong mmap_start;
 
-        mmap_start = mmap_find_vma(0, new_size);
+        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
 
         if (mmap_start == -1) {
             errno = ENOMEM;
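
Outside the reserved_va case, mmap_find_vma now accepts the kernel's
suggestion only when it happens to be align-aligned, and otherwise retries
just above it, rounded up.  A condensed sketch of that probe-and-retry
pattern (illustrative only: the name find_aligned is mine, and the real loop
also handles h2g validity, wraparound, and the mmap_next_start cache):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Probe for size bytes at an align-aligned address (align a power of
     * two), letting the kernel propose candidate addresses. */
    static void *find_aligned(uintptr_t hint, size_t size, uintptr_t align)
    {
        for (;;) {
            void *p = mmap((void *)hint, size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
            if (p == MAP_FAILED) {
                return NULL;
            }
            if (((uintptr_t)p & (align - 1)) == 0) {
                return p;               /* kernel's choice is aligned: done */
            }
            /* Misaligned: hand it back and retry at the next aligned spot. */
            munmap(p, size);
            hint = ((uintptr_t)p + align - 1) & -align;
        }
    }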

--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -443,7 +443,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                        abi_ulong new_addr);
 extern unsigned long last_brk;
 extern abi_ulong mmap_next_start;
-abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
+abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong);
 void mmap_fork_start(void);
 void mmap_fork_end(int child);

--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -3912,7 +3912,8 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
     else {
         abi_ulong mmap_start;
 
-        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
+        /* In order to use the host shmat, we need to honor host SHMLBA.  */
+        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
 
         if (mmap_start == -1) {
             errno = ENOMEM;
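
For context on why the alignment matters at all: on hosts where SHMLBA is
larger than the page size (32-bit Arm, for instance, where it is four pages),
the host kernel rejects a shmat() at an address that is merely page-aligned.
A minimal host-side demonstration of the constraint (assumes only what
shmat(2) documents; not part of the patch):

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        int id = shmget(IPC_PRIVATE, SHMLBA, IPC_CREAT | 0600);
        if (id < 0) {
            perror("shmget");
            return 1;
        }

        char *p = shmat(id, NULL, 0);  /* kernel picks an aligned address */
        if (p == (char *)-1) {
            perror("shmat");
            return 1;
        }
        printf("kernel chose %p, SHMLBA = %#lx\n",
               (void *)p, (unsigned long)SHMLBA);
        char *misaligned = p + 4096;   /* page-aligned, but not SHMLBA-
                                          aligned where SHMLBA > 4K */
        shmdt(p);

        char *q = shmat(id, misaligned, 0);
        if (q == (char *)-1) {
            perror("shmat at misaligned address");  /* EINVAL on such hosts */
        } else {
            shmdt(q);
        }
        shmctl(id, IPC_RMID, NULL);
        return 0;
    }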