target-arm queue:

* Support ARMv8.5-MemTag for linux-user
  * npcm7xx: Support SMBus
  * MAINTAINERS: add section for Clock framework
 -----BEGIN PGP SIGNATURE-----
 
 iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmAs95kZHHBldGVyLm1h
 eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3qPcD/9ayCMr/bivsjRJI0jEzf4T
 fkXy6M5hD+RinoVyT21cdTdvmVL2WP6D5bywzJ5EaIhH9TVWLLMkjU67iMt0MjwD
 IzRBqmkgVeyWjSy7iVkR0LMoaLNeIhSMm8arSywZCqVQaTLunmR7y3CclGGA6Mzn
 1L/ViN/ShQQV9CX/S5EifjYpez4FItOLGQ/j63vPTmkF2bTPgNFM4vjX7jdJQvGe
 fu2N0d5///dFPQUSmdMVr8r9jouXvfwsHXMV7ZZzbzir/YikV4RjHyBazz/ujAxa
 zWfwoJtSaGJJiwMOFU9xMaX+Liipva93As7JT7XRRHaP3ozXN/x2rdoLUEkilU54
 3dHBXWObrJVOtCPnmHD41yusuCLIerrP7UrIJ++bsv0JWrBVB6u/Z4Vjeze4WalD
 tAlz/5tDQLSkSiDgNAjmmHt5MVYL5z1Xiuxi/lpFg74d34MculZCl1V0S0q4Vays
 cfrPewCa450ePT2oUf51FVxQYK2KVUOTmh1d8ox1C5uhw64hI0CeocOPWtLDOx7v
 AynqfkwOyk0e1eMO0jf54ddmEK2dVt+fpN+Xx9wj4lm4ls7yBa18z9SbGdBujLjh
 ukkr8n6wc2zI001bnw207UMRdkyS0fXJh36LaJrDSV2VPVYmHktKP6FfqX8ptK0M
 lg42HQNBhXYcdgobkNZ/Yw==
 =yx/x
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20210217' into staging

target-arm queue:
 * Support ARMv8.5-MemTag for linux-user
 * npcm7xx: Support SMBus
 * MAINTAINERS: add section for Clock framework

# gpg: Signature made Wed 17 Feb 2021 11:01:45 GMT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20210217: (37 commits)
  MAINTAINERS: add myself maintainer for the clock framework
  hw/i2c: Implement NPCM7XX SMBus Module FIFO Mode
  hw/i2c: Add a QTest for NPCM7XX SMBus Device
  hw/arm: Add I2C sensors and EEPROM for GSJ machine
  hw/arm: Add I2C sensors for NPCM750 eval board
  hw/i2c: Implement NPCM7XX SMBus Module Single Mode
  tests/tcg/aarch64: Add mte smoke tests
  target/arm: Enable MTE for user-only
  target/arm: Add allocation tag storage for user mode
  linux-user/aarch64: Signal SEGV_MTEAERR for async tag check error
  linux-user/aarch64: Signal SEGV_MTESERR for sync tag check fault
  linux-user/aarch64: Pass syndrome to EXC_*_ABORT
  target/arm: Split out syndrome.h from internals.h
  linux-user/aarch64: Implement PROT_MTE
  linux-user/aarch64: Implement PR_MTE_TCF and PR_MTE_TAG
  target/arm: Use the proper TBI settings for linux-user
  target/arm: Improve gen_top_byte_ignore
  linux-user/aarch64: Implement PR_TAGGED_ADDR_ENABLE
  linux-user: Handle tags in lock_user/unlock_user
  linux-user: Fix types in uaccess.c
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 65d6ae4927 by Peter Maydell, 2021-02-17 11:04:00 +00:00
56 changed files with 2976 additions and 553 deletions


@ -2855,6 +2855,17 @@ F: pc-bios/opensbi-*
F: .gitlab-ci.d/opensbi.yml
F: .gitlab-ci.d/opensbi/
Clock framework
M: Luc Michel <luc@lmichel.fr>
R: Damien Hedde <damien.hedde@greensocs.com>
S: Maintained
F: include/hw/clock.h
F: include/hw/qdev-clock.h
F: hw/core/clock.c
F: hw/core/clock-vmstate.c
F: hw/core/qdev-clock.c
F: docs/devel/clocks.rst
Usermode Emulation
------------------
Overall usermode emulation


@ -114,6 +114,7 @@ typedef struct PageDesc {
unsigned int code_write_count;
#else
unsigned long flags;
void *target_data;
#endif
#ifndef CONFIG_USER_ONLY
QemuSpin lock;
@ -1761,7 +1762,7 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
prot |= p2->flags;
p2->flags &= ~PAGE_WRITE;
}
mprotect(g2h(page_addr), qemu_host_page_size,
mprotect(g2h_untagged(page_addr), qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
if (DEBUG_TB_INVALIDATE_GATE) {
printf("protecting code page: 0x" TB_PAGE_ADDR_FMT "\n", page_addr);
@ -2740,6 +2741,7 @@ int page_get_flags(target_ulong address)
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
target_ulong addr, len;
bool reset_target_data;
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
@ -2754,6 +2756,8 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
if (flags & PAGE_WRITE) {
flags |= PAGE_WRITE_ORG;
}
reset_target_data = !(flags & PAGE_VALID) || (flags & PAGE_RESET);
flags &= ~PAGE_RESET;
for (addr = start, len = end - start;
len != 0;
@ -2767,10 +2771,34 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
p->first_tb) {
tb_invalidate_phys_page(addr, 0);
}
if (reset_target_data && p->target_data) {
g_free(p->target_data);
p->target_data = NULL;
}
p->flags = flags;
}
}
void *page_get_target_data(target_ulong address)
{
PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
return p ? p->target_data : NULL;
}
void *page_alloc_target_data(target_ulong address, size_t size)
{
PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
void *ret = NULL;
if (p->flags & PAGE_VALID) {
ret = p->target_data;
if (!ret) {
p->target_data = ret = g_malloc0(size);
}
}
return ret;
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
PageDesc *p;
@ -2884,7 +2912,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
}
#endif
}
mprotect((void *)g2h(host_start), qemu_host_page_size,
mprotect((void *)g2h_untagged(host_start), qemu_host_page_size,
prot & PAGE_BITS);
}
mmap_unlock();


@ -213,7 +213,8 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
g_assert_not_reached();
}
if (!guest_addr_valid(addr) || page_check_range(addr, 1, flags) < 0) {
if (!guest_addr_valid_untagged(addr) ||
page_check_range(addr, 1, flags) < 0) {
if (nonfault) {
return TLB_INVALID_MASK;
} else {
@ -234,7 +235,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
int flags;
flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra);
*phost = flags ? NULL : g2h(addr);
*phost = flags ? NULL : g2h(env_cpu(env), addr);
return flags;
}
@ -247,7 +248,7 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
flags = probe_access_internal(env, addr, size, access_type, false, ra);
g_assert(flags == 0);
return size ? g2h(addr) : NULL;
return size ? g2h(env_cpu(env), addr) : NULL;
}
#if defined(__i386__)
@ -842,7 +843,7 @@ uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldub_p(g2h(ptr));
ret = ldub_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -853,7 +854,7 @@ int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldsb_p(g2h(ptr));
ret = ldsb_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -864,7 +865,7 @@ uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = lduw_be_p(g2h(ptr));
ret = lduw_be_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -875,7 +876,7 @@ int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldsw_be_p(g2h(ptr));
ret = ldsw_be_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -886,7 +887,7 @@ uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldl_be_p(g2h(ptr));
ret = ldl_be_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -897,7 +898,7 @@ uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldq_be_p(g2h(ptr));
ret = ldq_be_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -908,7 +909,7 @@ uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = lduw_le_p(g2h(ptr));
ret = lduw_le_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -919,7 +920,7 @@ int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldsw_le_p(g2h(ptr));
ret = ldsw_le_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -930,7 +931,7 @@ uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldl_le_p(g2h(ptr));
ret = ldl_le_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -941,7 +942,7 @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
ret = ldq_le_p(g2h(ptr));
ret = ldq_le_p(g2h(env_cpu(env), ptr));
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
return ret;
}
@ -1051,7 +1052,7 @@ void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stb_p(g2h(ptr), val);
stb_p(g2h(env_cpu(env), ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
@ -1060,7 +1061,7 @@ void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stw_be_p(g2h(ptr), val);
stw_be_p(g2h(env_cpu(env), ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
@ -1069,7 +1070,7 @@ void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stl_be_p(g2h(ptr), val);
stl_be_p(g2h(env_cpu(env), ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
@ -1078,7 +1079,7 @@ void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stq_be_p(g2h(ptr), val);
stq_be_p(g2h(env_cpu(env), ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
@ -1087,7 +1088,7 @@ void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stw_le_p(g2h(ptr), val);
stw_le_p(g2h(env_cpu(env), ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
@ -1096,7 +1097,7 @@ void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stl_le_p(g2h(ptr), val);
stl_le_p(g2h(env_cpu(env), ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
@ -1105,7 +1106,7 @@ void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);
trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
stq_le_p(g2h(ptr), val);
stq_le_p(g2h(env_cpu(env), ptr), val);
qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
}
@ -1170,7 +1171,7 @@ uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
uint32_t ret;
set_helper_retaddr(1);
ret = ldub_p(g2h(ptr));
ret = ldub_p(g2h_untagged(ptr));
clear_helper_retaddr();
return ret;
}
@ -1180,7 +1181,7 @@ uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
uint32_t ret;
set_helper_retaddr(1);
ret = lduw_p(g2h(ptr));
ret = lduw_p(g2h_untagged(ptr));
clear_helper_retaddr();
return ret;
}
@ -1190,7 +1191,7 @@ uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
uint32_t ret;
set_helper_retaddr(1);
ret = ldl_p(g2h(ptr));
ret = ldl_p(g2h_untagged(ptr));
clear_helper_retaddr();
return ret;
}
@ -1200,7 +1201,7 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
uint64_t ret;
set_helper_retaddr(1);
ret = ldq_p(g2h(ptr));
ret = ldq_p(g2h_untagged(ptr));
clear_helper_retaddr();
return ret;
}
@ -1213,7 +1214,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
void *ret = g2h(addr);
void *ret = g2h(env_cpu(env), addr);
set_helper_retaddr(retaddr);
return ret;
}


@ -737,7 +737,7 @@ static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
end_addr = HOST_PAGE_ALIGN(elf_bss);
if (end_addr1 < end_addr) {
mmap((void *)g2h(end_addr1), end_addr - end_addr1,
mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
}


@ -42,7 +42,7 @@
int singlestep;
unsigned long mmap_min_addr;
unsigned long guest_base;
uintptr_t guest_base;
bool have_guest_base;
unsigned long reserved_va;
@ -970,7 +970,7 @@ int main(int argc, char **argv)
g_free(target_environ);
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
qemu_log("guest_base 0x%lx\n", guest_base);
qemu_log("guest_base %p\n", (void *)guest_base);
log_page_dump("binary load");
qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
@ -1055,7 +1055,7 @@ int main(int argc, char **argv)
env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
idt_table = g2h(env->idt.base);
idt_table = g2h_untagged(env->idt.base);
set_idt(0, 0);
set_idt(1, 0);
set_idt(2, 0);
@ -1085,7 +1085,7 @@ int main(int argc, char **argv)
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
gdt_table = g2h(env->gdt.base);
gdt_table = g2h_untagged(env->gdt.base);
#ifdef TARGET_ABI32
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |


@ -102,7 +102,8 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
}
end = host_end;
}
ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
ret = mprotect(g2h_untagged(host_start),
qemu_host_page_size, prot1 & PAGE_BITS);
if (ret != 0)
goto error;
host_start += qemu_host_page_size;
@ -112,8 +113,8 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
prot1 & PAGE_BITS);
ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
qemu_host_page_size, prot1 & PAGE_BITS);
if (ret != 0)
goto error;
host_end -= qemu_host_page_size;
@ -121,7 +122,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
/* handle the pages in the middle */
if (host_start < host_end) {
ret = mprotect(g2h(host_start), host_end - host_start, prot);
ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
if (ret != 0)
goto error;
}
@ -143,7 +144,7 @@ static int mmap_frag(abi_ulong real_start,
int prot1, prot_new;
real_end = real_start + qemu_host_page_size;
host_start = g2h(real_start);
host_start = g2h_untagged(real_start);
/* get the protection of the target pages outside the mapping */
prot1 = 0;
@ -175,7 +176,7 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
/* read the corresponding file data */
pread(fd, g2h(start), end - start, offset);
pread(fd, g2h_untagged(start), end - start, offset);
/* put final protection */
if (prot_new != (prot1 | PROT_WRITE))
@ -300,7 +301,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
/* Note: we prefer to control the mapping address. It is
especially important if qemu_host_page_size >
qemu_real_host_page_size */
p = mmap(g2h(mmap_start),
p = mmap(g2h_untagged(mmap_start),
host_len, prot, flags | MAP_FIXED, fd, host_offset);
if (p == MAP_FAILED)
goto fail;
@ -344,7 +345,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
-1, 0);
if (retaddr == -1)
goto fail;
pread(fd, g2h(start), len, offset);
pread(fd, g2h_untagged(start), len, offset);
if (!(prot & PROT_WRITE)) {
ret = target_mprotect(start, len, prot);
if (ret != 0) {
@ -390,7 +391,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
offset1 = 0;
else
offset1 = offset + real_start - start;
p = mmap(g2h(real_start), real_end - real_start,
p = mmap(g2h_untagged(real_start), real_end - real_start,
prot, flags, fd, offset1);
if (p == MAP_FAILED)
goto fail;
@ -456,7 +457,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
ret = 0;
/* unmap what we can */
if (real_start < real_end) {
ret = munmap(g2h(real_start), real_end - real_start);
ret = munmap(g2h_untagged(real_start), real_end - real_start);
}
if (ret == 0)
@ -479,5 +480,5 @@ int target_msync(abi_ulong start, abi_ulong len, int flags)
return 0;
start &= qemu_host_page_mask;
return msync(g2h(start), end - start, flags);
return msync(g2h_untagged(start), end - start, flags);
}


@ -218,13 +218,12 @@ extern unsigned long x86_stack_size;
/* user access */
#define VERIFY_READ 0
#define VERIFY_WRITE 1 /* implies read access */
#define VERIFY_READ PAGE_READ
#define VERIFY_WRITE (PAGE_READ | PAGE_WRITE)
static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
{
return page_check_range((target_ulong)addr, size,
(type == VERIFY_READ) ? PAGE_READ : (PAGE_READ | PAGE_WRITE)) == 0;
return page_check_range((target_ulong)addr, size, type) == 0;
}
/* NOTE __get_user and __put_user use host pointers and don't check access. */
@ -357,13 +356,13 @@ static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy
void *addr;
addr = g_malloc(len);
if (copy)
memcpy(addr, g2h(guest_addr), len);
memcpy(addr, g2h_untagged(guest_addr), len);
else
memset(addr, 0, len);
return addr;
}
#else
return g2h(guest_addr);
return g2h_untagged(guest_addr);
#endif
}
@ -377,10 +376,10 @@ static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
#ifdef DEBUG_REMAP
if (!host_ptr)
return;
if (host_ptr == g2h(guest_addr))
if (host_ptr == g2h_untagged(guest_addr))
return;
if (len > 0)
memcpy(g2h(guest_addr), host_ptr, len);
memcpy(g2h_untagged(guest_addr), host_ptr, len);
g_free(host_ptr);
#endif
}


@ -43,6 +43,7 @@ Supported devices
* GPIO controller
* Analog to Digital Converter (ADC)
* Pulse Width Modulation (PWM)
* SMBus controller (SMBF)
Missing devices
---------------
@ -58,7 +59,6 @@ Missing devices
* Ethernet controllers (GMAC and EMC)
* USB device (USBD)
* SMBus controller (SMBF)
* Peripheral SPI controller (PSPI)
* SD/MMC host
* PECI interface


@ -370,6 +370,7 @@ config NPCM7XX
bool
select A9MPCORE
select ARM_GIC
select AT24C # EEPROM
select PL310 # cache controller
select SERIAL
select SSI


@ -102,6 +102,22 @@ enum NPCM7xxInterrupt {
NPCM7XX_WDG2_IRQ, /* Timer Module 2 Watchdog */
NPCM7XX_EHCI_IRQ = 61,
NPCM7XX_OHCI_IRQ = 62,
NPCM7XX_SMBUS0_IRQ = 64,
NPCM7XX_SMBUS1_IRQ,
NPCM7XX_SMBUS2_IRQ,
NPCM7XX_SMBUS3_IRQ,
NPCM7XX_SMBUS4_IRQ,
NPCM7XX_SMBUS5_IRQ,
NPCM7XX_SMBUS6_IRQ,
NPCM7XX_SMBUS7_IRQ,
NPCM7XX_SMBUS8_IRQ,
NPCM7XX_SMBUS9_IRQ,
NPCM7XX_SMBUS10_IRQ,
NPCM7XX_SMBUS11_IRQ,
NPCM7XX_SMBUS12_IRQ,
NPCM7XX_SMBUS13_IRQ,
NPCM7XX_SMBUS14_IRQ,
NPCM7XX_SMBUS15_IRQ,
NPCM7XX_PWM0_IRQ = 93, /* PWM module 0 */
NPCM7XX_PWM1_IRQ, /* PWM module 1 */
NPCM7XX_GPIO0_IRQ = 116,
@ -152,6 +168,26 @@ static const hwaddr npcm7xx_pwm_addr[] = {
0xf0104000,
};
/* Direct memory-mapped access to each SMBus Module. */
static const hwaddr npcm7xx_smbus_addr[] = {
0xf0080000,
0xf0081000,
0xf0082000,
0xf0083000,
0xf0084000,
0xf0085000,
0xf0086000,
0xf0087000,
0xf0088000,
0xf0089000,
0xf008a000,
0xf008b000,
0xf008c000,
0xf008d000,
0xf008e000,
0xf008f000,
};
static const struct {
hwaddr regs_addr;
uint32_t unconnected_pins;
@ -353,6 +389,11 @@ static void npcm7xx_init(Object *obj)
object_initialize_child(obj, "gpio[*]", &s->gpio[i], TYPE_NPCM7XX_GPIO);
}
for (i = 0; i < ARRAY_SIZE(s->smbus); i++) {
object_initialize_child(obj, "smbus[*]", &s->smbus[i],
TYPE_NPCM7XX_SMBUS);
}
object_initialize_child(obj, "ehci", &s->ehci, TYPE_NPCM7XX_EHCI);
object_initialize_child(obj, "ohci", &s->ohci, TYPE_SYSBUS_OHCI);
@ -509,6 +550,17 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp)
npcm7xx_irq(s, NPCM7XX_GPIO0_IRQ + i));
}
/* SMBus modules. Cannot fail. */
QEMU_BUILD_BUG_ON(ARRAY_SIZE(npcm7xx_smbus_addr) != ARRAY_SIZE(s->smbus));
for (i = 0; i < ARRAY_SIZE(s->smbus); i++) {
Object *obj = OBJECT(&s->smbus[i]);
sysbus_realize(SYS_BUS_DEVICE(obj), &error_abort);
sysbus_mmio_map(SYS_BUS_DEVICE(obj), 0, npcm7xx_smbus_addr[i]);
sysbus_connect_irq(SYS_BUS_DEVICE(obj), 0,
npcm7xx_irq(s, NPCM7XX_SMBUS0_IRQ + i));
}
/* USB Host */
object_property_set_bool(OBJECT(&s->ehci), "companion-enable", true,
&error_abort);
@ -576,22 +628,6 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp)
create_unimplemented_device("npcm7xx.pcierc", 0xe1000000, 64 * KiB);
create_unimplemented_device("npcm7xx.kcs", 0xf0007000, 4 * KiB);
create_unimplemented_device("npcm7xx.gfxi", 0xf000e000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[0]", 0xf0080000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[1]", 0xf0081000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[2]", 0xf0082000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[3]", 0xf0083000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[4]", 0xf0084000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[5]", 0xf0085000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[6]", 0xf0086000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[7]", 0xf0087000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[8]", 0xf0088000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[9]", 0xf0089000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[10]", 0xf008a000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[11]", 0xf008b000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[12]", 0xf008c000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[13]", 0xf008d000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[14]", 0xf008e000, 4 * KiB);
create_unimplemented_device("npcm7xx.smbus[15]", 0xf008f000, 4 * KiB);
create_unimplemented_device("npcm7xx.espi", 0xf009f000, 4 * KiB);
create_unimplemented_device("npcm7xx.peci", 0xf0100000, 4 * KiB);
create_unimplemented_device("npcm7xx.siox[1]", 0xf0101000, 4 * KiB);


@ -19,6 +19,7 @@
#include "exec/address-spaces.h"
#include "hw/arm/npcm7xx.h"
#include "hw/core/cpu.h"
#include "hw/i2c/smbus_eeprom.h"
#include "hw/loader.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
@ -98,6 +99,49 @@ static NPCM7xxState *npcm7xx_create_soc(MachineState *machine,
return NPCM7XX(obj);
}
static I2CBus *npcm7xx_i2c_get_bus(NPCM7xxState *soc, uint32_t num)
{
g_assert(num < ARRAY_SIZE(soc->smbus));
return I2C_BUS(qdev_get_child_bus(DEVICE(&soc->smbus[num]), "i2c-bus"));
}
static void at24c_eeprom_init(NPCM7xxState *soc, int bus, uint8_t addr,
uint32_t rsize)
{
I2CBus *i2c_bus = npcm7xx_i2c_get_bus(soc, bus);
I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr);
DeviceState *dev = DEVICE(i2c_dev);
qdev_prop_set_uint32(dev, "rom-size", rsize);
i2c_slave_realize_and_unref(i2c_dev, i2c_bus, &error_abort);
}
static void npcm750_evb_i2c_init(NPCM7xxState *soc)
{
/* lm75 temperature sensor on SVB, tmp105 is compatible */
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 0), "tmp105", 0x48);
/* lm75 temperature sensor on EB, tmp105 is compatible */
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), "tmp105", 0x48);
/* tmp100 temperature sensor on EB, tmp105 is compatible */
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 2), "tmp105", 0x48);
/* tmp100 temperature sensor on SVB, tmp105 is compatible */
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 6), "tmp105", 0x48);
}
static void quanta_gsj_i2c_init(NPCM7xxState *soc)
{
/* GSJ machine has 4 max31725 temperature sensors, tmp105 is compatible. */
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), "tmp105", 0x5c);
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 2), "tmp105", 0x5c);
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 3), "tmp105", 0x5c);
i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 4), "tmp105", 0x5c);
at24c_eeprom_init(soc, 9, 0x55, 8192);
at24c_eeprom_init(soc, 10, 0x55, 8192);
/* TODO: Add additional i2c devices. */
}
static void npcm750_evb_init(MachineState *machine)
{
NPCM7xxState *soc;
@ -108,6 +152,7 @@ static void npcm750_evb_init(MachineState *machine)
npcm7xx_load_bootrom(machine, soc);
npcm7xx_connect_flash(&soc->fiu[0], 0, "w25q256", drive_get(IF_MTD, 0, 0));
npcm750_evb_i2c_init(soc);
npcm7xx_load_kernel(machine, soc);
}
@ -122,6 +167,7 @@ static void quanta_gsj_init(MachineState *machine)
npcm7xx_load_bootrom(machine, soc);
npcm7xx_connect_flash(&soc->fiu[0], 0, "mx25l25635e",
drive_get(IF_MTD, 0, 0));
quanta_gsj_i2c_init(soc);
npcm7xx_load_kernel(machine, soc);
}


@ -9,6 +9,7 @@ i2c_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_i2c.c'))
i2c_ss.add(when: 'CONFIG_IMX_I2C', if_true: files('imx_i2c.c'))
i2c_ss.add(when: 'CONFIG_MPC_I2C', if_true: files('mpc_i2c.c'))
i2c_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('microbit_i2c.c'))
i2c_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_smbus.c'))
i2c_ss.add(when: 'CONFIG_SMBUS_EEPROM', if_true: files('smbus_eeprom.c'))
i2c_ss.add(when: 'CONFIG_VERSATILE_I2C', if_true: files('versatile_i2c.c'))
i2c_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_i2c.c'))

hw/i2c/npcm7xx_smbus.c (new file, 1099 additions)

File diff suppressed because it is too large.


@ -14,3 +14,15 @@ aspeed_i2c_bus_read(uint32_t busid, uint64_t offset, unsigned size, uint64_t val
aspeed_i2c_bus_write(uint32_t busid, uint64_t offset, unsigned size, uint64_t value) "bus[%d]: To 0x%" PRIx64 " of size %u: 0x%" PRIx64
aspeed_i2c_bus_send(const char *mode, int i, int count, uint8_t byte) "%s send %d/%d 0x%02x"
aspeed_i2c_bus_recv(const char *mode, int i, int count, uint8_t byte) "%s recv %d/%d 0x%02x"
# npcm7xx_smbus.c
npcm7xx_smbus_read(const char *id, uint64_t offset, uint64_t value, unsigned size) "%s offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u"
npcm7xx_smbus_write(const char *id, uint64_t offset, uint64_t value, unsigned size) "%s offset: 0x%04" PRIx64 " value: 0x%02" PRIx64 " size: %u"
npcm7xx_smbus_start(const char *id, int success) "%s starting, success: %d"
npcm7xx_smbus_send_address(const char *id, uint8_t addr, int recv, int success) "%s sending address: 0x%02x, recv: %d, success: %d"
npcm7xx_smbus_send_byte(const char *id, uint8_t value, int success) "%s send byte: 0x%02x, success: %d"
npcm7xx_smbus_recv_byte(const char *id, uint8_t value) "%s recv byte: 0x%02x"
npcm7xx_smbus_stop(const char *id) "%s stopping"
npcm7xx_smbus_nack(const char *id) "%s nacking"
npcm7xx_smbus_recv_fifo(const char *id, uint8_t received, uint8_t expected) "%s recv fifo: received %u, expected %u"


@ -150,7 +150,7 @@ static inline void tswap64s(uint64_t *s)
/* On some host systems the guest address space is reserved on the host.
* This allows the guest address space to be offset to a convenient location.
*/
extern unsigned long guest_base;
extern uintptr_t guest_base;
extern bool have_guest_base;
extern unsigned long reserved_va;
@ -256,18 +256,27 @@ extern intptr_t qemu_host_page_mask;
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
code */
/*
* Original state of the write flag (used when tracking self-modifying code)
*/
#define PAGE_WRITE_ORG 0x0010
/* Invalidate the TLB entry immediately, helpful for s390x
* Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
#define PAGE_WRITE_INV 0x0040
/*
* Invalidate the TLB entry immediately, helpful for s390x
* Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
*/
#define PAGE_WRITE_INV 0x0020
/* For use with page_set_flags: page is being replaced; target_data cleared. */
#define PAGE_RESET 0x0040
/* For linux-user, indicates that the page is MAP_ANON. */
#define PAGE_ANON 0x0080
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away. */
#define PAGE_RESERVED 0x0020
#define PAGE_RESERVED 0x0100
#endif
/* Target-specific bits that will be used via page_get_flags(). */
#define PAGE_TARGET_1 0x0080
#define PAGE_TARGET_2 0x0200
#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);
@ -279,6 +288,30 @@ int walk_memory_regions(void *, walk_memory_regions_fn);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
/**
* page_alloc_target_data(address, size)
* @address: guest virtual address
* @size: size of data to allocate
*
* Allocate @size bytes of out-of-band data to associate with the
* guest page at @address. If the page is not mapped, NULL will
* be returned. If there is existing data associated with @address,
* no new memory will be allocated.
*
* The memory will be freed when the guest page is deallocated,
* e.g. with the munmap system call.
*/
void *page_alloc_target_data(target_ulong address, size_t size);
/**
* page_get_target_data(address)
* @address: guest virtual address
*
* Return any out-of-band data associated with the guest page
* at @address, as per page_alloc_target_data.
*/
void *page_get_target_data(target_ulong address);
#endif
CPUArchState *cpu_copy(CPUArchState *env);
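
As a reading aid for the two declarations above: a target can attach arbitrary per-page metadata and fetch it back later. The sketch below is illustrative only, not the target/arm MTE code from this series; TAGS_PER_PAGE and store_page_tag() are invented names, and the usual "exec/cpu-all.h" declarations are assumed to be in scope.

/* Sketch: lazily attach out-of-band data (e.g. one tag per 16-byte granule). */
#define TAGS_PER_PAGE  (TARGET_PAGE_SIZE / 16)

static bool store_page_tag(target_ulong addr, uint8_t tag)
{
    /* Allocates the per-page array on first use; NULL if the page is unmapped. */
    uint8_t *tags = page_alloc_target_data(addr, TAGS_PER_PAGE);

    if (tags == NULL) {
        return false;                 /* PAGE_VALID not set for this page */
    }
    tags[(addr & ~TARGET_PAGE_MASK) / 16] = tag & 0xf;
    return true;
}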


@ -69,23 +69,40 @@ typedef uint64_t abi_ptr;
#define TARGET_ABI_FMT_ptr "%"PRIx64
#endif
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(abi_ptr)(x) + guest_base))
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define guest_addr_valid(x) (1)
#else
#define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
#ifndef TARGET_TAGGED_ADDRESSES
static inline abi_ptr cpu_untagged_addr(CPUState *cs, abi_ptr x)
{
return x;
}
#endif
#define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base)
static inline int guest_range_valid(unsigned long start, unsigned long len)
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
static inline void *g2h_untagged(abi_ptr x)
{
return (void *)((uintptr_t)(x) + guest_base);
}
static inline void *g2h(CPUState *cs, abi_ptr x)
{
return g2h_untagged(cpu_untagged_addr(cs, x));
}
static inline bool guest_addr_valid_untagged(abi_ulong x)
{
return x <= GUEST_ADDR_MAX;
}
static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
{
return len - 1 <= GUEST_ADDR_MAX && start <= GUEST_ADDR_MAX - len + 1;
}
#define h2g_valid(x) \
(HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS || \
(uintptr_t)(x) - guest_base <= GUEST_ADDR_MAX)
#define h2g_nocheck(x) ({ \
unsigned long __ret = (unsigned long)(x) - guest_base; \
uintptr_t __ret = (uintptr_t)(x) - guest_base; \
(abi_ptr)__ret; \
})
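
The key relationship introduced above is g2h(cs, x) == g2h_untagged(cpu_untagged_addr(cs, x)), with cpu_untagged_addr() being the identity unless the target defines TARGET_TAGGED_ADDRESSES. Purely as a simplified sketch of what a tagged target supplies (the real target/arm version also checks the per-thread PR_TAGGED_ADDR_ENABLE state and only strips tags from user-space addresses), assuming a 64-bit abi_ptr:

/* Sketch only: drop a top-byte tag before translating to a host pointer. */
static inline abi_ptr sketch_untagged_addr(abi_ptr x)
{
    return x & ~((abi_ptr)0xff << 56);
}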
@ -439,7 +456,7 @@ static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
return g2h(addr);
return g2h(env_cpu(env), addr);
}
#else
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,


@ -616,7 +616,7 @@ static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
void **hostp)
{
if (hostp) {
*hostp = g2h(addr);
*hostp = g2h_untagged(addr);
}
return addr;
}


@ -20,6 +20,7 @@
#include "hw/adc/npcm7xx_adc.h"
#include "hw/cpu/a9mpcore.h"
#include "hw/gpio/npcm7xx_gpio.h"
#include "hw/i2c/npcm7xx_smbus.h"
#include "hw/mem/npcm7xx_mc.h"
#include "hw/misc/npcm7xx_clk.h"
#include "hw/misc/npcm7xx_gcr.h"
@ -85,6 +86,7 @@ typedef struct NPCM7xxState {
NPCM7xxMCState mc;
NPCM7xxRNGState rng;
NPCM7xxGPIOState gpio[8];
NPCM7xxSMBusState smbus[16];
EHCISysBusState ehci;
OHCISysBusState ohci;
NPCM7xxFIUState fiu[2];


@ -0,0 +1,113 @@
/*
* Nuvoton NPCM7xx SMBus Module.
*
* Copyright 2020 Google LLC
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef NPCM7XX_SMBUS_H
#define NPCM7XX_SMBUS_H
#include "exec/memory.h"
#include "hw/i2c/i2c.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
/*
* Number of addresses this module contains. Do not change this without
* incrementing the version_id in the vmstate.
*/
#define NPCM7XX_SMBUS_NR_ADDRS 10
/* Size of the FIFO buffer. */
#define NPCM7XX_SMBUS_FIFO_SIZE 16
typedef enum NPCM7xxSMBusStatus {
NPCM7XX_SMBUS_STATUS_IDLE,
NPCM7XX_SMBUS_STATUS_SENDING,
NPCM7XX_SMBUS_STATUS_RECEIVING,
NPCM7XX_SMBUS_STATUS_NEGACK,
NPCM7XX_SMBUS_STATUS_STOPPING_LAST_RECEIVE,
NPCM7XX_SMBUS_STATUS_STOPPING_NEGACK,
} NPCM7xxSMBusStatus;
/*
* struct NPCM7xxSMBusState - System Management Bus device state.
* @bus: The underlying I2C Bus.
* @irq: GIC interrupt line to fire on events (if enabled).
* @sda: The serial data register.
* @st: The status register.
* @cst: The control status register.
* @cst2: The control status register 2.
* @cst3: The control status register 3.
* @ctl1: The control register 1.
* @ctl2: The control register 2.
* @ctl3: The control register 3.
* @ctl4: The control register 4.
* @ctl5: The control register 5.
* @addr: The SMBus module's own addresses on the I2C bus.
* @scllt: The SCL low time register.
* @sclht: The SCL high time register.
* @fif_ctl: The FIFO control register.
* @fif_cts: The FIFO control status register.
* @fair_per: The fair period register.
* @txf_ctl: The transmit FIFO control register.
* @t_out: The SMBus timeout register.
* @txf_sts: The transmit FIFO status register.
* @rxf_sts: The receive FIFO status register.
* @rxf_ctl: The receive FIFO control register.
* @rx_fifo: The FIFO buffer for receiving in FIFO mode.
* @rx_cur: The current position of rx_fifo.
* @status: The current status of the SMBus.
*/
typedef struct NPCM7xxSMBusState {
SysBusDevice parent;
MemoryRegion iomem;
I2CBus *bus;
qemu_irq irq;
uint8_t sda;
uint8_t st;
uint8_t cst;
uint8_t cst2;
uint8_t cst3;
uint8_t ctl1;
uint8_t ctl2;
uint8_t ctl3;
uint8_t ctl4;
uint8_t ctl5;
uint8_t addr[NPCM7XX_SMBUS_NR_ADDRS];
uint8_t scllt;
uint8_t sclht;
uint8_t fif_ctl;
uint8_t fif_cts;
uint8_t fair_per;
uint8_t txf_ctl;
uint8_t t_out;
uint8_t txf_sts;
uint8_t rxf_sts;
uint8_t rxf_ctl;
uint8_t rx_fifo[NPCM7XX_SMBUS_FIFO_SIZE];
uint8_t rx_cur;
NPCM7xxSMBusStatus status;
} NPCM7xxSMBusState;
#define TYPE_NPCM7XX_SMBUS "npcm7xx-smbus"
#define NPCM7XX_SMBUS(obj) OBJECT_CHECK(NPCM7xxSMBusState, (obj), \
TYPE_NPCM7XX_SMBUS)
#endif /* NPCM7XX_SMBUS_H */


@ -23,6 +23,7 @@
#include "cpu_loop-common.h"
#include "qemu/guest-random.h"
#include "hw/semihosting/common-semi.h"
#include "target/arm/syndrome.h"
#define get_user_code_u32(x, gaddr, env) \
({ abi_long __r = get_user_u32((x), (gaddr)); \
@ -76,7 +77,7 @@
void cpu_loop(CPUARMState *env)
{
CPUState *cs = env_cpu(env);
int trapnr;
int trapnr, ec, fsc;
abi_long ret;
target_siginfo_t info;
@ -117,9 +118,29 @@ void cpu_loop(CPUARMState *env)
case EXCP_DATA_ABORT:
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
info._sifields._sigfault._addr = env->exception.vaddress;
/* We should only arrive here with EC in {DATAABORT, INSNABORT}. */
ec = syn_get_ec(env->exception.syndrome);
assert(ec == EC_DATAABORT || ec == EC_INSNABORT);
/* Both EC have the same format for FSC, or close enough. */
fsc = extract32(env->exception.syndrome, 0, 6);
switch (fsc) {
case 0x04 ... 0x07: /* Translation fault, level {0-3} */
info.si_code = TARGET_SEGV_MAPERR;
break;
case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
info.si_code = TARGET_SEGV_ACCERR;
break;
case 0x11: /* Synchronous Tag Check Fault */
info.si_code = TARGET_SEGV_MTESERR;
break;
default:
g_assert_not_reached();
}
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
case EXCP_DEBUG:
@ -143,6 +164,17 @@ void cpu_loop(CPUARMState *env)
EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
abort();
}
/* Check for MTE asynchronous faults */
if (unlikely(env->cp15.tfsr_el[0])) {
env->cp15.tfsr_el[0] = 0;
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info._sifields._sigfault._addr = 0;
info.si_code = TARGET_SEGV_MTEAERR;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
process_pending_signals(env);
/* Exception return on AArch64 always clears the exclusive monitor,
* so any return to running guest code implies this.


@ -21,5 +21,8 @@ typedef struct target_sigaltstack {
#include "../generic/signal.h"
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
#define TARGET_ARCH_HAS_SETUP_FRAME
#endif /* AARCH64_TARGET_SIGNAL_H */


@ -30,4 +30,17 @@ struct target_pt_regs {
# define TARGET_PR_PAC_APDBKEY (1 << 3)
# define TARGET_PR_PAC_APGAKEY (1 << 4)
#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55
#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56
# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0)
/* MTE tag check fault modes */
# define TARGET_PR_MTE_TCF_SHIFT 1
# define TARGET_PR_MTE_TCF_NONE (0UL << TARGET_PR_MTE_TCF_SHIFT)
# define TARGET_PR_MTE_TCF_SYNC (1UL << TARGET_PR_MTE_TCF_SHIFT)
# define TARGET_PR_MTE_TCF_ASYNC (2UL << TARGET_PR_MTE_TCF_SHIFT)
# define TARGET_PR_MTE_TCF_MASK (3UL << TARGET_PR_MTE_TCF_SHIFT)
/* MTE tag inclusion mask */
# define TARGET_PR_MTE_TAG_SHIFT 3
# define TARGET_PR_MTE_TAG_MASK (0xffffUL << TARGET_PR_MTE_TAG_SHIFT)
#endif /* AARCH64_TARGET_SYSCALL_H */
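
These TARGET_PR_* constants mirror the Linux prctl ABI that a guest uses to turn the feature on. From the guest side (a binary run under qemu-aarch64, not QEMU code), enabling tagged addresses with synchronous tag checking looks roughly like this; the fallback #defines cover older libc headers and repeat the kernel uapi values:

/* Guest-side sketch: enable tagged addresses + synchronous MTE checking. */
#include <sys/prctl.h>
#include <stdio.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC         (1UL << 1)
#define PR_MTE_TAG_SHIFT        3
#endif

int main(void)
{
    /* Allow tags 1-15 for IRG; tag 0 is excluded from random generation. */
    unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC
                         | (0xfffeUL << PR_MTE_TAG_SHIFT);

    if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
        perror("PR_SET_TAGGED_ADDR_CTRL");
        return 1;
    }
    printf("MTE synchronous tag checking enabled\n");
    return 0;
}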


@ -389,7 +389,7 @@ enum {
static bool init_guest_commpage(void)
{
void *want = g2h(ARM_COMMPAGE & -qemu_host_page_size);
void *want = g2h_untagged(ARM_COMMPAGE & -qemu_host_page_size);
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
@ -402,7 +402,7 @@ static bool init_guest_commpage(void)
}
/* Set kernel helper versions; rest of page is 0. */
__put_user(5, (uint32_t *)g2h(0xffff0ffcu));
__put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
perror("Protecting guest commpage");
@ -1872,8 +1872,8 @@ static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
here is still actually needed. For now, continue with it,
but merge it with the "normal" mmap that would allocate the bss. */
host_start = (uintptr_t) g2h(elf_bss);
host_end = (uintptr_t) g2h(last_bss);
host_start = (uintptr_t) g2h_untagged(elf_bss);
host_end = (uintptr_t) g2h_untagged(last_bss);
host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
if (host_map_start < host_end) {
@ -2135,9 +2135,9 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
void *addr, *test;
if (!QEMU_IS_ALIGNED(guest_base, align)) {
fprintf(stderr, "Requested guest base 0x%lx does not satisfy "
fprintf(stderr, "Requested guest base %p does not satisfy "
"host minimum alignment (0x%lx)\n",
guest_base, align);
(void *)guest_base, align);
exit(EXIT_FAILURE);
}
@ -2171,7 +2171,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
}
/* Reserve the address space for the binary, or reserved_va. */
test = g2h(guest_loaddr);
test = g2h_untagged(guest_loaddr);
addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
if (test != addr) {
pgb_fail_in_use(image_name);
@ -2393,7 +2393,7 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
/* Reserve the memory on the host. */
assert(guest_base != 0);
test = g2h(0);
test = g2h_untagged(0);
addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
if (addr == MAP_FAILED || addr != test) {
error_report("Unable to reserve 0x%lx bytes of virtual address "
@ -3510,7 +3510,7 @@ static int vma_get_mapping_count(const struct mm_struct *mm)
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
{
/* if we cannot even read the first page, skip it */
if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
return (0);
/*


@ -668,7 +668,7 @@ static int load_flat_file(struct linux_binprm * bprm,
}
/* zero the BSS. */
memset(g2h(datapos + data_len), 0, bss_len);
memset(g2h_untagged(datapos + data_len), 0, bss_len);
return 0;
}


@ -23,6 +23,7 @@
static abi_ulong hppa_lws(CPUHPPAState *env)
{
CPUState *cs = env_cpu(env);
uint32_t which = env->gr[20];
abi_ulong addr = env->gr[26];
abi_ulong old = env->gr[25];
@ -34,12 +35,12 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
return -TARGET_ENOSYS;
case 0: /* elf32 atomic 32bit cmpxchg */
if ((addr & 3) || !access_ok(VERIFY_WRITE, addr, 4)) {
if ((addr & 3) || !access_ok(cs, VERIFY_WRITE, addr, 4)) {
return -TARGET_EFAULT;
}
old = tswap32(old);
new = tswap32(new);
ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new);
ret = qatomic_cmpxchg((uint32_t *)g2h(cs, addr), old, new);
ret = tswap32(ret);
break;
@ -49,47 +50,47 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
return -TARGET_ENOSYS;
}
if (((addr | old | new) & ((1 << size) - 1))
|| !access_ok(VERIFY_WRITE, addr, 1 << size)
|| !access_ok(VERIFY_READ, old, 1 << size)
|| !access_ok(VERIFY_READ, new, 1 << size)) {
|| !access_ok(cs, VERIFY_WRITE, addr, 1 << size)
|| !access_ok(cs, VERIFY_READ, old, 1 << size)
|| !access_ok(cs, VERIFY_READ, new, 1 << size)) {
return -TARGET_EFAULT;
}
/* Note that below we use host-endian loads so that the cmpxchg
can be host-endian as well. */
switch (size) {
case 0:
old = *(uint8_t *)g2h(old);
new = *(uint8_t *)g2h(new);
ret = qatomic_cmpxchg((uint8_t *)g2h(addr), old, new);
old = *(uint8_t *)g2h(cs, old);
new = *(uint8_t *)g2h(cs, new);
ret = qatomic_cmpxchg((uint8_t *)g2h(cs, addr), old, new);
ret = ret != old;
break;
case 1:
old = *(uint16_t *)g2h(old);
new = *(uint16_t *)g2h(new);
ret = qatomic_cmpxchg((uint16_t *)g2h(addr), old, new);
old = *(uint16_t *)g2h(cs, old);
new = *(uint16_t *)g2h(cs, new);
ret = qatomic_cmpxchg((uint16_t *)g2h(cs, addr), old, new);
ret = ret != old;
break;
case 2:
old = *(uint32_t *)g2h(old);
new = *(uint32_t *)g2h(new);
ret = qatomic_cmpxchg((uint32_t *)g2h(addr), old, new);
old = *(uint32_t *)g2h(cs, old);
new = *(uint32_t *)g2h(cs, new);
ret = qatomic_cmpxchg((uint32_t *)g2h(cs, addr), old, new);
ret = ret != old;
break;
case 3:
{
uint64_t o64, n64, r64;
o64 = *(uint64_t *)g2h(old);
n64 = *(uint64_t *)g2h(new);
o64 = *(uint64_t *)g2h(cs, old);
n64 = *(uint64_t *)g2h(cs, new);
#ifdef CONFIG_ATOMIC64
r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(addr),
r64 = qatomic_cmpxchg__nocheck((uint64_t *)g2h(cs, addr),
o64, n64);
ret = r64 != o64;
#else
start_exclusive();
r64 = *(uint64_t *)g2h(addr);
r64 = *(uint64_t *)g2h(cs, addr);
ret = 1;
if (r64 == o64) {
*(uint64_t *)g2h(addr) = n64;
*(uint64_t *)g2h(cs, addr) = n64;
ret = 0;
}
end_exclusive();


@ -99,7 +99,7 @@ static bool write_ok_or_segv(CPUX86State *env, abi_ptr addr, size_t len)
* For all the vsyscalls, NULL means "don't write anything" not
* "write it at address 0".
*/
if (addr == 0 || access_ok(VERIFY_WRITE, addr, len)) {
if (addr == 0 || access_ok(env_cpu(env), VERIFY_WRITE, addr, len)) {
return true;
}
@ -379,7 +379,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
idt_table = g2h(env->idt.base);
idt_table = g2h_untagged(env->idt.base);
set_idt(0, 0);
set_idt(1, 0);
set_idt(2, 0);
@ -409,7 +409,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
gdt_table = g2h(env->gdt.base);
gdt_table = g2h_untagged(env->gdt.base);
#ifdef TARGET_ABI32
write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |


@ -513,9 +513,10 @@ restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
fpstate_addr = tswapl(sc->fpstate);
if (fpstate_addr != 0) {
if (!access_ok(VERIFY_READ, fpstate_addr,
sizeof(struct target_fpstate)))
if (!access_ok(env_cpu(env), VERIFY_READ, fpstate_addr,
sizeof(struct target_fpstate))) {
goto badframe;
}
#ifndef TARGET_X86_64
cpu_x86_frstor(env, fpstate_addr, 1);
#else


@ -59,7 +59,7 @@ static const char *cpu_model;
static const char *cpu_type;
static const char *seed_optarg;
unsigned long mmap_min_addr;
unsigned long guest_base;
uintptr_t guest_base;
bool have_guest_base;
/*
@ -824,7 +824,7 @@ int main(int argc, char **argv, char **envp)
g_free(target_environ);
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
qemu_log("guest_base 0x%lx\n", guest_base);
qemu_log("guest_base %p\n", (void *)guest_base);
log_page_dump("binary load");
qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);


@ -84,18 +84,24 @@ static int validate_prot_to_pageflags(int *host_prot, int prot)
| (prot & PROT_EXEC ? PROT_READ : 0);
#ifdef TARGET_AARCH64
/*
* The PROT_BTI bit is only accepted if the cpu supports the feature.
* Since this is the unusual case, don't bother checking unless
* the bit has been requested. If set and valid, record the bit
* within QEMU's page_flags.
*/
if (prot & TARGET_PROT_BTI) {
{
ARMCPU *cpu = ARM_CPU(thread_cpu);
if (cpu_isar_feature(aa64_bti, cpu)) {
/*
* The PROT_BTI bit is only accepted if the cpu supports the feature.
* Since this is the unusual case, don't bother checking unless
* the bit has been requested. If set and valid, record the bit
* within QEMU's page_flags.
*/
if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
valid |= TARGET_PROT_BTI;
page_flags |= PAGE_BTI;
}
/* Similarly for the PROT_MTE bit. */
if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
valid |= TARGET_PROT_MTE;
page_flags |= PAGE_MTE;
}
}
#endif
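
PROT_MTE only reaches the check above if the guest passed it to mmap()/mprotect() in the first place. A minimal guest-side request (again run under qemu-aarch64; the PROT_MTE value comes from the arm64 uapi and matches TARGET_PROT_MTE, with a fallback define in case the toolchain headers lack it) would be:

/* Guest-side sketch: ask for an MTE-capable anonymous mapping. */
#include <sys/mman.h>
#include <stddef.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20
#endif

static void *alloc_tagged_buffer(size_t len)
{
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    return p == MAP_FAILED ? NULL : p;
}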
@ -119,7 +125,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
}
len = TARGET_PAGE_ALIGN(len);
end = start + len;
if (!guest_range_valid(start, len)) {
if (!guest_range_valid_untagged(start, len)) {
return -TARGET_ENOMEM;
}
if (len == 0) {
@ -141,7 +147,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
}
end = host_end;
}
ret = mprotect(g2h(host_start), qemu_host_page_size,
ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
prot1 & PAGE_BITS);
if (ret != 0) {
goto error;
@ -153,7 +159,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
ret = mprotect(g2h(host_end - qemu_host_page_size),
ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
qemu_host_page_size, prot1 & PAGE_BITS);
if (ret != 0) {
goto error;
@ -163,7 +169,8 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
/* handle the pages in the middle */
if (host_start < host_end) {
ret = mprotect(g2h(host_start), host_end - host_start, host_prot);
ret = mprotect(g2h_untagged(host_start),
host_end - host_start, host_prot);
if (ret != 0) {
goto error;
}
@ -186,7 +193,7 @@ static int mmap_frag(abi_ulong real_start,
int prot1, prot_new;
real_end = real_start + qemu_host_page_size;
host_start = g2h(real_start);
host_start = g2h_untagged(real_start);
/* get the protection of the target pages outside the mapping */
prot1 = 0;
@ -218,7 +225,7 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
/* read the corresponding file data */
if (pread(fd, g2h(start), end - start, offset) == -1)
if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
return -1;
/* put final protection */
@ -229,7 +236,7 @@ static int mmap_frag(abi_ulong real_start,
mprotect(host_start, qemu_host_page_size, prot_new);
}
if (prot_new & PROT_WRITE) {
memset(g2h(start), 0, end - start);
memset(g2h_untagged(start), 0, end - start);
}
}
return 0;
@ -338,7 +345,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
* - mremap() with MREMAP_FIXED flag
* - shmat() with SHM_REMAP flag
*/
ptr = mmap(g2h(addr), size, PROT_NONE,
ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
/* ENOMEM, if host address space has no memory */
@ -497,7 +504,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
/* Note: we prefer to control the mapping address. It is
especially important if qemu_host_page_size >
qemu_real_host_page_size */
p = mmap(g2h(start), host_len, host_prot,
p = mmap(g2h_untagged(start), host_len, host_prot,
flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
goto fail;
@ -505,10 +512,10 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
/* update start so that it points to the file position at 'offset' */
host_start = (unsigned long)p;
if (!(flags & MAP_ANONYMOUS)) {
p = mmap(g2h(start), len, host_prot,
p = mmap(g2h_untagged(start), len, host_prot,
flags | MAP_FIXED, fd, host_offset);
if (p == MAP_FAILED) {
munmap(g2h(start), host_len);
munmap(g2h_untagged(start), host_len);
goto fail;
}
host_start += offset - host_offset;
@ -527,7 +534,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
* It can fail only on 64-bit host with 32-bit target.
* On any other target/host host mmap() handles this error correctly.
*/
if (end < start || !guest_range_valid(start, len)) {
if (end < start || !guest_range_valid_untagged(start, len)) {
errno = ENOMEM;
goto fail;
}
@ -548,7 +555,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
-1, 0);
if (retaddr == -1)
goto fail;
if (pread(fd, g2h(start), len, offset) == -1)
if (pread(fd, g2h_untagged(start), len, offset) == -1)
goto fail;
if (!(host_prot & PROT_WRITE)) {
ret = target_mprotect(start, len, target_prot);
@ -592,13 +599,17 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
offset1 = 0;
else
offset1 = offset + real_start - start;
p = mmap(g2h(real_start), real_end - real_start,
p = mmap(g2h_untagged(real_start), real_end - real_start,
host_prot, flags, fd, offset1);
if (p == MAP_FAILED)
goto fail;
}
}
the_end1:
if (flags & MAP_ANONYMOUS) {
page_flags |= PAGE_ANON;
}
page_flags |= PAGE_RESET;
page_set_flags(start, start + len, page_flags);
the_end:
trace_target_mmap_complete(start);
@ -648,7 +659,7 @@ static void mmap_reserve(abi_ulong start, abi_ulong size)
real_end -= qemu_host_page_size;
}
if (real_start != real_end) {
mmap(g2h(real_start), real_end - real_start, PROT_NONE,
mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
-1, 0);
}
@ -664,7 +675,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
if (start & ~TARGET_PAGE_MASK)
return -TARGET_EINVAL;
len = TARGET_PAGE_ALIGN(len);
if (len == 0 || !guest_range_valid(start, len)) {
if (len == 0 || !guest_range_valid_untagged(start, len)) {
return -TARGET_EINVAL;
}
@ -703,7 +714,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
if (reserved_va) {
mmap_reserve(real_start, real_end - real_start);
} else {
ret = munmap(g2h(real_start), real_end - real_start);
ret = munmap(g2h_untagged(real_start), real_end - real_start);
}
}
@ -722,11 +733,11 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
int prot;
void *host_addr;
if (!guest_range_valid(old_addr, old_size) ||
if (!guest_range_valid_untagged(old_addr, old_size) ||
((flags & MREMAP_FIXED) &&
!guest_range_valid(new_addr, new_size)) ||
!guest_range_valid_untagged(new_addr, new_size)) ||
((flags & MREMAP_MAYMOVE) == 0 &&
!guest_range_valid(old_addr, new_size))) {
!guest_range_valid_untagged(old_addr, new_size))) {
errno = ENOMEM;
return -1;
}
@ -734,8 +745,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
mmap_lock();
if (flags & MREMAP_FIXED) {
host_addr = mremap(g2h(old_addr), old_size, new_size,
flags, g2h(new_addr));
host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
flags, g2h_untagged(new_addr));
if (reserved_va && host_addr != MAP_FAILED) {
/* If new and old addresses overlap then the above mremap will
@ -751,8 +762,9 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
errno = ENOMEM;
host_addr = MAP_FAILED;
} else {
host_addr = mremap(g2h(old_addr), old_size, new_size,
flags | MREMAP_FIXED, g2h(mmap_start));
host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
flags | MREMAP_FIXED,
g2h_untagged(mmap_start));
if (reserved_va) {
mmap_reserve(old_addr, old_size);
}
@ -768,14 +780,15 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
}
}
if (prot == 0) {
host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
host_addr = mremap(g2h_untagged(old_addr),
old_size, new_size, flags);
if (host_addr != MAP_FAILED) {
/* Check if address fits target address space */
if (!guest_range_valid(h2g(host_addr), new_size)) {
if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
/* Revert mremap() changes */
host_addr = mremap(g2h(old_addr), new_size, old_size,
flags);
host_addr = mremap(g2h_untagged(old_addr),
new_size, old_size, flags);
errno = ENOMEM;
host_addr = MAP_FAILED;
} else if (reserved_va && old_size > new_size) {
@ -794,7 +807,8 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
new_addr = h2g(host_addr);
prot = page_get_flags(old_addr);
page_set_flags(old_addr, old_addr + old_size, 0);
page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
page_set_flags(new_addr, new_addr + new_size,
prot | PAGE_VALID | PAGE_RESET);
}
tb_invalidate_phys_range(new_addr, new_addr + new_size);
mmap_unlock();


@ -365,7 +365,7 @@ static void restore_user_regs(CPUPPCState *env,
uint64_t v_addr;
/* 64-bit needs to recover the pointer to the vectors from the frame */
__get_user(v_addr, &frame->v_regs);
v_regs = g2h(v_addr);
v_regs = g2h(env_cpu(env), v_addr);
#else
v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
@ -552,7 +552,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
if (get_ppc64_abi(image) < 2) {
/* ELFv1 PPC64 function pointers are pointers to OPD entries. */
struct target_func_ptr *handler =
(struct target_func_ptr *)g2h(ka->_sa_handler);
(struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
env->nip = tswapl(handler->entry);
env->gpr[2] = tswapl(handler->toc);
} else {


@ -7,8 +7,6 @@
#include "exec/cpu_ldst.h"
#undef DEBUG_REMAP
#ifdef DEBUG_REMAP
#endif /* DEBUG_REMAP */
#include "exec/user/abitypes.h"
@ -488,15 +486,23 @@ extern unsigned long guest_stack_size;
/* user access */
#define VERIFY_READ 0
#define VERIFY_WRITE 1 /* implies read access */
#define VERIFY_READ PAGE_READ
#define VERIFY_WRITE (PAGE_READ | PAGE_WRITE)
static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
static inline bool access_ok_untagged(int type, abi_ulong addr, abi_ulong size)
{
return guest_addr_valid(addr) &&
(size == 0 || guest_addr_valid(addr + size - 1)) &&
page_check_range((target_ulong)addr, size,
(type == VERIFY_READ) ? PAGE_READ : (PAGE_READ | PAGE_WRITE)) == 0;
if (size == 0
? !guest_addr_valid_untagged(addr)
: !guest_range_valid_untagged(addr, size)) {
return false;
}
return page_check_range((target_ulong)addr, size, type) == 0;
}
static inline bool access_ok(CPUState *cpu, int type,
abi_ulong addr, abi_ulong size)
{
return access_ok_untagged(type, cpu_untagged_addr(cpu, addr), size);
}
/* NOTE __get_user and __put_user use host pointers and don't check access.
@ -621,8 +627,8 @@ static inline int access_ok(int type, abi_ulong addr, abi_ulong size)
* buffers between the target and host. These internally perform
* locking/unlocking of the memory.
*/
abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
int copy_from_user(void *hptr, abi_ulong gaddr, size_t len);
int copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
/* Functions for accessing guest memory. The tget and tput functions
read/write single values, byteswapping as necessary. The lock_user function
@ -632,56 +638,24 @@ abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len);
/* Lock an area of guest memory into the host. If copy is true then the
host area will have the same contents as the guest. */
static inline void *lock_user(int type, abi_ulong guest_addr, long len, int copy)
{
if (!access_ok(type, guest_addr, len))
return NULL;
#ifdef DEBUG_REMAP
{
void *addr;
addr = g_malloc(len);
if (copy)
memcpy(addr, g2h(guest_addr), len);
else
memset(addr, 0, len);
return addr;
}
#else
return g2h(guest_addr);
#endif
}
void *lock_user(int type, abi_ulong guest_addr, size_t len, bool copy);
/* Unlock an area of guest memory. The first LEN bytes must be
flushed back to guest memory. host_ptr = NULL is explicitly
allowed and does nothing. */
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr,
long len)
{
#ifdef DEBUG_REMAP
if (!host_ptr)
return;
if (host_ptr == g2h(guest_addr))
return;
if (len > 0)
memcpy(g2h(guest_addr), host_ptr, len);
g_free(host_ptr);
#ifndef DEBUG_REMAP
static inline void unlock_user(void *host_ptr, abi_ulong guest_addr, size_t len)
{ }
#else
void unlock_user(void *host_ptr, abi_ulong guest_addr, long len);
#endif
}
/* Return the length of a string in target memory or -TARGET_EFAULT if
access error. */
abi_long target_strlen(abi_ulong gaddr);
ssize_t target_strlen(abi_ulong gaddr);
/* Like lock_user but for null terminated strings. */
static inline void *lock_user_string(abi_ulong guest_addr)
{
abi_long len;
len = target_strlen(guest_addr);
if (len < 0)
return NULL;
return lock_user(VERIFY_READ, guest_addr, (long)(len + 1), 1);
}
void *lock_user_string(abi_ulong guest_addr);
/* Helper macros for locking/unlocking a target struct. */
#define lock_user_struct(type, host_ptr, guest_addr, copy) \

View File

@ -894,6 +894,8 @@ abi_long do_brk(abi_ulong new_brk)
abi_long mapped_addr;
abi_ulong new_alloc_size;
/* brk pointers are always untagged */
DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
if (!new_brk) {
@ -912,7 +914,7 @@ abi_long do_brk(abi_ulong new_brk)
/* Heap contents are initialized to zero, as for anonymous
* mapped pages. */
if (new_brk > target_brk) {
memset(g2h(target_brk), 0, new_brk - target_brk);
memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
}
target_brk = new_brk;
DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
@ -938,7 +940,7 @@ abi_long do_brk(abi_ulong new_brk)
* come from the remaining part of the previous page: it may
* contain garbage data due to a previous heap usage (grown
* then shrunken). */
memset(g2h(target_brk), 0, brk_page - target_brk);
memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
target_brk = new_brk;
brk_page = HOST_PAGE_ALIGN(target_brk);
@ -3524,8 +3526,9 @@ static abi_long do_accept4(int fd, abi_ulong target_addr,
return -TARGET_EINVAL;
}
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
return -TARGET_EFAULT;
}
addr = alloca(addrlen);
@ -3555,8 +3558,9 @@ static abi_long do_getpeername(int fd, abi_ulong target_addr,
return -TARGET_EINVAL;
}
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
return -TARGET_EFAULT;
}
addr = alloca(addrlen);
@ -3586,8 +3590,9 @@ static abi_long do_getsockname(int fd, abi_ulong target_addr,
return -TARGET_EINVAL;
}
if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
return -TARGET_EFAULT;
}
addr = alloca(addrlen);
@ -4599,6 +4604,8 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
int i,ret;
abi_ulong shmlba;
/* shmat pointers are always untagged */
/* find out the length of the shared memory segment */
ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
if (is_error(ret)) {
@ -4615,14 +4622,14 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
return -TARGET_EINVAL;
}
}
if (!guest_range_valid(shmaddr, shm_info.shm_segsz)) {
if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
return -TARGET_EINVAL;
}
mmap_lock();
if (shmaddr)
host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
else {
abi_ulong mmap_start;
@ -4633,7 +4640,8 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
errno = ENOMEM;
host_raddr = (void *)-1;
} else
host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
host_raddr = shmat(shmid, g2h_untagged(mmap_start),
shmflg | SHM_REMAP);
}
if (host_raddr == (void *)-1) {
@ -4643,8 +4651,8 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env,
raddr=h2g((unsigned long)host_raddr);
page_set_flags(raddr, raddr + shm_info.shm_segsz,
PAGE_VALID | PAGE_READ |
((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
PAGE_VALID | PAGE_RESET | PAGE_READ |
(shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
for (i = 0; i < N_SHM_REGIONS; i++) {
if (!shm_regions[i].in_use) {
@ -4665,6 +4673,8 @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
int i;
abi_long rv;
/* shmdt pointers are always untagged */
mmap_lock();
for (i = 0; i < N_SHM_REGIONS; ++i) {
@ -4674,7 +4684,7 @@ static inline abi_long do_shmdt(abi_ulong shmaddr)
break;
}
}
rv = get_errno(shmdt(g2h(shmaddr)));
rv = get_errno(shmdt(g2h_untagged(shmaddr)));
mmap_unlock();
@ -6145,10 +6155,10 @@ static abi_long write_ldt(CPUX86State *env,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (env->ldt.base == -1)
return -TARGET_ENOMEM;
memset(g2h(env->ldt.base), 0,
memset(g2h_untagged(env->ldt.base), 0,
TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
env->ldt.limit = 0xffff;
ldt_table = g2h(env->ldt.base);
ldt_table = g2h_untagged(env->ldt.base);
}
/* NOTE: same code as Linux kernel */
@ -6216,7 +6226,7 @@ static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
#if defined(TARGET_ABI32)
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
uint64_t *gdt_table = g2h(env->gdt.base);
uint64_t *gdt_table = g2h_untagged(env->gdt.base);
struct target_modify_ldt_ldt_s ldt_info;
struct target_modify_ldt_ldt_s *target_ldt_info;
int seg_32bit, contents, read_exec_only, limit_in_pages;
@ -6302,7 +6312,7 @@ install:
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
struct target_modify_ldt_ldt_s *target_ldt_info;
uint64_t *gdt_table = g2h(env->gdt.base);
uint64_t *gdt_table = g2h_untagged(env->gdt.base);
uint32_t base_addr, limit, flags;
int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
int seg_not_present, useable, lm;
@ -7597,8 +7607,8 @@ static int do_safe_futex(int *uaddr, int op, int val,
tricky. However they're probably useless because guest atomic
operations won't work either. */
#if defined(TARGET_NR_futex)
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
target_ulong uaddr2, int val3)
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
target_ulong timeout, target_ulong uaddr2, int val3)
{
struct timespec ts, *pts;
int base_op;
@ -7619,11 +7629,14 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
} else {
pts = NULL;
}
return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
return do_safe_futex(g2h(cpu, uaddr),
op, tswap32(val), pts, NULL, val3);
case FUTEX_WAKE:
return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
return do_safe_futex(g2h(cpu, uaddr),
op, val, NULL, NULL, 0);
case FUTEX_FD:
return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
return do_safe_futex(g2h(cpu, uaddr),
op, val, NULL, NULL, 0);
case FUTEX_REQUEUE:
case FUTEX_CMP_REQUEUE:
case FUTEX_WAKE_OP:
@ -7633,10 +7646,9 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
to satisfy the compiler. We do not need to tswap TIMEOUT
since it's not compared to guest memory. */
pts = (struct timespec *)(uintptr_t) timeout;
return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
(base_op == FUTEX_CMP_REQUEUE
? tswap32(val3)
: val3));
? tswap32(val3) : val3));
default:
return -TARGET_ENOSYS;
}
@ -7644,7 +7656,8 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
#endif
#if defined(TARGET_NR_futex_time64)
static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong timeout,
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
int val, target_ulong timeout,
target_ulong uaddr2, int val3)
{
struct timespec ts, *pts;
@ -7668,11 +7681,12 @@ static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong tim
} else {
pts = NULL;
}
return do_safe_futex(g2h(uaddr), op, tswap32(val), pts, NULL, val3);
return do_safe_futex(g2h(cpu, uaddr), op,
tswap32(val), pts, NULL, val3);
case FUTEX_WAKE:
return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
case FUTEX_FD:
return do_safe_futex(g2h(uaddr), op, val, NULL, NULL, 0);
return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
case FUTEX_REQUEUE:
case FUTEX_CMP_REQUEUE:
case FUTEX_WAKE_OP:
@ -7682,10 +7696,9 @@ static int do_futex_time64(target_ulong uaddr, int op, int val, target_ulong tim
to satisfy the compiler. We do not need to tswap TIMEOUT
since it's not compared to guest memory. */
pts = (struct timespec *)(uintptr_t) timeout;
return do_safe_futex(g2h(uaddr), op, val, pts, g2h(uaddr2),
return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
(base_op == FUTEX_CMP_REQUEUE
? tswap32(val3)
: val3));
? tswap32(val3) : val3));
default:
return -TARGET_ENOSYS;
}
@ -7860,7 +7873,7 @@ static int open_self_maps(void *cpu_env, int fd)
const char *path;
max = h2g_valid(max - 1) ?
max : (uintptr_t) g2h(GUEST_ADDR_MAX) + 1;
max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
if (page_check_range(h2g(min), max - min, flags) == -1) {
continue;
@ -8277,8 +8290,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr);
do_sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
NULL, NULL, 0);
do_sys_futex(g2h(cpu, ts->child_tidptr),
FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}
thread_cpu = NULL;
g_free(ts);
@ -8643,7 +8656,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
if (!arg5) {
ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
} else {
ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
}
ret = get_errno(ret);
@ -9699,6 +9712,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
v5, v6));
}
#else
/* mmap pointers are always untagged */
ret = get_errno(target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
arg5,
@ -9717,8 +9731,10 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return get_errno(ret);
#endif
case TARGET_NR_munmap:
arg1 = cpu_untagged_addr(cpu, arg1);
return get_errno(target_munmap(arg1, arg2));
case TARGET_NR_mprotect:
arg1 = cpu_untagged_addr(cpu, arg1);
{
TaskState *ts = cpu->opaque;
/* Special hack to detect libc making the stack executable. */
@ -9733,20 +9749,22 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
case TARGET_NR_mremap:
arg1 = cpu_untagged_addr(cpu, arg1);
/* mremap new_addr (arg5) is always untagged */
return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
/* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
case TARGET_NR_msync:
return get_errno(msync(g2h(arg1), arg2, arg3));
return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
case TARGET_NR_mlock:
return get_errno(mlock(g2h(arg1), arg2));
return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
case TARGET_NR_munlock:
return get_errno(munlock(g2h(arg1), arg2));
return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
case TARGET_NR_mlockall:
@ -10975,6 +10993,73 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
}
}
return -TARGET_EINVAL;
case TARGET_PR_SET_TAGGED_ADDR_CTRL:
{
abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
CPUARMState *env = cpu_env;
ARMCPU *cpu = env_archcpu(env);
if (cpu_isar_feature(aa64_mte, cpu)) {
valid_mask |= TARGET_PR_MTE_TCF_MASK;
valid_mask |= TARGET_PR_MTE_TAG_MASK;
}
if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
return -TARGET_EINVAL;
}
env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
if (cpu_isar_feature(aa64_mte, cpu)) {
switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
case TARGET_PR_MTE_TCF_NONE:
case TARGET_PR_MTE_TCF_SYNC:
case TARGET_PR_MTE_TCF_ASYNC:
break;
default:
return -EINVAL;
}
/*
* Write PR_MTE_TCF to SCTLR_EL1[TCF0].
* Note that the syscall values are consistent with hw.
*/
env->cp15.sctlr_el[1] =
deposit64(env->cp15.sctlr_el[1], 38, 2,
arg2 >> TARGET_PR_MTE_TCF_SHIFT);
/*
* Write PR_MTE_TAG to GCR_EL1[Exclude].
* Note that the syscall uses an include mask,
* and hardware uses an exclude mask -- invert.
*/
env->cp15.gcr_el1 =
deposit64(env->cp15.gcr_el1, 0, 16,
~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
arm_rebuild_hflags(env);
}
return 0;
}
case TARGET_PR_GET_TAGGED_ADDR_CTRL:
{
abi_long ret = 0;
CPUARMState *env = cpu_env;
ARMCPU *cpu = env_archcpu(env);
if (arg2 || arg3 || arg4 || arg5) {
return -TARGET_EINVAL;
}
if (env->tagged_addr_enable) {
ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
}
if (cpu_isar_feature(aa64_mte, cpu)) {
/* See above. */
ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
<< TARGET_PR_MTE_TCF_SHIFT);
ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
~env->cp15.gcr_el1);
}
return ret;
}
#endif /* AARCH64 */
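        /*
         * Worked example of the TCF/GCR mapping above (a sketch; it assumes
         * the kernel ABI shifts TARGET_PR_MTE_TCF_SHIFT == 1 and
         * TARGET_PR_MTE_TAG_SHIFT == 3, as mirrored by tests/tcg/aarch64/mte.h):
         *   arg2 = TAGGED_ADDR_ENABLE | MTE_TCF_SYNC | (0x3 << 3)   -> 0x1b
         *   SCTLR_EL1[39:38] (TCF0)    = (0x1b >> 1) & 3            -> 1 (sync)
         *   GCR_EL1[15:0]    (Exclude) = ~(0x1b >> 3) & 0xffff      -> 0xfffc
         * i.e. synchronous tag-check faults, with only tags 0 and 1 left
         * available to IRG.
         */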
case PR_GET_SECCOMP:
case PR_SET_SECCOMP:
@ -12237,7 +12322,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
case TARGET_NR_set_tid_address:
return get_errno(set_tid_address((int *)g2h(arg1)));
return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif
case TARGET_NR_tkill:
@ -12324,11 +12409,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
#endif
#ifdef TARGET_NR_futex
case TARGET_NR_futex:
return do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
case TARGET_NR_futex_time64:
return do_futex_time64(arg1, arg2, arg3, arg4, arg5, arg6);
return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
case TARGET_NR_inotify_init:

View File

@ -1311,6 +1311,7 @@ struct target_winsize {
#ifdef TARGET_AARCH64
#define TARGET_PROT_BTI 0x10
#define TARGET_PROT_MTE 0x20
#endif
/* Common */

View File

@ -4,46 +4,93 @@
#include "qemu.h"
void *lock_user(int type, abi_ulong guest_addr, size_t len, bool copy)
{
void *host_addr;
guest_addr = cpu_untagged_addr(thread_cpu, guest_addr);
if (!access_ok_untagged(type, guest_addr, len)) {
return NULL;
}
host_addr = g2h_untagged(guest_addr);
#ifdef DEBUG_REMAP
if (copy) {
host_addr = g_memdup(host_addr, len);
} else {
host_addr = g_malloc0(len);
}
#endif
return host_addr;
}
#ifdef DEBUG_REMAP
void unlock_user(void *host_ptr, abi_ulong guest_addr, size_t len)
{
void *host_ptr_conv;
if (!host_ptr) {
return;
}
host_ptr_conv = g2h(thread_cpu, guest_addr);
if (host_ptr == host_ptr_conv) {
return;
}
if (len != 0) {
memcpy(host_ptr_conv, host_ptr, len);
}
g_free(host_ptr);
}
#endif
void *lock_user_string(abi_ulong guest_addr)
{
ssize_t len = target_strlen(guest_addr);
if (len < 0) {
return NULL;
}
return lock_user(VERIFY_READ, guest_addr, (size_t)len + 1, 1);
}
/* copy_from_user() and copy_to_user() are usually used to copy data
* buffers between the target and host. These internally perform
* locking/unlocking of the memory.
*/
abi_long copy_from_user(void *hptr, abi_ulong gaddr, size_t len)
int copy_from_user(void *hptr, abi_ulong gaddr, size_t len)
{
abi_long ret = 0;
void *ghptr;
int ret = 0;
void *ghptr = lock_user(VERIFY_READ, gaddr, len, 1);
if ((ghptr = lock_user(VERIFY_READ, gaddr, len, 1))) {
if (ghptr) {
memcpy(hptr, ghptr, len);
unlock_user(ghptr, gaddr, 0);
} else
} else {
ret = -TARGET_EFAULT;
}
return ret;
}
abi_long copy_to_user(abi_ulong gaddr, void *hptr, size_t len)
int copy_to_user(abi_ulong gaddr, void *hptr, size_t len)
{
abi_long ret = 0;
void *ghptr;
int ret = 0;
void *ghptr = lock_user(VERIFY_WRITE, gaddr, len, 0);
if ((ghptr = lock_user(VERIFY_WRITE, gaddr, len, 0))) {
if (ghptr) {
memcpy(ghptr, hptr, len);
unlock_user(ghptr, gaddr, len);
} else
} else {
ret = -TARGET_EFAULT;
}
return ret;
}
/* Return the length of a string in target memory or -TARGET_EFAULT if
access error */
abi_long target_strlen(abi_ulong guest_addr1)
ssize_t target_strlen(abi_ulong guest_addr1)
{
uint8_t *ptr;
abi_ulong guest_addr;
int max_len, len;
size_t max_len, len;
guest_addr = guest_addr1;
for(;;) {
@ -55,11 +102,12 @@ abi_long target_strlen(abi_ulong guest_addr1)
unlock_user(ptr, guest_addr, 0);
guest_addr += len;
/* we don't allow wrapping or integer overflow */
if (guest_addr == 0 ||
(guest_addr - guest_addr1) > 0x7fffffff)
if (guest_addr == 0 || (guest_addr - guest_addr1) > 0x7fffffff) {
return -TARGET_EFAULT;
if (len != max_len)
}
if (len != max_len) {
break;
}
}
return guest_addr - guest_addr1;
}
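The calling pattern for these helpers is unchanged by the move; as a minimal sketch against the declarations in qemu.h (the handler name and buffer use here are hypothetical, for illustration only):
/* Hypothetical caller: pin a guest buffer, consume it, release it. */
static abi_long do_example_read_buffer(abi_ulong guest_buf, size_t len)
{
    void *host_buf = lock_user(VERIFY_READ, guest_buf, len, 1);
    if (!host_buf) {
        return -TARGET_EFAULT;          /* invalid range; tag already stripped */
    }
    /* ... read from host_buf ... */
    unlock_user(host_buf, guest_buf, 0);    /* len 0: nothing to copy back */
    return 0;
}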

View File

@ -20,6 +20,9 @@
#ifdef CONFIG_USER_ONLY
#define TARGET_PAGE_BITS 12
# ifdef TARGET_AARCH64
# define TARGET_TAGGED_ADDRESSES
# endif
#else
/*
* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6

View File

@ -205,14 +205,25 @@ static void arm_cpu_reset(DeviceState *dev)
env->vfp.zcr_el[1] = MIN(cpu->sve_max_vq - 1, 3);
}
/*
* Enable TBI0 and TBI1. While the real kernel only enables TBI0,
* turning on both here will produce smaller code and otherwise
* make no difference to the user-level emulation.
*
* In sve_probe_page, we assume that this is set.
* Do not modify this without other changes.
* Enable TBI0 but not TBI1.
* Note that this must match useronly_clean_ptr.
*/
env->cp15.tcr_el[1].raw_tcr = (3ULL << 37);
env->cp15.tcr_el[1].raw_tcr = (1ULL << 37);
/* Enable MTE */
if (cpu_isar_feature(aa64_mte, cpu)) {
/* Enable tag access, but leave TCF0 as No Effect (0). */
env->cp15.sctlr_el[1] |= SCTLR_ATA0;
/*
* Exclude all tags, so that tag 0 is always used.
* This corresponds to Linux current->thread.gcr_incl = 0.
*
* Set RRND, so that helper_irg() will generate a seed later.
* Here in cpu_reset(), the crypto subsystem has not yet been
* initialized.
*/
env->cp15.gcr_el1 = 0x1ffff;
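/* i.e. RRND (bit 16) set, Exclude[15:0] == 0xffff: all sixteen tags excluded. */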
}
#else
/* Reset into the highest available EL */
if (arm_feature(env, ARM_FEATURE_EL3)) {

View File

@ -721,6 +721,11 @@ typedef struct CPUARMState {
const struct arm_boot_info *boot_info;
/* Store GICv3CPUState to access from this struct */
void *gicv3state;
#ifdef TARGET_TAGGED_ADDRESSES
/* Linux syscall tagged address support */
bool tagged_addr_enable;
#endif
} CPUARMState;
static inline void set_feature(CPUARMState *env, int feature)
@ -3603,6 +3608,33 @@ static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
* AArch64 usage of the PAGE_TARGET_* bits for linux-user.
*/
#define PAGE_BTI PAGE_TARGET_1
#define PAGE_MTE PAGE_TARGET_2
#ifdef TARGET_TAGGED_ADDRESSES
/**
* cpu_untagged_addr:
* @cs: CPU context
* @x: tagged address
*
* Remove any address tag from @x. This is explicitly related to the
* linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
*
* There should be a better place to put this, but we need this in
* include/exec/cpu_ldst.h, and not some place linux-user specific.
*/
static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
{
ARMCPU *cpu = ARM_CPU(cs);
if (cpu->env.tagged_addr_enable) {
/*
* TBI is enabled for userspace but not kernelspace addresses.
* Only clear the tag if bit 55 is clear.
*/
x &= sextract64(x, 0, 56);
}
return x;
}
#endif
/*
* Naming convention for isar_feature functions:

View File

@ -542,7 +542,7 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
#ifdef CONFIG_USER_ONLY
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(addr);
uint64_t *haddr = g2h(env_cpu(env), addr);
set_helper_retaddr(ra);
o0 = ldq_le_p(haddr + 0);
@ -612,7 +612,7 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
#ifdef CONFIG_USER_ONLY
/* ??? Enforce alignment. */
uint64_t *haddr = g2h(addr);
uint64_t *haddr = g2h(env_cpu(env), addr);
set_helper_retaddr(ra);
o1 = ldq_be_p(haddr + 0);

View File

@ -26,6 +26,7 @@
#define TARGET_ARM_INTERNALS_H
#include "hw/registerfields.h"
#include "syndrome.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
@ -262,250 +263,6 @@ static inline bool extended_addresses_enabled(CPUARMState *env)
(arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}
/* Valid Syndrome Register EC field values */
enum arm_exception_class {
EC_UNCATEGORIZED = 0x00,
EC_WFX_TRAP = 0x01,
EC_CP15RTTRAP = 0x03,
EC_CP15RRTTRAP = 0x04,
EC_CP14RTTRAP = 0x05,
EC_CP14DTTRAP = 0x06,
EC_ADVSIMDFPACCESSTRAP = 0x07,
EC_FPIDTRAP = 0x08,
EC_PACTRAP = 0x09,
EC_CP14RRTTRAP = 0x0c,
EC_BTITRAP = 0x0d,
EC_ILLEGALSTATE = 0x0e,
EC_AA32_SVC = 0x11,
EC_AA32_HVC = 0x12,
EC_AA32_SMC = 0x13,
EC_AA64_SVC = 0x15,
EC_AA64_HVC = 0x16,
EC_AA64_SMC = 0x17,
EC_SYSTEMREGISTERTRAP = 0x18,
EC_SVEACCESSTRAP = 0x19,
EC_INSNABORT = 0x20,
EC_INSNABORT_SAME_EL = 0x21,
EC_PCALIGNMENT = 0x22,
EC_DATAABORT = 0x24,
EC_DATAABORT_SAME_EL = 0x25,
EC_SPALIGNMENT = 0x26,
EC_AA32_FPTRAP = 0x28,
EC_AA64_FPTRAP = 0x2c,
EC_SERROR = 0x2f,
EC_BREAKPOINT = 0x30,
EC_BREAKPOINT_SAME_EL = 0x31,
EC_SOFTWARESTEP = 0x32,
EC_SOFTWARESTEP_SAME_EL = 0x33,
EC_WATCHPOINT = 0x34,
EC_WATCHPOINT_SAME_EL = 0x35,
EC_AA32_BKPT = 0x38,
EC_VECTORCATCH = 0x3a,
EC_AA64_BKPT = 0x3c,
};
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
static inline uint32_t syn_get_ec(uint32_t syn)
{
return syn >> ARM_EL_EC_SHIFT;
}
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
* few cases the value in HSR for exceptions taken to AArch32 Hyp
* mode differs slightly, and we fix this up when populating HSR in
* arm_cpu_do_interrupt_aarch32_hyp().
* The exception is FP/SIMD access traps -- these report extra information
* when taking an exception to AArch32. For those we include the extra coproc
* and TA fields, and mask them out when taking the exception to AArch64.
*/
static inline uint32_t syn_uncategorized(void)
{
return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}
static inline uint32_t syn_aa64_svc(uint32_t imm16)
{
return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa64_hvc(uint32_t imm16)
{
return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa64_smc(uint32_t imm16)
{
return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
| (is_16bit ? 0 : ARM_EL_IL);
}
static inline uint32_t syn_aa32_hvc(uint32_t imm16)
{
return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa32_smc(void)
{
return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}
static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
| (is_16bit ? 0 : ARM_EL_IL);
}
static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
int crn, int crm, int rt,
int isread)
{
return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
| (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
| (crm << 1) | isread;
}
static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
int crn, int crm, int rt, int isread,
bool is_16bit)
{
return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
| (crn << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
int crn, int crm, int rt, int isread,
bool is_16bit)
{
return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
| (crn << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
int rt, int rt2, int isread,
bool is_16bit)
{
return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc1 << 16)
| (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
int rt, int rt2, int isread,
bool is_16bit)
{
return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc1 << 16)
| (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
/* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | 0xa;
}
static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
{
/* AArch32 SIMD trap: TA == 1 coproc == 0 */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (1 << 5);
}
static inline uint32_t syn_sve_access_trap(void)
{
return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
}
static inline uint32_t syn_pactrap(void)
{
return EC_PACTRAP << ARM_EL_EC_SHIFT;
}
static inline uint32_t syn_btitrap(int btype)
{
return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
}
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}
static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
int ea, int cm, int s1ptw,
int wnr, int fsc)
{
return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL
| (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
| (wnr << 6) | fsc;
}
static inline uint32_t syn_data_abort_with_iss(int same_el,
int sas, int sse, int srt,
int sf, int ar,
int ea, int cm, int s1ptw,
int wnr, int fsc,
bool is_16bit)
{
return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
| (sf << 15) | (ar << 14)
| (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}
static inline uint32_t syn_swstep(int same_el, int isv, int ex)
{
return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
}
static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
{
return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
}
static inline uint32_t syn_breakpoint(int same_el)
{
return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | 0x22;
}
static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
(is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
(cv << 24) | (cond << 20) | ti;
}
/* Update a QEMU watchpoint based on the information the guest has set in the
* DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
*/
@ -1425,9 +1182,9 @@ static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
*/
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
/* TBI is known to be enabled. */
#ifdef CONFIG_USER_ONLY
ptr = sextract64(ptr, 0, 56);
/* TBI0 is known to be enabled, while TBI1 is disabled. */
ptr &= sextract64(ptr, 0, 56);
#endif
return ptr;
}
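For reference, the same masking trick in plain C, as a standalone sketch (the pointer values are arbitrary examples; with TBI0 on and TBI1 off, only addresses with bit 55 clear lose their tag byte):
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
/* Mirrors useronly_clean_ptr(): AND with the sign extension of bit 55. */
static uint64_t clean(uint64_t ptr)
{
    /* sextract64(ptr, 0, 56): replicate bit 55 into bits 63..56 */
    uint64_t ext = (ptr & (1ULL << 55)) ? (ptr | 0xff00000000000000ULL)
                                        : (ptr & 0x00ffffffffffffffULL);
    return ptr & ext;
}
int main(void)
{
    /* bit 55 clear: TBI0 applies, the top (tag) byte is stripped */
    printf("%016" PRIx64 "\n", clean(0x7a005500deadbeefULL)); /* 00005500deadbeef */
    /* bit 55 set: TBI1 is off, the pointer passes through untouched */
    printf("%016" PRIx64 "\n", clean(0xff80123400000000ULL)); /* ff80123400000000 */
    return 0;
}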

View File

@ -78,8 +78,33 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
/* Tag storage not implemented. */
return NULL;
uint64_t clean_ptr = useronly_clean_ptr(ptr);
int flags = page_get_flags(clean_ptr);
uint8_t *tags;
uintptr_t index;
if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE : PAGE_READ))) {
/* SIGSEGV */
arm_cpu_tlb_fill(env_cpu(env), ptr, ptr_size, ptr_access,
ptr_mmu_idx, false, ra);
g_assert_not_reached();
}
/* Require both MAP_ANON and PROT_MTE for the page. */
if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
return NULL;
}
tags = page_get_target_data(clean_ptr);
if (tags == NULL) {
size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
tags = page_alloc_target_data(clean_ptr, alloc_size);
assert(tags != NULL);
}
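    /*
     * Worked numbers, assuming 4 KiB target pages (TARGET_PAGE_BITS == 12)
     * and LOG2_TAG_GRANULE == 4: one tag byte covers two 16-byte granules,
     * so alloc_size == 4096 >> 5 == 128 bytes of tag storage per page, and
     * the index below is extract32(ptr, 5, 7), i.e. page_offset / 32.
     */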
index = extract32(ptr, LOG2_TAG_GRANULE + 1,
TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
return tags + index;
#else
uintptr_t index;
CPUIOTLBEntry *iotlbentry;
@ -565,6 +590,16 @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
select = 0;
}
env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
/*
* Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
* which then sends a SIGSEGV when the thread is next scheduled.
* This cpu will return to the main loop at the end of the TB,
* which is rather sooner than "normal". But the alternative
* is waiting until the next syscall.
*/
qemu_cpu_kick(env_cpu(env));
#endif
break;
default:

target/arm/syndrome.h (new file)
View File

@ -0,0 +1,273 @@
/*
* QEMU ARM CPU -- syndrome functions and types
*
* Copyright (c) 2014 Linaro Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This header defines functions, types, etc which need to be shared
* between different source files within target/arm/ but which are
* private to it and not required by the rest of QEMU.
*/
#ifndef TARGET_ARM_SYNDROME_H
#define TARGET_ARM_SYNDROME_H
/* Valid Syndrome Register EC field values */
enum arm_exception_class {
EC_UNCATEGORIZED = 0x00,
EC_WFX_TRAP = 0x01,
EC_CP15RTTRAP = 0x03,
EC_CP15RRTTRAP = 0x04,
EC_CP14RTTRAP = 0x05,
EC_CP14DTTRAP = 0x06,
EC_ADVSIMDFPACCESSTRAP = 0x07,
EC_FPIDTRAP = 0x08,
EC_PACTRAP = 0x09,
EC_CP14RRTTRAP = 0x0c,
EC_BTITRAP = 0x0d,
EC_ILLEGALSTATE = 0x0e,
EC_AA32_SVC = 0x11,
EC_AA32_HVC = 0x12,
EC_AA32_SMC = 0x13,
EC_AA64_SVC = 0x15,
EC_AA64_HVC = 0x16,
EC_AA64_SMC = 0x17,
EC_SYSTEMREGISTERTRAP = 0x18,
EC_SVEACCESSTRAP = 0x19,
EC_INSNABORT = 0x20,
EC_INSNABORT_SAME_EL = 0x21,
EC_PCALIGNMENT = 0x22,
EC_DATAABORT = 0x24,
EC_DATAABORT_SAME_EL = 0x25,
EC_SPALIGNMENT = 0x26,
EC_AA32_FPTRAP = 0x28,
EC_AA64_FPTRAP = 0x2c,
EC_SERROR = 0x2f,
EC_BREAKPOINT = 0x30,
EC_BREAKPOINT_SAME_EL = 0x31,
EC_SOFTWARESTEP = 0x32,
EC_SOFTWARESTEP_SAME_EL = 0x33,
EC_WATCHPOINT = 0x34,
EC_WATCHPOINT_SAME_EL = 0x35,
EC_AA32_BKPT = 0x38,
EC_VECTORCATCH = 0x3a,
EC_AA64_BKPT = 0x3c,
};
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
static inline uint32_t syn_get_ec(uint32_t syn)
{
return syn >> ARM_EL_EC_SHIFT;
}
/*
* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
* few cases the value in HSR for exceptions taken to AArch32 Hyp
* mode differs slightly, and we fix this up when populating HSR in
* arm_cpu_do_interrupt_aarch32_hyp().
* The exception is FP/SIMD access traps -- these report extra information
* when taking an exception to AArch32. For those we include the extra coproc
* and TA fields, and mask them out when taking the exception to AArch64.
*/
static inline uint32_t syn_uncategorized(void)
{
return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}
static inline uint32_t syn_aa64_svc(uint32_t imm16)
{
return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa64_hvc(uint32_t imm16)
{
return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa64_smc(uint32_t imm16)
{
return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
| (is_16bit ? 0 : ARM_EL_IL);
}
static inline uint32_t syn_aa32_hvc(uint32_t imm16)
{
return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa32_smc(void)
{
return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}
static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
| (is_16bit ? 0 : ARM_EL_IL);
}
static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
int crn, int crm, int rt,
int isread)
{
return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
| (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
| (crm << 1) | isread;
}
static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
int crn, int crm, int rt, int isread,
bool is_16bit)
{
return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
| (crn << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
int crn, int crm, int rt, int isread,
bool is_16bit)
{
return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
| (crn << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
int rt, int rt2, int isread,
bool is_16bit)
{
return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc1 << 16)
| (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
int rt, int rt2, int isread,
bool is_16bit)
{
return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc1 << 16)
| (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
/* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | 0xa;
}
static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
{
/* AArch32 SIMD trap: TA == 1 coproc == 0 */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (1 << 5);
}
static inline uint32_t syn_sve_access_trap(void)
{
return EC_SVEACCESSTRAP << ARM_EL_EC_SHIFT;
}
static inline uint32_t syn_pactrap(void)
{
return EC_PACTRAP << ARM_EL_EC_SHIFT;
}
static inline uint32_t syn_btitrap(int btype)
{
return (EC_BTITRAP << ARM_EL_EC_SHIFT) | btype;
}
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}
static inline uint32_t syn_data_abort_no_iss(int same_el, int fnv,
int ea, int cm, int s1ptw,
int wnr, int fsc)
{
return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL
| (fnv << 10) | (ea << 9) | (cm << 8) | (s1ptw << 7)
| (wnr << 6) | fsc;
}
static inline uint32_t syn_data_abort_with_iss(int same_el,
int sas, int sse, int srt,
int sf, int ar,
int ea, int cm, int s1ptw,
int wnr, int fsc,
bool is_16bit)
{
return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
| ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
| (sf << 15) | (ar << 14)
| (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}
static inline uint32_t syn_swstep(int same_el, int isv, int ex)
{
return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
}
static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
{
return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
}
static inline uint32_t syn_breakpoint(int same_el)
{
return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
| ARM_EL_IL | 0x22;
}
static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
(is_16bit ? 0 : (1 << ARM_EL_IL_SHIFT)) |
(cv << 24) | (cond << 20) | ti;
}
#endif /* TARGET_ARM_SYNDROME_H */
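As a quick sanity check of the encodings above: syn_aa64_svc(0) is (0x15 << 26) | (1 << 25) = 0x56000000, and syn_get_ec(0x56000000) shifts that back down to 0x15 == EC_AA64_SVC, so the EC field round-trips as expected.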

View File

@ -154,21 +154,24 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
bool probe, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
ARMMMUFaultInfo fi = {};
#ifdef CONFIG_USER_ONLY
cpu->env.exception.vaddress = address;
if (access_type == MMU_INST_FETCH) {
cs->exception_index = EXCP_PREFETCH_ABORT;
int flags = page_get_flags(useronly_clean_ptr(address));
if (flags & PAGE_VALID) {
fi.type = ARMFault_Permission;
} else {
cs->exception_index = EXCP_DATA_ABORT;
fi.type = ARMFault_Translation;
}
cpu_loop_exit_restore(cs, retaddr);
/* now we have a real cpu fault */
cpu_restore_state(cs, retaddr, true);
arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
#else
hwaddr phys_addr;
target_ulong page_size;
int prot, ret;
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
ARMCacheAttrs cacheattrs = {};
/*

View File

@ -183,17 +183,20 @@ static void gen_top_byte_ignore(DisasContext *s, TCGv_i64 dst,
/* Sign-extend from bit 55. */
tcg_gen_sextract_i64(dst, src, 0, 56);
if (tbi != 3) {
TCGv_i64 tcg_zero = tcg_const_i64(0);
/*
* The two TBI bits differ.
* If tbi0, then !tbi1: only use the extension if positive.
* if !tbi0, then tbi1: only use the extension if negative.
*/
tcg_gen_movcond_i64(tbi == 1 ? TCG_COND_GE : TCG_COND_LT,
dst, dst, tcg_zero, dst, src);
tcg_temp_free_i64(tcg_zero);
switch (tbi) {
case 1:
/* tbi0 but !tbi1: only use the extension if positive */
tcg_gen_and_i64(dst, dst, src);
break;
case 2:
/* !tbi0 but tbi1: only use the extension if negative */
tcg_gen_or_i64(dst, dst, src);
break;
case 3:
/* tbi0 and tbi1: always use the extension */
break;
default:
g_assert_not_reached();
}
}
}
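Spelled out for the top byte of dst: with tbi == 1 the AND keeps the cleared extension only when bit 55 of src is 0 and reproduces src otherwise; with tbi == 2 the OR forces the 0xff extension only when bit 55 is 1; with tbi == 3 the sign extension of bit 55 always replaces the tag byte.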

View File

@ -63,7 +63,7 @@ static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
#ifdef CONFIG_USER_ONLY
uint32_t old, new, cmp;
uint32_t *haddr = g2h(addr - 1);
uint32_t *haddr = g2h(env_cpu(env), addr - 1);
old = *haddr;
while (1) {
new = (old & ~mask) | (val & mask);

View File

@ -66,7 +66,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
#ifdef CONFIG_USER_ONLY
{
uint64_t *haddr = g2h(a0);
uint64_t *haddr = g2h(env_cpu(env), a0);
cmpv = cpu_to_le64(cmpv);
newv = cpu_to_le64(newv);
oldv = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);

View File

@ -1780,7 +1780,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
if (parallel) {
#ifdef CONFIG_USER_ONLY
uint32_t *haddr = g2h(a1);
uint32_t *haddr = g2h(env_cpu(env), a1);
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
#else
TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
@ -1804,7 +1804,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
uint64_t *haddr = g2h(a1);
uint64_t *haddr = g2h(env_cpu(env), a1);
ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
# else
TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);

View File

@ -139,6 +139,7 @@ qtests_npcm7xx = \
'npcm7xx_gpio-test',
'npcm7xx_pwm-test',
'npcm7xx_rng-test',
'npcm7xx_smbus-test',
'npcm7xx_timer-test',
'npcm7xx_watchdog_timer-test']
qtests_arm = \

View File

@ -0,0 +1,495 @@
/*
* QTests for Nuvoton NPCM7xx SMBus Modules.
*
* Copyright 2020 Google LLC
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "libqos/i2c.h"
#include "libqos/libqtest.h"
#include "hw/misc/tmp105_regs.h"
#define NR_SMBUS_DEVICES 16
#define SMBUS_ADDR(x) (0xf0080000 + 0x1000 * (x))
#define SMBUS_IRQ(x) (64 + (x))
#define EVB_DEVICE_ADDR 0x48
#define INVALID_DEVICE_ADDR 0x01
const int evb_bus_list[] = {0, 1, 2, 6};
/* Offsets */
enum CommonRegister {
OFFSET_SDA = 0x0,
OFFSET_ST = 0x2,
OFFSET_CST = 0x4,
OFFSET_CTL1 = 0x6,
OFFSET_ADDR1 = 0x8,
OFFSET_CTL2 = 0xa,
OFFSET_ADDR2 = 0xc,
OFFSET_CTL3 = 0xe,
OFFSET_CST2 = 0x18,
OFFSET_CST3 = 0x19,
};
enum NPCM7xxSMBusBank0Register {
OFFSET_ADDR3 = 0x10,
OFFSET_ADDR7 = 0x11,
OFFSET_ADDR4 = 0x12,
OFFSET_ADDR8 = 0x13,
OFFSET_ADDR5 = 0x14,
OFFSET_ADDR9 = 0x15,
OFFSET_ADDR6 = 0x16,
OFFSET_ADDR10 = 0x17,
OFFSET_CTL4 = 0x1a,
OFFSET_CTL5 = 0x1b,
OFFSET_SCLLT = 0x1c,
OFFSET_FIF_CTL = 0x1d,
OFFSET_SCLHT = 0x1e,
};
enum NPCM7xxSMBusBank1Register {
OFFSET_FIF_CTS = 0x10,
OFFSET_FAIR_PER = 0x11,
OFFSET_TXF_CTL = 0x12,
OFFSET_T_OUT = 0x14,
OFFSET_TXF_STS = 0x1a,
OFFSET_RXF_STS = 0x1c,
OFFSET_RXF_CTL = 0x1e,
};
/* ST fields */
#define ST_STP BIT(7)
#define ST_SDAST BIT(6)
#define ST_BER BIT(5)
#define ST_NEGACK BIT(4)
#define ST_STASTR BIT(3)
#define ST_NMATCH BIT(2)
#define ST_MODE BIT(1)
#define ST_XMIT BIT(0)
/* CST fields */
#define CST_ARPMATCH BIT(7)
#define CST_MATCHAF BIT(6)
#define CST_TGSCL BIT(5)
#define CST_TSDA BIT(4)
#define CST_GCMATCH BIT(3)
#define CST_MATCH BIT(2)
#define CST_BB BIT(1)
#define CST_BUSY BIT(0)
/* CST2 fields */
#define CST2_INSTTS BIT(7)
#define CST2_MATCH7F BIT(6)
#define CST2_MATCH6F BIT(5)
#define CST2_MATCH5F BIT(4)
#define CST2_MATCH4F BIT(3)
#define CST2_MATCH3F BIT(2)
#define CST2_MATCH2F BIT(1)
#define CST2_MATCH1F BIT(0)
/* CST3 fields */
#define CST3_EO_BUSY BIT(7)
#define CST3_MATCH10F BIT(2)
#define CST3_MATCH9F BIT(1)
#define CST3_MATCH8F BIT(0)
/* CTL1 fields */
#define CTL1_STASTRE BIT(7)
#define CTL1_NMINTE BIT(6)
#define CTL1_GCMEN BIT(5)
#define CTL1_ACK BIT(4)
#define CTL1_EOBINTE BIT(3)
#define CTL1_INTEN BIT(2)
#define CTL1_STOP BIT(1)
#define CTL1_START BIT(0)
/* CTL2 fields */
#define CTL2_SCLFRQ(rv) extract8((rv), 1, 6)
#define CTL2_ENABLE BIT(0)
/* CTL3 fields */
#define CTL3_SCL_LVL BIT(7)
#define CTL3_SDA_LVL BIT(6)
#define CTL3_BNK_SEL BIT(5)
#define CTL3_400K_MODE BIT(4)
#define CTL3_IDL_START BIT(3)
#define CTL3_ARPMEN BIT(2)
#define CTL3_SCLFRQ(rv) extract8((rv), 0, 2)
/* ADDR fields */
#define ADDR_EN BIT(7)
#define ADDR_A(rv) extract8((rv), 0, 6)
/* FIF_CTL fields */
#define FIF_CTL_FIFO_EN BIT(4)
/* FIF_CTS fields */
#define FIF_CTS_CLR_FIFO BIT(6)
#define FIF_CTS_RFTE_IE BIT(3)
#define FIF_CTS_RXF_TXE BIT(1)
/* TXF_CTL fields */
#define TXF_CTL_THR_TXIE BIT(6)
#define TXF_CTL_TX_THR(rv) extract8((rv), 0, 5)
/* TXF_STS fields */
#define TXF_STS_TX_THST BIT(6)
#define TXF_STS_TX_BYTES(rv) extract8((rv), 0, 5)
/* RXF_CTL fields */
#define RXF_CTL_THR_RXIE BIT(6)
#define RXF_CTL_LAST BIT(5)
#define RXF_CTL_RX_THR(rv) extract8((rv), 0, 5)
/* RXF_STS fields */
#define RXF_STS_RX_THST BIT(6)
#define RXF_STS_RX_BYTES(rv) extract8((rv), 0, 5)
static void choose_bank(QTestState *qts, uint64_t base_addr, uint8_t bank)
{
uint8_t ctl3 = qtest_readb(qts, base_addr + OFFSET_CTL3);
if (bank) {
ctl3 |= CTL3_BNK_SEL;
} else {
ctl3 &= ~CTL3_BNK_SEL;
}
qtest_writeb(qts, base_addr + OFFSET_CTL3, ctl3);
}
static void check_running(QTestState *qts, uint64_t base_addr)
{
g_assert_true(qtest_readb(qts, base_addr + OFFSET_CST) & CST_BUSY);
g_assert_true(qtest_readb(qts, base_addr + OFFSET_CST) & CST_BB);
}
static void check_stopped(QTestState *qts, uint64_t base_addr)
{
uint8_t cst3;
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_ST), ==, 0);
g_assert_false(qtest_readb(qts, base_addr + OFFSET_CST) & CST_BUSY);
g_assert_false(qtest_readb(qts, base_addr + OFFSET_CST) & CST_BB);
cst3 = qtest_readb(qts, base_addr + OFFSET_CST3);
g_assert_true(cst3 & CST3_EO_BUSY);
qtest_writeb(qts, base_addr + OFFSET_CST3, cst3);
cst3 = qtest_readb(qts, base_addr + OFFSET_CST3);
g_assert_false(cst3 & CST3_EO_BUSY);
}
static void enable_bus(QTestState *qts, uint64_t base_addr)
{
uint8_t ctl2 = qtest_readb(qts, base_addr + OFFSET_CTL2);
ctl2 |= CTL2_ENABLE;
qtest_writeb(qts, base_addr + OFFSET_CTL2, ctl2);
g_assert_true(qtest_readb(qts, base_addr + OFFSET_CTL2) & CTL2_ENABLE);
}
static void disable_bus(QTestState *qts, uint64_t base_addr)
{
uint8_t ctl2 = qtest_readb(qts, base_addr + OFFSET_CTL2);
ctl2 &= ~CTL2_ENABLE;
qtest_writeb(qts, base_addr + OFFSET_CTL2, ctl2);
g_assert_false(qtest_readb(qts, base_addr + OFFSET_CTL2) & CTL2_ENABLE);
}
static void start_transfer(QTestState *qts, uint64_t base_addr)
{
uint8_t ctl1;
ctl1 = CTL1_START | CTL1_INTEN | CTL1_STASTRE;
qtest_writeb(qts, base_addr + OFFSET_CTL1, ctl1);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_CTL1), ==,
CTL1_INTEN | CTL1_STASTRE | CTL1_INTEN);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_ST), ==,
ST_MODE | ST_XMIT | ST_SDAST);
check_running(qts, base_addr);
}
static void stop_transfer(QTestState *qts, uint64_t base_addr)
{
uint8_t ctl1 = qtest_readb(qts, base_addr + OFFSET_CTL1);
ctl1 &= ~(CTL1_START | CTL1_ACK);
ctl1 |= CTL1_STOP | CTL1_INTEN | CTL1_EOBINTE;
qtest_writeb(qts, base_addr + OFFSET_CTL1, ctl1);
ctl1 = qtest_readb(qts, base_addr + OFFSET_CTL1);
g_assert_false(ctl1 & CTL1_STOP);
}
static void send_byte(QTestState *qts, uint64_t base_addr, uint8_t byte)
{
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_ST), ==,
ST_MODE | ST_XMIT | ST_SDAST);
qtest_writeb(qts, base_addr + OFFSET_SDA, byte);
}
static bool check_recv(QTestState *qts, uint64_t base_addr)
{
uint8_t st, fif_ctl, rxf_ctl, rxf_sts;
bool fifo;
st = qtest_readb(qts, base_addr + OFFSET_ST);
choose_bank(qts, base_addr, 0);
fif_ctl = qtest_readb(qts, base_addr + OFFSET_FIF_CTL);
fifo = fif_ctl & FIF_CTL_FIFO_EN;
if (!fifo) {
return st == (ST_MODE | ST_SDAST);
}
choose_bank(qts, base_addr, 1);
rxf_ctl = qtest_readb(qts, base_addr + OFFSET_RXF_CTL);
rxf_sts = qtest_readb(qts, base_addr + OFFSET_RXF_STS);
if ((rxf_ctl & RXF_CTL_THR_RXIE) && RXF_STS_RX_BYTES(rxf_sts) < 16) {
return st == ST_MODE;
} else {
return st == (ST_MODE | ST_SDAST);
}
}
static uint8_t recv_byte(QTestState *qts, uint64_t base_addr)
{
g_assert_true(check_recv(qts, base_addr));
return qtest_readb(qts, base_addr + OFFSET_SDA);
}
static void send_address(QTestState *qts, uint64_t base_addr, uint8_t addr,
bool recv, bool valid)
{
uint8_t encoded_addr = (addr << 1) | (recv ? 1 : 0);
uint8_t st;
qtest_writeb(qts, base_addr + OFFSET_SDA, encoded_addr);
st = qtest_readb(qts, base_addr + OFFSET_ST);
if (valid) {
if (recv) {
g_assert_cmphex(st, ==, ST_MODE | ST_SDAST | ST_STASTR);
} else {
g_assert_cmphex(st, ==, ST_MODE | ST_XMIT | ST_SDAST | ST_STASTR);
}
qtest_writeb(qts, base_addr + OFFSET_ST, ST_STASTR);
st = qtest_readb(qts, base_addr + OFFSET_ST);
if (recv) {
g_assert_true(check_recv(qts, base_addr));
} else {
g_assert_cmphex(st, ==, ST_MODE | ST_XMIT | ST_SDAST);
}
} else {
if (recv) {
g_assert_cmphex(st, ==, ST_MODE | ST_NEGACK);
} else {
g_assert_cmphex(st, ==, ST_MODE | ST_XMIT | ST_NEGACK);
}
}
}
static void send_nack(QTestState *qts, uint64_t base_addr)
{
uint8_t ctl1 = qtest_readb(qts, base_addr + OFFSET_CTL1);
ctl1 &= ~(CTL1_START | CTL1_STOP);
ctl1 |= CTL1_ACK | CTL1_INTEN;
qtest_writeb(qts, base_addr + OFFSET_CTL1, ctl1);
}
static void start_fifo_mode(QTestState *qts, uint64_t base_addr)
{
choose_bank(qts, base_addr, 0);
qtest_writeb(qts, base_addr + OFFSET_FIF_CTL, FIF_CTL_FIFO_EN);
g_assert_true(qtest_readb(qts, base_addr + OFFSET_FIF_CTL) &
FIF_CTL_FIFO_EN);
choose_bank(qts, base_addr, 1);
qtest_writeb(qts, base_addr + OFFSET_FIF_CTS,
FIF_CTS_CLR_FIFO | FIF_CTS_RFTE_IE);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_FIF_CTS), ==,
FIF_CTS_RFTE_IE);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_TXF_STS), ==, 0);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_RXF_STS), ==, 0);
}
static void start_recv_fifo(QTestState *qts, uint64_t base_addr, uint8_t bytes)
{
choose_bank(qts, base_addr, 1);
qtest_writeb(qts, base_addr + OFFSET_TXF_CTL, 0);
qtest_writeb(qts, base_addr + OFFSET_RXF_CTL,
RXF_CTL_THR_RXIE | RXF_CTL_LAST | bytes);
}
/* Check the SMBus's status is set correctly when disabled. */
static void test_disable_bus(gconstpointer data)
{
intptr_t index = (intptr_t)data;
uint64_t base_addr = SMBUS_ADDR(index);
QTestState *qts = qtest_init("-machine npcm750-evb");
disable_bus(qts, base_addr);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_CTL1), ==, 0);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_ST), ==, 0);
g_assert_false(qtest_readb(qts, base_addr + OFFSET_CST3) & CST3_EO_BUSY);
g_assert_cmphex(qtest_readb(qts, base_addr + OFFSET_CST), ==, 0);
qtest_quit(qts);
}
/* Check the SMBus returns a NACK for an invalid address. */
static void test_invalid_addr(gconstpointer data)
{
intptr_t index = (intptr_t)data;
uint64_t base_addr = SMBUS_ADDR(index);
int irq = SMBUS_IRQ(index);
QTestState *qts = qtest_init("-machine npcm750-evb");
qtest_irq_intercept_in(qts, "/machine/soc/a9mpcore/gic");
enable_bus(qts, base_addr);
g_assert_false(qtest_get_irq(qts, irq));
start_transfer(qts, base_addr);
send_address(qts, base_addr, INVALID_DEVICE_ADDR, false, false);
g_assert_true(qtest_get_irq(qts, irq));
stop_transfer(qts, base_addr);
check_running(qts, base_addr);
qtest_writeb(qts, base_addr + OFFSET_ST, ST_NEGACK);
g_assert_false(qtest_readb(qts, base_addr + OFFSET_ST) & ST_NEGACK);
check_stopped(qts, base_addr);
qtest_quit(qts);
}
/* Check the SMBus can send and receive bytes to a device in single mode. */
static void test_single_mode(gconstpointer data)
{
intptr_t index = (intptr_t)data;
uint64_t base_addr = SMBUS_ADDR(index);
int irq = SMBUS_IRQ(index);
uint8_t value = 0x60;
QTestState *qts = qtest_init("-machine npcm750-evb");
qtest_irq_intercept_in(qts, "/machine/soc/a9mpcore/gic");
enable_bus(qts, base_addr);
/* Sending */
g_assert_false(qtest_get_irq(qts, irq));
start_transfer(qts, base_addr);
g_assert_true(qtest_get_irq(qts, irq));
send_address(qts, base_addr, EVB_DEVICE_ADDR, false, true);
send_byte(qts, base_addr, TMP105_REG_CONFIG);
send_byte(qts, base_addr, value);
stop_transfer(qts, base_addr);
check_stopped(qts, base_addr);
/* Receiving */
start_transfer(qts, base_addr);
send_address(qts, base_addr, EVB_DEVICE_ADDR, false, true);
send_byte(qts, base_addr, TMP105_REG_CONFIG);
start_transfer(qts, base_addr);
send_address(qts, base_addr, EVB_DEVICE_ADDR, true, true);
send_nack(qts, base_addr);
stop_transfer(qts, base_addr);
check_running(qts, base_addr);
g_assert_cmphex(recv_byte(qts, base_addr), ==, value);
check_stopped(qts, base_addr);
qtest_quit(qts);
}
/* Check the SMBus can send and receive bytes in FIFO mode. */
static void test_fifo_mode(gconstpointer data)
{
intptr_t index = (intptr_t)data;
uint64_t base_addr = SMBUS_ADDR(index);
int irq = SMBUS_IRQ(index);
uint8_t value = 0x60;
QTestState *qts = qtest_init("-machine npcm750-evb");
qtest_irq_intercept_in(qts, "/machine/soc/a9mpcore/gic");
enable_bus(qts, base_addr);
start_fifo_mode(qts, base_addr);
g_assert_false(qtest_get_irq(qts, irq));
/* Sending */
start_transfer(qts, base_addr);
send_address(qts, base_addr, EVB_DEVICE_ADDR, false, true);
choose_bank(qts, base_addr, 1);
g_assert_true(qtest_readb(qts, base_addr + OFFSET_FIF_CTS) &
FIF_CTS_RXF_TXE);
qtest_writeb(qts, base_addr + OFFSET_TXF_CTL, TXF_CTL_THR_TXIE);
send_byte(qts, base_addr, TMP105_REG_CONFIG);
send_byte(qts, base_addr, value);
g_assert_true(qtest_readb(qts, base_addr + OFFSET_FIF_CTS) &
FIF_CTS_RXF_TXE);
g_assert_true(qtest_readb(qts, base_addr + OFFSET_TXF_STS) &
TXF_STS_TX_THST);
g_assert_cmpuint(TXF_STS_TX_BYTES(
qtest_readb(qts, base_addr + OFFSET_TXF_STS)), ==, 0);
g_assert_true(qtest_get_irq(qts, irq));
stop_transfer(qts, base_addr);
check_stopped(qts, base_addr);
/* Receiving */
start_fifo_mode(qts, base_addr);
start_transfer(qts, base_addr);
send_address(qts, base_addr, EVB_DEVICE_ADDR, false, true);
send_byte(qts, base_addr, TMP105_REG_CONFIG);
start_transfer(qts, base_addr);
qtest_writeb(qts, base_addr + OFFSET_FIF_CTS, FIF_CTS_RXF_TXE);
start_recv_fifo(qts, base_addr, 1);
send_address(qts, base_addr, EVB_DEVICE_ADDR, true, true);
g_assert_false(qtest_readb(qts, base_addr + OFFSET_FIF_CTS) &
FIF_CTS_RXF_TXE);
g_assert_true(qtest_readb(qts, base_addr + OFFSET_RXF_STS) &
RXF_STS_RX_THST);
g_assert_cmpuint(RXF_STS_RX_BYTES(
qtest_readb(qts, base_addr + OFFSET_RXF_STS)), ==, 1);
send_nack(qts, base_addr);
stop_transfer(qts, base_addr);
check_running(qts, base_addr);
g_assert_cmphex(recv_byte(qts, base_addr), ==, value);
g_assert_cmpuint(RXF_STS_RX_BYTES(
qtest_readb(qts, base_addr + OFFSET_RXF_STS)), ==, 0);
check_stopped(qts, base_addr);
qtest_quit(qts);
}
static void smbus_add_test(const char *name, int index, GTestDataFunc fn)
{
g_autofree char *full_name = g_strdup_printf(
"npcm7xx_smbus[%d]/%s", index, name);
qtest_add_data_func(full_name, (void *)(intptr_t)index, fn);
}
#define add_test(name, td) smbus_add_test(#name, td, test_##name)
int main(int argc, char **argv)
{
int i;
g_test_init(&argc, &argv, NULL);
g_test_set_nonfatal_assertions();
for (i = 0; i < NR_SMBUS_DEVICES; ++i) {
add_test(disable_bus, i);
add_test(invalid_addr, i);
}
for (i = 0; i < ARRAY_SIZE(evb_bus_list); ++i) {
add_test(single_mode, evb_bus_list[i]);
add_test(fifo_mode, evb_bus_list[i]);
}
return g_test_run();
}

View File

@ -35,6 +35,12 @@ endif
# bti-2 tests PROT_BTI, so no special compiler support required.
AARCH64_TESTS += bti-2
# MTE Tests
ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_ARMV8_MTE),)
AARCH64_TESTS += mte-1 mte-2 mte-3 mte-4
mte-%: CFLAGS += -march=armv8.5-a+memtag
endif
# Semihosting smoke test for linux-user
AARCH64_TESTS += semihosting
run-semihosting: semihosting

tests/tcg/aarch64/mte-1.c (new file)
View File

@ -0,0 +1,28 @@
/*
* Memory tagging, basic pass cases.
*
* Copyright (c) 2021 Linaro Ltd
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "mte.h"
int main(int ac, char **av)
{
int *p0, *p1, *p2;
long c;
enable_mte(PR_MTE_TCF_NONE);
p0 = alloc_mte_mem(sizeof(*p0));
asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(1));
assert(p1 != p0);
asm("subp %0,%1,%2" : "=r"(c) : "r"(p0), "r"(p1));
assert(c == 0);
asm("stg %0, [%0]" : : "r"(p1));
asm("ldg %0, [%1]" : "=r"(p2) : "r"(p0), "0"(p0));
assert(p1 == p2);
return 0;
}

tests/tcg/aarch64/mte-2.c (new file)
View File

@ -0,0 +1,45 @@
/*
* Memory tagging, basic fail cases, synchronous signals.
*
* Copyright (c) 2021 Linaro Ltd
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "mte.h"
void pass(int sig, siginfo_t *info, void *uc)
{
assert(info->si_code == SEGV_MTESERR);
exit(0);
}
int main(int ac, char **av)
{
struct sigaction sa;
int *p0, *p1, *p2;
long excl = 1;
enable_mte(PR_MTE_TCF_SYNC);
p0 = alloc_mte_mem(sizeof(*p0));
/* Create two differently tagged pointers. */
asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
assert(excl != 1);
asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
assert(p1 != p2);
/* Store the tag from the first pointer. */
asm("stg %0, [%0]" : : "r"(p1));
*p1 = 0;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = pass;
sa.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &sa, NULL);
*p2 = 0;
abort();
}
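
A small aside on mte-2.c: the tag-check mode requested by enable_mte(PR_MTE_TCF_SYNC) can be read back with the matching "get" prctl. A hedged sketch follows; PR_GET_TAGGED_ADDR_CTRL and its value 56 come from the Linux uapi rather than from this commit, and the PR_MTE_TCF_* constants are the fallbacks defined in mte.h further down.

/*
 * Illustrative only: confirm that synchronous tag checking is in effect.
 * Relies on <sys/prctl.h> and <assert.h>, both pulled in via mte.h.
 */
#ifndef PR_GET_TAGGED_ADDR_CTRL
# define PR_GET_TAGGED_ADDR_CTRL 56
#endif

static void assert_tcf_sync(void)
{
    int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

    assert(ctrl >= 0);
    assert((ctrl & (3UL << PR_MTE_TCF_SHIFT)) == PR_MTE_TCF_SYNC);
}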

tests/tcg/aarch64/mte-3.c (new file, 51 lines)
View File

@ -0,0 +1,51 @@
/*
* Memory tagging, basic fail cases, asynchronous signals.
*
* Copyright (c) 2021 Linaro Ltd
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "mte.h"

void pass(int sig, siginfo_t *info, void *uc)
{
assert(info->si_code == SEGV_MTEAERR);
exit(0);
}

int main(int ac, char **av)
{
struct sigaction sa;
long *p0, *p1, *p2;
long excl = 1;
enable_mte(PR_MTE_TCF_ASYNC);
p0 = alloc_mte_mem(sizeof(*p0));
/* Create two differently tagged pointers. */
asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
asm("gmi %0,%1,%0" : "+r"(excl) : "r" (p1));
assert(excl != 1);
asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));
assert(p1 != p2);
/* Store the tag from the first pointer. */
asm("stg %0, [%0]" : : "r"(p1));
*p1 = 0;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = pass;
sa.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV, &sa, NULL);
/*
* Signal for async error will happen eventually.
* For a real kernel this should be after the next IRQ (e.g. timer).
* For qemu linux-user, we kick the cpu and exit at the next TB.
* In either case, loop until this happens (or killed by timeout).
* For extra sauce, yield, producing EXCP_YIELD to cpu_loop().
*/
asm("str %0, [%0]; yield" : : "r"(p2));
while (1);
}

tests/tcg/aarch64/mte-4.c (new file, 45 lines)
View File

@ -0,0 +1,45 @@
/*
* Memory tagging, re-reading tag checks.
*
* Copyright (c) 2021 Linaro Ltd
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "mte.h"

void __attribute__((noinline)) tagset(void *p, size_t size)
{
size_t i;
for (i = 0; i < size; i += 16) {
asm("stg %0, [%0]" : : "r"(p + i));
}
}

void __attribute__((noinline)) tagcheck(void *p, size_t size)
{
size_t i;
void *c;
for (i = 0; i < size; i += 16) {
asm("ldg %0, [%1]" : "=r"(c) : "r"(p + i), "0"(p));
assert(c == p);
}
}

int main(int ac, char **av)
{
size_t size = getpagesize() * 4;
long excl = 1;
int *p0, *p1;
enable_mte(PR_MTE_TCF_ASYNC);
p0 = alloc_mte_mem(size);
/* Tag the pointer. */
asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));
tagset(p1, size);
tagcheck(p1, size);
return 0;
}

tests/tcg/aarch64/mte.h (new file, 60 lines)
View File

@ -0,0 +1,60 @@
/*
* Linux kernel fallback API definitions for MTE and test helpers.
*
* Copyright (c) 2021 Linaro Ltd
* SPDX-License-Identifier: GPL-2.0-or-later
*/

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
#endif
#ifndef PR_TAGGED_ADDR_ENABLE
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif
#ifndef PR_MTE_TCF_SHIFT
# define PR_MTE_TCF_SHIFT 1
# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TAG_SHIFT 3
#endif
#ifndef PROT_MTE
# define PROT_MTE 0x20
#endif
#ifndef SEGV_MTEAERR
# define SEGV_MTEAERR 8
# define SEGV_MTESERR 9
#endif

static void enable_mte(int tcf)
{
int r = prctl(PR_SET_TAGGED_ADDR_CTRL,
PR_TAGGED_ADDR_ENABLE | tcf | (0xfffe << PR_MTE_TAG_SHIFT),
0, 0, 0);
if (r < 0) {
perror("PR_SET_TAGGED_ADDR_CTRL");
exit(2);
}
}

static void *alloc_mte_mem(size_t size)
{
void *p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_MTE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED) {
perror("mmap PROT_MTE");
exit(2);
}
return p;
}
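
One thing mte.h deliberately leaves out is a runtime capability probe: enable_mte() simply exits with status 2 if the prctl() is rejected. For completeness, a hedged sketch of such a probe; HWCAP2_MTE and its fallback value (bit 18) come from the Linux arm64 uapi hwcap list, not from this commit, and getauxval()/AT_HWCAP2 are glibc/kernel facilities rather than anything defined here.

/* Illustrative only: report whether the kernel advertises MTE at all. */
#include <sys/auxv.h>

#ifndef HWCAP2_MTE
# define HWCAP2_MTE (1 << 18)
#endif

static int have_mte(void)
{
    return (getauxval(AT_HWCAP2) & HWCAP2_MTE) != 0;
}

A caller could check have_mte() before enable_mte() and skip the test, rather than relying on the prctl() failure path, when run on a host or emulation setup without MTE.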

View File

@ -53,7 +53,6 @@ void do_test(uint64_t value)
int main()
{
do_test(0);
do_test(-1);
do_test(0xda004acedeadbeefull);
return 0;
}

View File

@ -244,6 +244,10 @@ for target in $target_list; do
-mbranch-protection=standard -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_ARMV8_BTI=y" >> $config_target_mak
fi
if do_compiler "$target_compiler" $target_compiler_cflags \
-march=armv8.5-a+memtag -o $TMPE $TMPC; then
echo "CROSS_CC_HAS_ARMV8_MTE=y" >> $config_target_mak
fi
;;
esac