mirror of https://gitee.com/openkylin/qemu.git
More fixes for fedora-i386-cross
Add dup_const_tl
Expand MemOp MO_SIZE
Move MemOpIdx out of tcg.h
Vector support for tcg/s390x

-----BEGIN PGP SIGNATURE-----

iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmFdvPUdHHJpY2hhcmQu
aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8gcwf/T+J5dCgGisLnjOlh
mG16tZczILfmpw76Yne0zwJ8T2WFohkyOegBnfRZHzUM/0JZlMbvNMUKSJd4WKhB
fpzHKEeTVo7OlW5i6eo1HqQYcbEKzBMEBLEoDWeyRt3k3hpTcjNuD6tC3CaZoCvs
gf9UcYgsp3htRPsoOhmarjv5Wded7N1BDQa0W7amlT2rLPO4L2UILfDXiWmapkcp
0kgiKaI1Criua3BNA1+oGPQQQPVSi1MQmiwX/IW/6fExpC65xLBMI3DIyr1ejFPX
rrIyx49dQuUHCrPi9jcZ9eT3z8h1PhmAuREv1/VaDl2BROnXUGCanWZoLEd+YWFH
R6mXnQ==
=cs7J
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20211006' into staging

More fixes for fedora-i386-cross
Add dup_const_tl
Expand MemOp MO_SIZE
Move MemOpIdx out of tcg.h
Vector support for tcg/s390x

# gpg: Signature made Wed 06 Oct 2021 08:12:53 AM PDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* remotes/rth/tags/pull-tcg-20211006: (28 commits)
  tcg/s390x: Implement TCG_TARGET_HAS_cmpsel_vec
  tcg/s390x: Implement TCG_TARGET_HAS_bitsel_vec
  tcg/s390x: Implement TCG_TARGET_HAS_sat_vec
  tcg/s390x: Implement TCG_TARGET_HAS_minmax_vec
  tcg/s390x: Implement vector shift operations
  tcg/s390x: Implement TCG_TARGET_HAS_mul_vec
  tcg/s390x: Implement andc, orc, abs, neg, not vector operations
  tcg/s390x: Implement minimal vector operations
  tcg/s390x: Implement tcg_out_dup*_vec
  tcg/s390x: Implement tcg_out_mov for vector types
  tcg/s390x: Implement tcg_out_ld/st for vector types
  tcg/s390x: Add host vector framework
  tcg/s390x: Merge TCG_AREG0 and TCG_REG_CALL_STACK into TCGReg
  tcg/s390x: Change FACILITY representation
  tcg/s390x: Rename from tcg/s390
  tcg: Expand usadd/ussub with umin/umax
  hw/core/cpu: Re-sort the non-pointers to the end of CPUClass
  trace: Split guest_mem_before
  plugins: Reorg arguments to qemu_plugin_vcpu_mem_cb
  accel/tcg: Pass MemOpIdx to atomic_trace_*_post
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 6723ff639c
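The unifying change across the hunks below is mechanical but worth stating once: the old uint16_t trace token built by trace_mem_get_info() is dropped, helpers hand the MemOpIdx they already carry straight to the new split ld/st/rmw tracepoints, and qemu_plugin_vcpu_mem_cb() gains an explicit read/write argument. A minimal sketch of the resulting call shape, mirroring cpu_load_helper() below; do_load_sketch is a hypothetical name used only for this illustration, not a function in the diff:

    /* Sketch only: do_load_sketch is a hypothetical name; the body
     * mirrors cpu_load_helper() as changed in this pull. */
    static uint64_t do_load_sketch(CPUArchState *env, target_ulong addr,
                                   MemOp op, int mmu_idx, uintptr_t ra,
                                   FullLoadHelper *full_load)
    {
        /* One MemOpIdx now feeds lookup, tracing, and the plugin hook. */
        MemOpIdx oi = make_memop_idx(op, mmu_idx);
        uint64_t ret;

        trace_guest_ld_before_exec(env_cpu(env), addr, oi); /* split tracepoint */
        ret = full_load(env, addr, oi, ra);
        qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
        return ret;
    }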
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -13,56 +13,43 @@
  * See the COPYING file in the top-level directory.
  */

-static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
-                                     TCGMemOpIdx oi)
+static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr,
+                                 MemOpIdx oi)
 {
     CPUState *cpu = env_cpu(env);
-    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);

-    trace_guest_mem_before_exec(cpu, addr, info);
-    trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST);
-
-    return info;
+    trace_guest_rmw_before_exec(cpu, addr, oi);
 }

 static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
-                                  uint16_t info)
+                                  MemOpIdx oi)
 {
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
 }

 #if HAVE_ATOMIC128
-static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi)
+static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr,
+                                MemOpIdx oi)
 {
-    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false);
-
-    trace_guest_mem_before_exec(env_cpu(env), addr, info);
-
-    return info;
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);
 }

 static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
-                                 uint16_t info)
+                                 MemOpIdx oi)
 {
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
 }

-static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi)
+static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr,
+                                MemOpIdx oi)
 {
-    uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true);
-
-    trace_guest_mem_before_exec(env_cpu(env), addr, info);
-
-    return info;
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);
 }

 static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
-                                 uint16_t info)
+                                 MemOpIdx oi)
 {
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }
 #endif

--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -19,7 +19,6 @@
  */

 #include "qemu/plugin.h"
-#include "trace/mem.h"

 #if DATA_SIZE == 16
 # define SUFFIX o
@@ -72,77 +71,77 @@

 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
-                              TCGMemOpIdx oi, uintptr_t retaddr)
+                              MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

+    atomic_trace_rmw_pre(env, addr, oi);
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, cmpv, newv);
 #else
     ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
 #endif
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_rmw_post(env, addr, info);
+    atomic_trace_rmw_post(env, addr, oi);
     return ret;
 }

 #if DATA_SIZE >= 16
 #if HAVE_ATOMIC128
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
-                         TCGMemOpIdx oi, uintptr_t retaddr)
+                         MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_READ, retaddr);
     DATA_TYPE val;
-    uint16_t info = atomic_trace_ld_pre(env, addr, oi);

+    atomic_trace_ld_pre(env, addr, oi);
     val = atomic16_read(haddr);
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_ld_post(env, addr, info);
+    atomic_trace_ld_post(env, addr, oi);
     return val;
 }

 void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
-                     TCGMemOpIdx oi, uintptr_t retaddr)
+                     MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_WRITE, retaddr);
-    uint16_t info = atomic_trace_st_pre(env, addr, oi);

+    atomic_trace_st_pre(env, addr, oi);
     atomic16_set(haddr, val);
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_st_post(env, addr, info);
+    atomic_trace_st_post(env, addr, oi);
 }
 #endif
 #else
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
-                           TCGMemOpIdx oi, uintptr_t retaddr)
+                           MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

+    atomic_trace_rmw_pre(env, addr, oi);
     ret = qatomic_xchg__nocheck(haddr, val);
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_rmw_post(env, addr, info);
+    atomic_trace_rmw_post(env, addr, oi);
     return ret;
 }

 #define GEN_ATOMIC_HELPER(X) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
+                        ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 { \
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                          PAGE_READ | PAGE_WRITE, retaddr); \
     DATA_TYPE ret; \
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
+    atomic_trace_rmw_pre(env, addr, oi); \
     ret = qatomic_##X(haddr, val); \
     ATOMIC_MMU_CLEANUP; \
-    atomic_trace_rmw_post(env, addr, info); \
+    atomic_trace_rmw_post(env, addr, oi); \
     return ret; \
 }

@@ -167,12 +166,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
+                        ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 { \
     XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                           PAGE_READ | PAGE_WRITE, retaddr); \
     XDATA_TYPE cmp, old, new, val = xval; \
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
+    atomic_trace_rmw_pre(env, addr, oi); \
     smp_mb(); \
     cmp = qatomic_read__nocheck(haddr); \
     do { \
@@ -180,7 +179,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
         cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
     } while (cmp != old); \
     ATOMIC_MMU_CLEANUP; \
-    atomic_trace_rmw_post(env, addr, info); \
+    atomic_trace_rmw_post(env, addr, oi); \
     return RET; \
 }

@@ -211,78 +210,78 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

 ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
-                              TCGMemOpIdx oi, uintptr_t retaddr)
+                              MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_READ | PAGE_WRITE, retaddr);
     DATA_TYPE ret;
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

+    atomic_trace_rmw_pre(env, addr, oi);
 #if DATA_SIZE == 16
     ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
 #else
     ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
 #endif
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_rmw_post(env, addr, info);
+    atomic_trace_rmw_post(env, addr, oi);
     return BSWAP(ret);
 }

 #if DATA_SIZE >= 16
 #if HAVE_ATOMIC128
 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
-                         TCGMemOpIdx oi, uintptr_t retaddr)
+                         MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_READ, retaddr);
     DATA_TYPE val;
-    uint16_t info = atomic_trace_ld_pre(env, addr, oi);

+    atomic_trace_ld_pre(env, addr, oi);
     val = atomic16_read(haddr);
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_ld_post(env, addr, info);
+    atomic_trace_ld_post(env, addr, oi);
     return BSWAP(val);
 }

 void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
-                     TCGMemOpIdx oi, uintptr_t retaddr)
+                     MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_WRITE, retaddr);
-    uint16_t info = atomic_trace_st_pre(env, addr, oi);

+    atomic_trace_st_pre(env, addr, oi);
     val = BSWAP(val);
     atomic16_set(haddr, val);
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_st_post(env, addr, info);
+    atomic_trace_st_post(env, addr, oi);
 }
 #endif
 #else
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
-                           TCGMemOpIdx oi, uintptr_t retaddr)
+                           MemOpIdx oi, uintptr_t retaddr)
 {
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
                                          PAGE_READ | PAGE_WRITE, retaddr);
     ABI_TYPE ret;
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi);

+    atomic_trace_rmw_pre(env, addr, oi);
     ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
     ATOMIC_MMU_CLEANUP;
-    atomic_trace_rmw_post(env, addr, info);
+    atomic_trace_rmw_post(env, addr, oi);
     return BSWAP(ret);
 }

 #define GEN_ATOMIC_HELPER(X) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
+                        ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 { \
     DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                          PAGE_READ | PAGE_WRITE, retaddr); \
     DATA_TYPE ret; \
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
+    atomic_trace_rmw_pre(env, addr, oi); \
     ret = qatomic_##X(haddr, BSWAP(val)); \
     ATOMIC_MMU_CLEANUP; \
-    atomic_trace_rmw_post(env, addr, info); \
+    atomic_trace_rmw_post(env, addr, oi); \
     return BSWAP(ret); \
 }

@@ -304,12 +303,12 @@ GEN_ATOMIC_HELPER(xor_fetch)
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
-                        ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
+                        ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 { \
     XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
                                           PAGE_READ | PAGE_WRITE, retaddr); \
     XDATA_TYPE ldo, ldn, old, new, val = xval; \
-    uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \
+    atomic_trace_rmw_pre(env, addr, oi); \
     smp_mb(); \
     ldn = qatomic_read__nocheck(haddr); \
     do { \
@@ -317,7 +316,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
         ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
     } while (ldo != ldn); \
     ATOMIC_MMU_CLEANUP; \
-    atomic_trace_rmw_post(env, addr, info); \
+    atomic_trace_rmw_post(env, addr, oi); \
     return RET; \
 }

--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -34,7 +34,6 @@
 #include "qemu/atomic128.h"
 #include "exec/translate-all.h"
 #include "trace/trace-root.h"
-#include "trace/mem.h"
 #include "tb-hash.h"
 #include "internal.h"
 #ifdef CONFIG_PLUGIN
@@ -1749,7 +1748,7 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, int size, int prot,
+                               MemOpIdx oi, int size, int prot,
                                uintptr_t retaddr)
 {
     size_t mmu_idx = get_mmuidx(oi);
@@ -1850,7 +1849,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 */

 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
-                                TCGMemOpIdx oi, uintptr_t retaddr);
+                                MemOpIdx oi, uintptr_t retaddr);

 static inline uint64_t QEMU_ALWAYS_INLINE
 load_memop(const void *haddr, MemOp op)
@@ -1876,7 +1875,7 @@ load_memop(const void *haddr, MemOp op)
 }

 static inline uint64_t QEMU_ALWAYS_INLINE
-load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
             FullLoadHelper *full_load)
 {
@@ -1991,78 +1990,78 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 */

 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
-                              TCGMemOpIdx oi, uintptr_t retaddr)
+                              MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
 }

 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
-                                     TCGMemOpIdx oi, uintptr_t retaddr)
+                                     MemOpIdx oi, uintptr_t retaddr)
 {
     return full_ldub_mmu(env, addr, oi, retaddr);
 }

 static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                 TCGMemOpIdx oi, uintptr_t retaddr)
+                                 MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                        full_le_lduw_mmu);
 }

 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return full_le_lduw_mmu(env, addr, oi, retaddr);
 }

 static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                 TCGMemOpIdx oi, uintptr_t retaddr)
+                                 MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                        full_be_lduw_mmu);
 }

 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return full_be_lduw_mmu(env, addr, oi, retaddr);
 }

 static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                 TCGMemOpIdx oi, uintptr_t retaddr)
+                                 MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                        full_le_ldul_mmu);
 }

 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return full_le_ldul_mmu(env, addr, oi, retaddr);
 }

 static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                 TCGMemOpIdx oi, uintptr_t retaddr)
+                                 MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                        full_be_ldul_mmu);
 }

 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return full_be_ldul_mmu(env, addr, oi, retaddr);
 }

 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           TCGMemOpIdx oi, uintptr_t retaddr)
+                           MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                        helper_le_ldq_mmu);
 }

 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           TCGMemOpIdx oi, uintptr_t retaddr)
+                           MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                        helper_be_ldq_mmu);
@@ -2075,31 +2074,31 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,


 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
-                                     TCGMemOpIdx oi, uintptr_t retaddr)
+                                     MemOpIdx oi, uintptr_t retaddr)
 {
     return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
 }

 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
 }

 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
 }

 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
 }

 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr)
+                                    MemOpIdx oi, uintptr_t retaddr)
 {
     return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
 }
@@ -2112,18 +2111,14 @@ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
                                        int mmu_idx, uintptr_t retaddr,
                                        MemOp op, FullLoadHelper *full_load)
 {
-    uint16_t meminfo;
-    TCGMemOpIdx oi;
+    MemOpIdx oi = make_memop_idx(op, mmu_idx);
     uint64_t ret;

-    meminfo = trace_mem_get_info(op, mmu_idx, false);
-    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), addr, oi);

-    op &= ~MO_SIGN;
-    oi = make_memop_idx(op, mmu_idx);
     ret = full_load(env, addr, oi, retaddr);

-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

     return ret;
 }
@@ -2137,8 +2132,7 @@ uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
 int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                        int mmu_idx, uintptr_t ra)
 {
-    return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
-                                   full_ldub_mmu);
+    return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
 }

 uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -2150,8 +2144,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
 int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                           int mmu_idx, uintptr_t ra)
 {
-    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
-                                    full_be_lduw_mmu);
+    return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
 }

 uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -2175,8 +2168,7 @@ uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
 int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                           int mmu_idx, uintptr_t ra)
 {
-    return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
-                                    full_le_lduw_mmu);
+    return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
 }

 uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -2341,7 +2333,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
     uintptr_t index, index2;
     CPUTLBEntry *entry, *entry2;
     target_ulong page2, tlb_addr, tlb_addr2;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     size_t size2;
     int i;

@@ -2408,7 +2400,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,

 static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
-             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
+             MemOpIdx oi, uintptr_t retaddr, MemOp op)
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
@@ -2506,43 +2498,43 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,

 void __attribute__((noinline))
 helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
-                   TCGMemOpIdx oi, uintptr_t retaddr)
+                   MemOpIdx oi, uintptr_t retaddr)
 {
     store_helper(env, addr, val, oi, retaddr, MO_UB);
 }

 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr)
+                       MemOpIdx oi, uintptr_t retaddr)
 {
     store_helper(env, addr, val, oi, retaddr, MO_LEUW);
 }

 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr)
+                       MemOpIdx oi, uintptr_t retaddr)
 {
     store_helper(env, addr, val, oi, retaddr, MO_BEUW);
 }

 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr)
+                       MemOpIdx oi, uintptr_t retaddr)
 {
     store_helper(env, addr, val, oi, retaddr, MO_LEUL);
 }

 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr)
+                       MemOpIdx oi, uintptr_t retaddr)
 {
     store_helper(env, addr, val, oi, retaddr, MO_BEUL);
 }

 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr)
+                       MemOpIdx oi, uintptr_t retaddr)
 {
     store_helper(env, addr, val, oi, retaddr, MO_LEQ);
 }

 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr)
+                       MemOpIdx oi, uintptr_t retaddr)
 {
     store_helper(env, addr, val, oi, retaddr, MO_BEQ);
 }
@@ -2555,16 +2547,13 @@ static inline void QEMU_ALWAYS_INLINE
 cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
                  int mmu_idx, uintptr_t retaddr, MemOp op)
 {
-    TCGMemOpIdx oi;
-    uint16_t meminfo;
+    MemOpIdx oi = make_memop_idx(op, mmu_idx);

-    meminfo = trace_mem_get_info(op, mmu_idx, true);
-    trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), addr, oi);

-    oi = make_memop_idx(op, mmu_idx);
     store_helper(env, addr, val, oi, retaddr, op);

-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
@@ -2721,49 +2710,49 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
 /* Code access functions. */

 static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr)
+                               MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
 }

 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
 {
-    TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
+    MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
     return full_ldub_code(env, addr, oi, 0);
 }

 static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, uintptr_t retaddr)
+                               MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
 }

 uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
 {
-    TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
+    MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
     return full_lduw_code(env, addr, oi, 0);
 }

 static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
-                              TCGMemOpIdx oi, uintptr_t retaddr)
+                              MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
 }

 uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
 {
-    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
+    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
     return full_ldl_code(env, addr, oi, 0);
 }

 static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
-                              TCGMemOpIdx oi, uintptr_t retaddr)
+                              MemOpIdx oi, uintptr_t retaddr)
 {
     return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
 }

 uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
 {
-    TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+    MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
     return full_ldq_code(env, addr, oi, 0);
 }
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -45,7 +45,6 @@
 #include "qemu/osdep.h"
 #include "tcg/tcg.h"
 #include "tcg/tcg-op.h"
-#include "trace/mem.h"
 #include "exec/exec-all.h"
 #include "exec/plugin-gen.h"
 #include "exec/translator.h"
@@ -211,9 +210,9 @@ static void gen_mem_wrapped(enum plugin_gen_cb type,
                             const union mem_gen_fn *f, TCGv addr,
                             uint32_t info, bool is_mem)
 {
-    int wr = !!(info & TRACE_MEM_ST);
+    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);

-    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr);
+    gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
     if (is_mem) {
         f->mem_fn(addr, info);
     } else {
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -27,7 +27,7 @@
 #include "exec/helper-proto.h"
 #include "qemu/atomic128.h"
 #include "trace/trace-root.h"
-#include "trace/mem.h"
+#include "internal.h"

 #undef EAX
 #undef ECX
@@ -888,111 +888,93 @@ int cpu_signal_handler(int host_signum, void *pinfo,

 uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr)
 {
+    MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);
     uint32_t ret;
-    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
     ret = ldub_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

 int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr)
 {
-    int ret;
-    uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false);
-
-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
-    ret = ldsb_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
-    return ret;
+    return (int8_t)cpu_ldub_data(env, ptr);
 }

 uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr)
 {
+    MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);
     uint32_t ret;
-    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
     ret = lduw_be_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

 int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr)
 {
-    int ret;
-    uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false);
-
-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
-    ret = ldsw_be_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
-    return ret;
+    return (int16_t)cpu_lduw_be_data(env, ptr);
 }

 uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr)
 {
+    MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);
     uint32_t ret;
-    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
     ret = ldl_be_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

 uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr)
 {
+    MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);
     uint64_t ret;
-    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
     ret = ldq_be_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

 uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr)
 {
+    MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);
     uint32_t ret;
-    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
     ret = lduw_le_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

 int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr)
 {
-    int ret;
-    uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false);
-
-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
-    ret = ldsw_le_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
-    return ret;
+    return (int16_t)cpu_lduw_le_data(env, ptr);
 }

 uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr)
 {
+    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);
     uint32_t ret;
-    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
     ret = ldl_le_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

 uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr)
 {
+    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);
     uint64_t ret;
-    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_ld_before_exec(env_cpu(env), ptr, oi);
     ret = ldq_le_p(g2h(env_cpu(env), ptr));
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

@@ -1008,12 +990,7 @@ uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)

 int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 {
-    int ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldsb_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
+    return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr);
 }

 uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
@@ -1028,12 +1005,7 @@ uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)

 int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 {
-    int ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldsw_be_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
+    return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr);
 }

 uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
@@ -1068,12 +1040,7 @@ uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)

 int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
 {
-    int ret;
-
-    set_helper_retaddr(retaddr);
-    ret = cpu_ldsw_le_data(env, ptr);
-    clear_helper_retaddr();
-    return ret;
+    return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr);
 }

 uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)
@@ -1098,65 +1065,65 @@ uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr)

 void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
 {
-    uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true);
+    MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
     stb_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
 {
-    uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true);
+    MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
     stw_be_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
 {
-    uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true);
+    MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
     stl_be_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
 {
-    uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true);
+    MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
     stq_be_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
 {
-    uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true);
+    MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
     stw_le_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val)
 {
-    uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true);
+    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
     stl_le_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val)
 {
-    uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true);
+    MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX);

-    trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo);
+    trace_guest_st_before_exec(env_cpu(env), ptr, oi);
     stq_le_p(g2h(env_cpu(env), ptr), val);
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo);
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, oi, QEMU_PLUGIN_MEM_W);
 }

 void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr,
@@ -1261,7 +1228,7 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
 */
 static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               TCGMemOpIdx oi, int size, int prot,
+                               MemOpIdx oi, int size, int prot,
                                uintptr_t retaddr)
 {
     /* Enforce qemu required alignment. */
--- a/include/exec/memop.h
+++ b/include/exec/memop.h
@@ -19,11 +19,15 @@ typedef enum MemOp {
     MO_16 = 1,
     MO_32 = 2,
     MO_64 = 3,
-    MO_SIZE = 3, /* Mask for the above. */
+    MO_128 = 4,
+    MO_256 = 5,
+    MO_512 = 6,
+    MO_1024 = 7,
+    MO_SIZE = 0x07, /* Mask for the above. */

-    MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */
+    MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */

-    MO_BSWAP = 8, /* Host reverse endian. */
+    MO_BSWAP = 0x10, /* Host reverse endian. */
 #ifdef HOST_WORDS_BIGENDIAN
     MO_LE = MO_BSWAP,
     MO_BE = 0,
@@ -59,8 +63,8 @@ typedef enum MemOp {
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
     */
-    MO_ASHIFT = 4,
-    MO_AMASK = 7 << MO_ASHIFT,
+    MO_ASHIFT = 5,
+    MO_AMASK = 0x7 << MO_ASHIFT,
 #ifdef NEED_CPU_H
 #ifdef TARGET_ALIGNED_ONLY
     MO_ALIGN = 0,
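After this hunk the low three bits of a MemOp encode the access size as log2(bytes), now reaching MO_1024 (a 128-byte access), so MO_SIGN and MO_BSWAP move up to 0x08 and 0x10 and the alignment field starts at bit 5. A small decoding illustration, assuming only the constants shown above; memop_size_bytes is a name invented here, not part of the patch:

    /* Illustration of the new MemOp bit layout; memop_size_bytes is a
     * hypothetical helper, not part of this patch. */
    static inline unsigned memop_size_bytes(MemOp op)
    {
        return 1u << (op & MO_SIZE);   /* MO_8..MO_1024 -> 1..128 bytes */
    }

    /* e.g. MO_32 | MO_SIGN | MO_BSWAP describes a sign-extended,
     * byte-swapped 4-byte access:
     *   memop_size_bytes(MO_32 | MO_SIGN | MO_BSWAP) == 4
     *   (op & MO_SIGN) != 0 and (op & MO_BSWAP) != 0 */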
@@ -0,0 +1,55 @@
+/*
+ * Combine the MemOp and mmu_idx parameters into a single value.
+ *
+ * Authors:
+ *  Richard Henderson <rth@twiddle.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef EXEC_MEMOPIDX_H
+#define EXEC_MEMOPIDX_H 1
+
+#include "exec/memop.h"
+
+typedef uint32_t MemOpIdx;
+
+/**
+ * make_memop_idx
+ * @op: memory operation
+ * @idx: mmu index
+ *
+ * Encode these values into a single parameter.
+ */
+static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx)
+{
+#ifdef CONFIG_DEBUG_TCG
+    assert(idx <= 15);
+#endif
+    return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline MemOp get_memop(MemOpIdx oi)
+{
+    return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(MemOpIdx oi)
+{
+    return oi & 15;
+}
+
+#endif
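
The packing is deliberately trivial: four bits of mmu index under the MemOp. A quick standalone round-trip check of that encoding, with the helpers re-declared locally rather than included from QEMU:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t MemOpIdx;

    /* Same packing as memopidx.h: MemOp in the high bits, mmu index in bits 0-3. */
    static inline MemOpIdx make_memop_idx(unsigned op, unsigned idx)
    {
        assert(idx <= 15);          /* only 16 mmu indexes fit in 4 bits */
        return (op << 4) | idx;
    }

    static inline unsigned get_memop(MemOpIdx oi)  { return oi >> 4; }
    static inline unsigned get_mmuidx(MemOpIdx oi) { return oi & 15; }

    int main(void)
    {
        unsigned op = 0x13;                  /* an arbitrary example MemOp value */
        MemOpIdx oi = make_memop_idx(op, 2);
        assert(get_memop(oi) == op);         /* the fields round-trip losslessly */
        assert(get_mmuidx(oi) == 2);
        return 0;
    }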
@@ -131,7 +131,6 @@ struct CPUClass {
     ObjectClass *(*class_by_name)(const char *cpu_model);
     void (*parse_features)(const char *typename, char *str, Error **errp);
 
-    int reset_dump_flags;
     bool (*has_work)(CPUState *cpu);
     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                            uint8_t *buf, int len, bool is_write);
@@ -149,9 +148,6 @@ struct CPUClass {
     void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
 
     const char *deprecation_note;
-    /* Keep non-pointer data at the end to minimize holes.  */
-    int gdb_num_core_regs;
-    bool gdb_stop_before_watchpoint;
     struct AccelCPUClass *accel_cpu;
 
     /* when system emulation is not available, this pointer is NULL */
@@ -165,6 +161,13 @@ struct CPUClass {
      * class data that depends on the accelerator, see accel/accel-common.c.
      */
     void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
+
+    /*
+     * Keep non-pointer data at the end to minimize holes.
+     */
+    int reset_dump_flags;
+    int gdb_num_core_regs;
+    bool gdb_stop_before_watchpoint;
 };
 
 /*
@@ -12,6 +12,7 @@
 #include "qemu/error-report.h"
 #include "qemu/queue.h"
 #include "qemu/option.h"
+#include "exec/memopidx.h"
 
 /*
  * Events that plugins can subscribe to.
@@ -36,6 +37,25 @@ enum qemu_plugin_event {
 struct qemu_plugin_desc;
 typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList;
 
+/*
+ * Construct a qemu_plugin_meminfo_t.
+ */
+static inline qemu_plugin_meminfo_t
+make_plugin_meminfo(MemOpIdx oi, enum qemu_plugin_mem_rw rw)
+{
+    return oi | (rw << 16);
+}
+
+/*
+ * Extract the memory operation direction from a qemu_plugin_meminfo_t.
+ * Other portions may be extracted via get_memop and get_mmuidx.
+ */
+static inline enum qemu_plugin_mem_rw
+get_plugin_meminfo_rw(qemu_plugin_meminfo_t i)
+{
+    return i >> 16;
+}
+
 #ifdef CONFIG_PLUGIN
 extern QemuOptsList qemu_plugin_opts;
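
A qemu_plugin_meminfo_t is therefore just a MemOpIdx with the read/write direction stacked on top of it. A standalone sketch of the pack/unpack (the enum values mirror the plugin API; 0x132 is an arbitrary example MemOpIdx):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t qemu_plugin_meminfo_t;
    enum qemu_plugin_mem_rw { QEMU_PLUGIN_MEM_R = 1, QEMU_PLUGIN_MEM_W = 2,
                              QEMU_PLUGIN_MEM_RW = 3 };

    /* Same layout as above: MemOpIdx in the low 16 bits, rw above it. */
    static inline qemu_plugin_meminfo_t
    make_plugin_meminfo(uint32_t oi, enum qemu_plugin_mem_rw rw)
    {
        return oi | (rw << 16);
    }

    static inline enum qemu_plugin_mem_rw
    get_plugin_meminfo_rw(qemu_plugin_meminfo_t i)
    {
        return i >> 16;
    }

    int main(void)
    {
        qemu_plugin_meminfo_t info = make_plugin_meminfo(0x132, QEMU_PLUGIN_MEM_W);
        assert(get_plugin_meminfo_rw(info) == QEMU_PLUGIN_MEM_W);
        assert((info & 0xffff) == 0x132);   /* the MemOpIdx is intact below */
        return 0;
    }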
@@ -180,7 +200,8 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1,
                          uint64_t a6, uint64_t a7, uint64_t a8);
 void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret);
 
-void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t meminfo);
+void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+                             MemOpIdx oi, enum qemu_plugin_mem_rw rw);
 
 void qemu_plugin_flush_cb(void);
 
@@ -244,7 +265,8 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
 { }
 
 static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
-                                           uint32_t meminfo)
+                                           MemOpIdx oi,
+                                           enum qemu_plugin_mem_rw rw)
 { }
 
 static inline void qemu_plugin_flush_cb(void)
@@ -27,6 +27,7 @@
 
 #include "cpu.h"
 #include "exec/memop.h"
+#include "exec/memopidx.h"
 #include "qemu/bitops.h"
 #include "qemu/plugin.h"
 #include "qemu/queue.h"
@@ -1147,44 +1148,6 @@ static inline size_t tcg_current_code_size(TCGContext *s)
     return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
 }
 
-/* Combine the MemOp and mmu_idx parameters into a single value. */
-typedef uint32_t TCGMemOpIdx;
-
-/**
- * make_memop_idx
- * @op: memory operation
- * @idx: mmu index
- *
- * Encode these values into a single parameter.
- */
-static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx)
-{
-    tcg_debug_assert(idx <= 15);
-    return (op << 4) | idx;
-}
-
-/**
- * get_memop
- * @oi: combined op/idx parameter
- *
- * Extract the memory operation from the combined value.
- */
-static inline MemOp get_memop(TCGMemOpIdx oi)
-{
-    return oi >> 4;
-}
-
-/**
- * get_mmuidx
- * @oi: combined op/idx parameter
- *
- * Extract the mmu index from the combined value.
- */
-static inline unsigned get_mmuidx(TCGMemOpIdx oi)
-{
-    return oi & 15;
-}
-
 /**
  * tcg_qemu_tb_exec:
  * @env: pointer to CPUArchState for the CPU
@@ -1272,52 +1235,64 @@ uint64_t dup_const(unsigned vece, uint64_t c);
         : (qemu_build_not_reached_always(), 0))                    \
      : dup_const(VECE, C))
 
+#if TARGET_LONG_BITS == 64
+# define dup_const_tl dup_const
+#else
+# define dup_const_tl(VECE, C)                                     \
+    (__builtin_constant_p(VECE)                                    \
+     ? (  (VECE) == MO_8  ? 0x01010101ul * (uint8_t)(C)            \
+        : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C)           \
+        : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C)           \
+        : (qemu_build_not_reached_always(), 0))                    \
+     : (target_long)dup_const(VECE, C))
+#endif
+
 /*
  * Memory helpers that will be used by TCG generated code.
  */
 #ifdef CONFIG_SOFTMMU
 /* Value zero-extended to tcg register size.  */
 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
-                                     TCGMemOpIdx oi, uintptr_t retaddr);
+                                     MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           TCGMemOpIdx oi, uintptr_t retaddr);
+                           MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           TCGMemOpIdx oi, uintptr_t retaddr);
+                           MemOpIdx oi, uintptr_t retaddr);
 
 /* Value sign-extended to tcg register size.  */
 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
-                                     TCGMemOpIdx oi, uintptr_t retaddr);
+                                     MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
-                        TCGMemOpIdx oi, uintptr_t retaddr);
+                        MemOpIdx oi, uintptr_t retaddr);
 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr);
+                       MemOpIdx oi, uintptr_t retaddr);
 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr);
+                       MemOpIdx oi, uintptr_t retaddr);
 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr);
+                       MemOpIdx oi, uintptr_t retaddr);
 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr);
+                       MemOpIdx oi, uintptr_t retaddr);
 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr);
+                       MemOpIdx oi, uintptr_t retaddr);
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       TCGMemOpIdx oi, uintptr_t retaddr);
+                       MemOpIdx oi, uintptr_t retaddr);
 
 /* Temporary aliases until backends are converted.  */
 #ifdef TARGET_WORDS_BIGENDIAN
@@ -1345,30 +1320,30 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
 
 uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                  uint32_t cmpv, uint32_t newv,
-                                 TCGMemOpIdx oi, uintptr_t retaddr);
+                                 MemOpIdx oi, uintptr_t retaddr);
 uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                     uint32_t cmpv, uint32_t newv,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                     uint32_t cmpv, uint32_t newv,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                     uint64_t cmpv, uint64_t newv,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                     uint32_t cmpv, uint32_t newv,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                     uint32_t cmpv, uint32_t newv,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                     uint64_t cmpv, uint64_t newv,
-                                    TCGMemOpIdx oi, uintptr_t retaddr);
+                                    MemOpIdx oi, uintptr_t retaddr);
 
 #define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
     TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu        \
         (CPUArchState *env, target_ulong addr, TYPE val,  \
-         TCGMemOpIdx oi, uintptr_t retaddr);
+         MemOpIdx oi, uintptr_t retaddr);
 
 #ifdef CONFIG_ATOMIC64
 #define GEN_ATOMIC_HELPER_ALL(NAME)          \
@@ -1415,19 +1390,19 @@ GEN_ATOMIC_HELPER_ALL(xchg)
 
 Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                   Int128 cmpv, Int128 newv,
-                                  TCGMemOpIdx oi, uintptr_t retaddr);
+                                  MemOpIdx oi, uintptr_t retaddr);
 Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                   Int128 cmpv, Int128 newv,
-                                  TCGMemOpIdx oi, uintptr_t retaddr);
+                                  MemOpIdx oi, uintptr_t retaddr);
 
 Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr);
+                             MemOpIdx oi, uintptr_t retaddr);
 Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
-                             TCGMemOpIdx oi, uintptr_t retaddr);
+                             MemOpIdx oi, uintptr_t retaddr);
 void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                           TCGMemOpIdx oi, uintptr_t retaddr);
+                           MemOpIdx oi, uintptr_t retaddr);
 void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
-                           TCGMemOpIdx oi, uintptr_t retaddr);
+                           MemOpIdx oi, uintptr_t retaddr);
 
 #ifdef CONFIG_DEBUG_TCG
 void tcg_assert_listed_vecop(TCGOpcode);
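
On a 32-bit target_long, dup_const_tl reduces to multiplying the truncated constant by a repeating-ones pattern. A standalone sketch of that branch (dup_const32 is a hypothetical stand-in for the macro's 32-bit arm):

    #include <assert.h>
    #include <stdint.h>

    /* Replicate a (2^vece)-byte constant across a 32-bit word, the same
     * arithmetic the 32-bit branch of dup_const_tl above performs. */
    static uint32_t dup_const32(unsigned vece, uint32_t c)
    {
        switch (vece) {
        case 0: return 0x01010101u * (uint8_t)c;   /* MO_8  */
        case 1: return 0x00010001u * (uint16_t)c;  /* MO_16 */
        case 2: return c;                          /* MO_32 */
        }
        assert(0);
        return 0;
    }

    int main(void)
    {
        assert(dup_const32(0, 0xab) == 0xababababu);   /* byte replicated 4x */
        assert(dup_const32(1, 0x1234) == 0x12341234u); /* halfword replicated 2x */
        return 0;
    }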
@@ -268,8 +268,6 @@ if not get_option('tcg').disabled()
     tcg_arch = 'tci'
   elif config_host['ARCH'] == 'sparc64'
     tcg_arch = 'sparc'
-  elif config_host['ARCH'] == 's390x'
-    tcg_arch = 's390'
   elif config_host['ARCH'] in ['x86_64', 'x32']
     tcg_arch = 'i386'
   elif config_host['ARCH'] == 'ppc64'
@@ -45,7 +45,6 @@
 #include "qemu/plugin-memory.h"
 #include "hw/boards.h"
 #endif
-#include "trace/mem.h"
 
 /* Uninstall and Reset handlers */
 
@@ -246,22 +245,25 @@ const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
 
 unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info)
 {
-    return info & TRACE_MEM_SZ_SHIFT_MASK;
+    MemOp op = get_memop(info);
+    return op & MO_SIZE;
 }
 
 bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info)
 {
-    return !!(info & TRACE_MEM_SE);
+    MemOp op = get_memop(info);
+    return op & MO_SIGN;
 }
 
 bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info)
 {
-    return !!(info & TRACE_MEM_BE);
+    MemOp op = get_memop(info);
+    return (op & MO_BSWAP) == MO_BE;
 }
 
 bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info)
 {
-    return !!(info & TRACE_MEM_ST);
+    return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W;
 }
 
 /*
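
All of these accessors now reduce to shifting and masking one 32-bit value. A standalone sketch of the decode path, with the helpers re-implemented locally under the bit layout described above (rw in bits 16 and up, MemOpIdx below, the MemOp starting at bit 4):

    #include <assert.h>
    #include <stdint.h>

    enum { MO_SIZE = 0x07 };
    enum { MEM_R = 1, MEM_W = 2 };   /* stand-ins for QEMU_PLUGIN_MEM_R/W */

    static unsigned mem_size_shift(uint32_t info) { return (info >> 4) & MO_SIZE; }
    static int mem_is_store(uint32_t info)        { return (info >> 16) & MEM_W; }

    int main(void)
    {
        /* Hypothetical event: a 4-byte (MO_32 == 2) write via mmu index 1. */
        uint32_t info = ((2u << 4) | 1) | ((uint32_t)MEM_W << 16);
        assert(1u << mem_size_shift(info) == 4);   /* access size in bytes */
        assert(mem_is_store(info));
        return 0;
    }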
@@ -277,11 +279,12 @@ struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info,
 {
 #ifdef CONFIG_SOFTMMU
     CPUState *cpu = current_cpu;
-    unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT;
-    hwaddr_info.is_store = info & TRACE_MEM_ST;
+    unsigned int mmu_idx = get_mmuidx(info);
+    enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
+    hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0;
 
     if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx,
-                           info & TRACE_MEM_ST, &hwaddr_info)) {
+                           hwaddr_info.is_store, &hwaddr_info)) {
         error_report("invalid use of qemu_plugin_get_hwaddr");
         return NULL;
     }
@@ -27,7 +27,6 @@
 #include "exec/helper-proto.h"
 #include "tcg/tcg.h"
 #include "tcg/tcg-op.h"
-#include "trace/mem.h" /* mem_info macros */
 #include "plugin.h"
 #include "qemu/compiler.h"
 
@@ -446,7 +445,8 @@ void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
     }
 }
 
-void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
+void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
+                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
 {
     GArray *arr = cpu->plugin_mem_cbs;
     size_t i;
@@ -457,14 +457,14 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info)
     for (i = 0; i < arr->len; i++) {
         struct qemu_plugin_dyn_cb *cb =
             &g_array_index(arr, struct qemu_plugin_dyn_cb, i);
-        int w = !!(info & TRACE_MEM_ST) + 1;
 
-        if (!(w & cb->rw)) {
+        if (!(rw & cb->rw)) {
             break;
         }
         switch (cb->type) {
         case PLUGIN_CB_REGULAR:
-            cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp);
+            cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
+                           vaddr, cb->userp);
             break;
         case PLUGIN_CB_INLINE:
             exec_inline_op(cb);
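
The dispatch test is a plain bitmask intersection between the event's direction and the direction the callback subscribed to. A minimal standalone illustration (the flag values are stand-ins for QEMU_PLUGIN_MEM_*):

    #include <stdio.h>

    enum mem_rw { MEM_R = 1, MEM_W = 2, MEM_RW = 3 };

    /* The same per-callback test the loop above applies with rw & cb->rw:
     * a callback subscribed only to reads is not invoked for a pure write. */
    static void maybe_call(enum mem_rw event, enum mem_rw subscribed)
    {
        printf(event & subscribed ? "callback fires\n" : "callback skipped\n");
    }

    int main(void)
    {
        maybe_call(MEM_W, MEM_R);   /* write event, read-only callback: skipped */
        maybe_call(MEM_W, MEM_RW);  /* fires */
        return 0;
    }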
@@ -531,8 +531,8 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
     clear_helper_retaddr();
 #else
     int mem_idx = cpu_mmu_index(env, false);
-    TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
-    TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
+    MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+    MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
 
     o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
     o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
@@ -555,7 +555,7 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
     uintptr_t ra = GETPC();
     bool success;
     int mem_idx;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
 
     assert(HAVE_CMPXCHG128);
 
@@ -601,8 +601,8 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
     clear_helper_retaddr();
 #else
     int mem_idx = cpu_mmu_index(env, false);
-    TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
-    TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
+    MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+    MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
 
     o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
     o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
@@ -625,7 +625,7 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
     uintptr_t ra = GETPC();
     bool success;
     int mem_idx;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
 
     assert(HAVE_CMPXCHG128);
 
@@ -651,7 +651,7 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
     Int128 oldv, cmpv, newv;
     uintptr_t ra = GETPC();
     int mem_idx;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
 
     assert(HAVE_CMPXCHG128);
 
@@ -672,7 +672,7 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
     Int128 oldv, cmpv, newv;
     uintptr_t ra = GETPC();
     int mem_idx;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
 
     assert(HAVE_CMPXCHG128);
 
@@ -1930,7 +1930,7 @@ static bool do_v7m_function_return(ARMCPU *cpu)
 
     {
         bool threadmode, spsel;
-        TCGMemOpIdx oi;
+        MemOpIdx oi;
         ARMMMUIdx mmu_idx;
         uint32_t *frame_sp_p;
         uint32_t frameptr;
@@ -1045,7 +1045,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
                              int element, MemOp memop)
 {
     int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
-    switch (memop) {
+    switch ((unsigned)memop) {
     case MO_8:
         tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
         break;
@@ -67,7 +67,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
     {
         uintptr_t ra = GETPC();
         int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
+        MemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
         oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
     }
 
@@ -136,7 +136,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
         Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
 
         int mem_idx = cpu_mmu_index(env, false);
-        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+        MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
         Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra);
 
         if (int128_eq(oldv, cmpv)) {
@@ -775,7 +775,7 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2,
     uintptr_t ra = GETPC();
 #if defined(CONFIG_ATOMIC64)
     int mmu_idx = cpu_mmu_index(env, 0);
-    TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
+    MemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
 #endif
 
     if (parallel) {
@@ -8211,9 +8211,9 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
 #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
 
 #if !defined(CONFIG_USER_ONLY)
 #define MEMOP_IDX(DF)                                           \
-    TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,      \
-                                    cpu_mmu_index(env, false));
+    MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,         \
+                                 cpu_mmu_index(env, false));
 #else
 #define MEMOP_IDX(DF)
 #endif
@@ -239,7 +239,7 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
     g_assert(haddr);
     memset(haddr, byte, size);
 #else
-    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
     int i;
 
     if (likely(haddr)) {
@@ -282,7 +282,7 @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
 #ifdef CONFIG_USER_ONLY
     return ldub_p(*haddr + offset);
 #else
-    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
     uint8_t byte;
 
     if (likely(*haddr)) {
@@ -316,7 +316,7 @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
 #ifdef CONFIG_USER_ONLY
     stb_p(*haddr + offset, byte);
 #else
-    TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
 
     if (likely(*haddr)) {
         stb_p(*haddr + offset, byte);
@@ -1804,7 +1804,7 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
     Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
     Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
     int mem_idx;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     Int128 oldv;
     bool fail;
 
@@ -1884,7 +1884,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
             uint32_t *haddr = g2h(env_cpu(env), a1);
             ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
 #else
-            TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
+            MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
             ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
 #endif
         } else {
@@ -1904,7 +1904,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
 
         if (parallel) {
 #ifdef CONFIG_ATOMIC64
-            TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
+            MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
             ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
 #else
             /* Note that we asserted !parallel above.  */
@@ -1940,7 +1940,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
             cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
             cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
         } else if (HAVE_CMPXCHG128) {
-            TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+            MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
             ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
             cc = !int128_eq(ov, cv);
         } else {
@@ -1979,7 +1979,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
             cpu_stq_data_ra(env, a2 + 0, svh, ra);
             cpu_stq_data_ra(env, a2 + 8, svl, ra);
         } else if (HAVE_ATOMIC128) {
-            TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+            MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
             Int128 sv = int128_make128(svl, svh);
             cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
         } else {
@@ -2497,7 +2497,7 @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
     uintptr_t ra = GETPC();
     uint64_t hi, lo;
     int mem_idx;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     Int128 v;
 
     assert(HAVE_ATOMIC128);
@@ -2528,7 +2528,7 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
 {
     uintptr_t ra = GETPC();
     int mem_idx;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     Int128 v;
 
     assert(HAVE_ATOMIC128);
@@ -67,7 +67,7 @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
 {
     const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);
 
-    switch (memop) {
+    switch ((unsigned)memop) {
     case ES_8:
         tcg_gen_ld8u_i64(dst, cpu_env, offs);
         break;
@@ -1318,7 +1318,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
     case ASI_SNF:
     case ASI_SNFL:
         {
-            TCGMemOpIdx oi;
+            MemOpIdx oi;
             int idx = (env->pstate & PS_PRIV
                        ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
                        : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
@@ -1545,9 +1545,9 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
 #include "../tcg-ldst.c.inc"
 
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
- *                                     TCGMemOpIdx oi, uintptr_t ra)
+ *                                     MemOpIdx oi, uintptr_t ra)
  */
-static void * const qemu_ld_helpers[4] = {
+static void * const qemu_ld_helpers[MO_SIZE + 1] = {
     [MO_8]  = helper_ret_ldub_mmu,
 #ifdef HOST_WORDS_BIGENDIAN
     [MO_16] = helper_be_lduw_mmu,
@@ -1561,10 +1561,10 @@ static void * const qemu_ld_helpers[4] = {
 };
 
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
- *                                     uintxx_t val, TCGMemOpIdx oi,
+ *                                     uintxx_t val, MemOpIdx oi,
  *                                     uintptr_t ra)
  */
-static void * const qemu_st_helpers[4] = {
+static void * const qemu_st_helpers[MO_SIZE + 1] = {
     [MO_8]  = helper_ret_stb_mmu,
 #ifdef HOST_WORDS_BIGENDIAN
     [MO_16] = helper_be_stw_mmu,
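
Replacing the literal array bound with MO_SIZE + 1 ties the table size to the very mask used to index it, so the bound tracks any future widening of the size field. A standalone sketch of why that bound is safe:

    #include <stdio.h>

    enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3, MO_SIZE = 0x07 };

    /* Sizing a dispatch table as MO_SIZE + 1 guarantees that indexing
     * with (opc & MO_SIZE) can never run off the end of the array. */
    static const char *const size_name[MO_SIZE + 1] = {
        [MO_8] = "byte", [MO_16] = "halfword", [MO_32] = "word", [MO_64] = "quad",
    };

    int main(void)
    {
        unsigned opc = MO_32 | 0x10;               /* extra flag bits set */
        printf("%s\n", size_name[opc & MO_SIZE]);  /* masks back to "word" */
        return 0;
    }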
@@ -1586,7 +1586,7 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
 
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOpIdx oi = lb->oi;
+    MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
     MemOp size = opc & MO_SIZE;
 
@@ -1611,7 +1611,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 
 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOpIdx oi = lb->oi;
+    MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
     MemOp size = opc & MO_SIZE;
 
@@ -1629,7 +1629,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     return true;
 }
 
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                 TCGType ext, TCGReg data_reg, TCGReg addr_reg,
                                 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
@@ -1778,7 +1778,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
 }
 
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
-                            TCGMemOpIdx oi, TCGType ext)
+                            MemOpIdx oi, TCGType ext)
 {
     MemOp memop = get_memop(oi);
     const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
@@ -1803,7 +1803,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 }
 
 static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
-                            TCGMemOpIdx oi)
+                            MemOpIdx oi)
 {
     MemOp memop = get_memop(oi);
     const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
@@ -1437,7 +1437,7 @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
  */
-static void * const qemu_ld_helpers[8] = {
+static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
     [MO_UB] = helper_ret_ldub_mmu,
     [MO_SB] = helper_ret_ldsb_mmu,
 #ifdef HOST_WORDS_BIGENDIAN
@@ -1458,7 +1458,7 @@ static void * const qemu_ld_helpers[8] = {
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
  *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static void * const qemu_st_helpers[4] = {
+static void * const qemu_st_helpers[MO_SIZE + 1] = {
     [MO_8]  = helper_ret_stb_mmu,
 #ifdef HOST_WORDS_BIGENDIAN
     [MO_16] = helper_be_stw_mmu,
@@ -1632,7 +1632,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                 TCGReg addrhi, tcg_insn_unit *raddr,
                                 tcg_insn_unit *label_ptr)
@@ -1652,7 +1652,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
-    TCGMemOpIdx oi = lb->oi;
+    MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
     void *func;
 
@@ -1716,7 +1716,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
-    TCGMemOpIdx oi = lb->oi;
+    MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
 
     if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
@@ -1846,7 +1846,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 {
     TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     MemOp opc;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
@@ -1952,7 +1952,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 {
     TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     MemOp opc;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
|
@ -1611,7 +1611,7 @@ static void tcg_out_nopn(TCGContext *s, int n)
|
||||||
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
|
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
|
||||||
* int mmu_idx, uintptr_t ra)
|
* int mmu_idx, uintptr_t ra)
|
||||||
*/
|
*/
|
||||||
static void * const qemu_ld_helpers[16] = {
|
static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
|
||||||
[MO_UB] = helper_ret_ldub_mmu,
|
[MO_UB] = helper_ret_ldub_mmu,
|
||||||
[MO_LEUW] = helper_le_lduw_mmu,
|
[MO_LEUW] = helper_le_lduw_mmu,
|
||||||
[MO_LEUL] = helper_le_ldul_mmu,
|
[MO_LEUL] = helper_le_ldul_mmu,
|
||||||
|
@ -1624,7 +1624,7 @@ static void * const qemu_ld_helpers[16] = {
|
||||||
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
|
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
|
||||||
* uintxx_t val, int mmu_idx, uintptr_t ra)
|
* uintxx_t val, int mmu_idx, uintptr_t ra)
|
||||||
*/
|
*/
|
||||||
static void * const qemu_st_helpers[16] = {
|
static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
|
||||||
[MO_UB] = helper_ret_stb_mmu,
|
[MO_UB] = helper_ret_stb_mmu,
|
||||||
[MO_LEUW] = helper_le_stw_mmu,
|
[MO_LEUW] = helper_le_stw_mmu,
|
||||||
[MO_LEUL] = helper_le_stl_mmu,
|
[MO_LEUL] = helper_le_stl_mmu,
|
||||||
|
@ -1741,7 +1741,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
|
||||||
* for a load or store, so that we can later generate the correct helper code
|
* for a load or store, so that we can later generate the correct helper code
|
||||||
*/
|
*/
|
||||||
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
|
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
|
||||||
TCGMemOpIdx oi,
|
MemOpIdx oi,
|
||||||
TCGReg datalo, TCGReg datahi,
|
TCGReg datalo, TCGReg datahi,
|
||||||
TCGReg addrlo, TCGReg addrhi,
|
TCGReg addrlo, TCGReg addrhi,
|
||||||
tcg_insn_unit *raddr,
|
tcg_insn_unit *raddr,
|
||||||
|
@ -1768,7 +1768,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
|
||||||
*/
|
*/
|
||||||
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
||||||
{
|
{
|
||||||
TCGMemOpIdx oi = l->oi;
|
MemOpIdx oi = l->oi;
|
||||||
MemOp opc = get_memop(oi);
|
MemOp opc = get_memop(oi);
|
||||||
TCGReg data_reg;
|
TCGReg data_reg;
|
||||||
tcg_insn_unit **label_ptr = &l->label_ptr[0];
|
tcg_insn_unit **label_ptr = &l->label_ptr[0];
|
||||||
|
@ -1853,7 +1853,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
||||||
*/
|
*/
|
||||||
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
||||||
{
|
{
|
||||||
TCGMemOpIdx oi = l->oi;
|
MemOpIdx oi = l->oi;
|
||||||
MemOp opc = get_memop(oi);
|
MemOp opc = get_memop(oi);
|
||||||
MemOp s_bits = opc & MO_SIZE;
|
MemOp s_bits = opc & MO_SIZE;
|
||||||
tcg_insn_unit **label_ptr = &l->label_ptr[0];
|
tcg_insn_unit **label_ptr = &l->label_ptr[0];
|
||||||
|
@ -2054,7 +2054,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
|
||||||
{
|
{
|
||||||
TCGReg datalo, datahi, addrlo;
|
TCGReg datalo, datahi, addrlo;
|
||||||
TCGReg addrhi __attribute__((unused));
|
TCGReg addrhi __attribute__((unused));
|
||||||
TCGMemOpIdx oi;
|
MemOpIdx oi;
|
||||||
MemOp opc;
|
MemOp opc;
|
||||||
#if defined(CONFIG_SOFTMMU)
|
#if defined(CONFIG_SOFTMMU)
|
||||||
int mem_index;
|
int mem_index;
|
||||||
|
@ -2143,7 +2143,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
|
||||||
{
|
{
|
||||||
TCGReg datalo, datahi, addrlo;
|
TCGReg datalo, datahi, addrlo;
|
||||||
TCGReg addrhi __attribute__((unused));
|
TCGReg addrhi __attribute__((unused));
|
||||||
TCGMemOpIdx oi;
|
MemOpIdx oi;
|
||||||
MemOp opc;
|
MemOp opc;
|
||||||
#if defined(CONFIG_SOFTMMU)
|
#if defined(CONFIG_SOFTMMU)
|
||||||
int mem_index;
|
int mem_index;
|
||||||
|
|
|
@ -1017,7 +1017,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
|
||||||
#if defined(CONFIG_SOFTMMU)
|
#if defined(CONFIG_SOFTMMU)
|
||||||
#include "../tcg-ldst.c.inc"
|
#include "../tcg-ldst.c.inc"
|
||||||
|
|
||||||
static void * const qemu_ld_helpers[16] = {
|
static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
|
||||||
[MO_UB] = helper_ret_ldub_mmu,
|
[MO_UB] = helper_ret_ldub_mmu,
|
||||||
[MO_SB] = helper_ret_ldsb_mmu,
|
[MO_SB] = helper_ret_ldsb_mmu,
|
||||||
[MO_LEUW] = helper_le_lduw_mmu,
|
[MO_LEUW] = helper_le_lduw_mmu,
|
||||||
|
@ -1034,7 +1034,7 @@ static void * const qemu_ld_helpers[16] = {
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
static void * const qemu_st_helpers[16] = {
|
static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
|
||||||
[MO_UB] = helper_ret_stb_mmu,
|
[MO_UB] = helper_ret_stb_mmu,
|
||||||
[MO_LEUW] = helper_le_stw_mmu,
|
[MO_LEUW] = helper_le_stw_mmu,
|
||||||
[MO_LEUL] = helper_le_stl_mmu,
|
[MO_LEUL] = helper_le_stl_mmu,
|
||||||
|
@ -1120,7 +1120,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
|
||||||
* Clobbers TMP0, TMP1, TMP2, TMP3.
|
* Clobbers TMP0, TMP1, TMP2, TMP3.
|
||||||
*/
|
*/
|
||||||
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
|
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
|
||||||
TCGReg addrh, TCGMemOpIdx oi,
|
TCGReg addrh, MemOpIdx oi,
|
||||||
tcg_insn_unit *label_ptr[2], bool is_load)
|
tcg_insn_unit *label_ptr[2], bool is_load)
|
||||||
{
|
{
|
||||||
MemOp opc = get_memop(oi);
|
MemOp opc = get_memop(oi);
|
||||||
|
@ -1196,7 +1196,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
|
||||||
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl);
|
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
|
static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
|
||||||
TCGType ext,
|
TCGType ext,
|
||||||
TCGReg datalo, TCGReg datahi,
|
TCGReg datalo, TCGReg datahi,
|
||||||
TCGReg addrlo, TCGReg addrhi,
|
TCGReg addrlo, TCGReg addrhi,
|
||||||
|
@ -1221,7 +1221,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
|
||||||
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
||||||
{
|
{
|
||||||
const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
|
const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
|
||||||
TCGMemOpIdx oi = l->oi;
|
MemOpIdx oi = l->oi;
|
||||||
MemOp opc = get_memop(oi);
|
MemOp opc = get_memop(oi);
|
||||||
TCGReg v0;
|
TCGReg v0;
|
||||||
int i;
|
int i;
|
||||||
|
@ -1275,7 +1275,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
||||||
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
|
||||||
{
|
{
|
||||||
const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
|
const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr);
|
||||||
TCGMemOpIdx oi = l->oi;
|
MemOpIdx oi = l->oi;
|
||||||
MemOp opc = get_memop(oi);
|
MemOp opc = get_memop(oi);
|
||||||
MemOp s_bits = opc & MO_SIZE;
|
MemOp s_bits = opc & MO_SIZE;
|
||||||
int i;
|
int i;
|
||||||
|
@ -1434,7 +1434,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
|
||||||
{
|
{
|
||||||
TCGReg addr_regl, addr_regh __attribute__((unused));
|
TCGReg addr_regl, addr_regh __attribute__((unused));
|
||||||
TCGReg data_regl, data_regh;
|
TCGReg data_regl, data_regh;
|
||||||
TCGMemOpIdx oi;
|
MemOpIdx oi;
|
||||||
MemOp opc;
|
MemOp opc;
|
||||||
#if defined(CONFIG_SOFTMMU)
|
#if defined(CONFIG_SOFTMMU)
|
||||||
tcg_insn_unit *label_ptr[2];
|
tcg_insn_unit *label_ptr[2];
|
||||||
|
@ -1536,7 +1536,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
|
||||||
{
|
{
|
||||||
TCGReg addr_regl, addr_regh __attribute__((unused));
|
TCGReg addr_regl, addr_regh __attribute__((unused));
|
||||||
TCGReg data_regl, data_regh;
|
TCGReg data_regl, data_regh;
|
||||||
TCGMemOpIdx oi;
|
MemOpIdx oi;
|
||||||
MemOp opc;
|
MemOp opc;
|
||||||
#if defined(CONFIG_SOFTMMU)
|
#if defined(CONFIG_SOFTMMU)
|
||||||
tcg_insn_unit *label_ptr[2];
|
tcg_insn_unit *label_ptr[2];
|
||||||
|
|
|
@@ -1023,7 +1023,7 @@ void tcg_optimize(TCGContext *s)
 
         CASE_OP_32_64(qemu_ld):
             {
-                TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs];
+                MemOpIdx oi = op->args[nb_oargs + nb_iargs];
                 MemOp mop = get_memop(oi);
                 if (!(mop & MO_SIGN)) {
                     mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
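
The mask expression in that hunk builds an all-ones value as wide as the access without ever shifting a 64-bit word by 64, which would be undefined. A standalone check of the arithmetic for each size:

    #include <assert.h>
    #include <stdint.h>

    enum { MO_SIZE = 0x07 };

    /* The optimizer's known-bits mask for an unsigned load:
     * 8 << (mop & MO_SIZE) is the access width in bits, and
     * (2ULL << (bits - 1)) - 1 is an all-ones mask of that width,
     * with a maximum shift count of 63 rather than 64. */
    static uint64_t ld_mask(unsigned mop)
    {
        unsigned bits = 8 << (mop & MO_SIZE);
        return (2ULL << (bits - 1)) - 1;
    }

    int main(void)
    {
        assert(ld_mask(0) == 0xffull);                 /* MO_8  */
        assert(ld_mask(1) == 0xffffull);               /* MO_16 */
        assert(ld_mask(2) == 0xffffffffull);           /* MO_32 */
        assert(ld_mask(3) == 0xffffffffffffffffull);   /* MO_64 */
        return 0;
    }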
@@ -1931,7 +1931,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
 #endif
 }
 
-static const uint32_t qemu_ldx_opc[16] = {
+static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
     [MO_UB] = LBZX,
     [MO_UW] = LHZX,
     [MO_UL] = LWZX,
@@ -1944,7 +1944,7 @@ static const uint32_t qemu_ldx_opc[16] = {
     [MO_BSWAP | MO_Q]  = LDBRX,
 };
 
-static const uint32_t qemu_stx_opc[16] = {
+static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
     [MO_UB] = STBX,
     [MO_UW] = STHX,
     [MO_UL] = STWX,
@@ -1965,7 +1965,7 @@ static const uint32_t qemu_exts_opc[4] = {
 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
  *                                 int mmu_idx, uintptr_t ra)
  */
-static void * const qemu_ld_helpers[16] = {
+static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
     [MO_UB]   = helper_ret_ldub_mmu,
     [MO_LEUW] = helper_le_lduw_mmu,
     [MO_LEUL] = helper_le_ldul_mmu,
@@ -1978,7 +1978,7 @@ static void * const qemu_ld_helpers[16] = {
 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
  *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static void * const qemu_st_helpers[16] = {
+static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
     [MO_UB]   = helper_ret_stb_mmu,
     [MO_LEUW] = helper_le_stw_mmu,
     [MO_LEUL] = helper_le_stl_mmu,
@@ -2103,7 +2103,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                 TCGReg datalo_reg, TCGReg datahi_reg,
                                 TCGReg addrlo_reg, TCGReg addrhi_reg,
                                 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
@@ -2122,7 +2122,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
 
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOpIdx oi = lb->oi;
+    MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
     TCGReg hi, lo, arg = TCG_REG_R3;
 
@@ -2169,7 +2169,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 
 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOpIdx oi = lb->oi;
+    MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
     MemOp s_bits = opc & MO_SIZE;
     TCGReg hi, lo, arg = TCG_REG_R3;
@@ -2233,7 +2233,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg datalo, datahi, addrlo, rbase;
     TCGReg addrhi __attribute__((unused));
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     MemOp opc, s_bits;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
@@ -2308,7 +2308,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg datalo, datahi, addrlo, rbase;
     TCGReg addrhi __attribute__((unused));
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     MemOp opc, s_bits;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
@@ -850,9 +850,9 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
 #include "../tcg-ldst.c.inc"

 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
- *                                     TCGMemOpIdx oi, uintptr_t ra)
+ *                                     MemOpIdx oi, uintptr_t ra)
  */
-static void * const qemu_ld_helpers[8] = {
+static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
     [MO_UB] = helper_ret_ldub_mmu,
     [MO_SB] = helper_ret_ldsb_mmu,
 #ifdef HOST_WORDS_BIGENDIAN
@@ -875,10 +875,10 @@ static void * const qemu_ld_helpers[8] = {
 };

 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
- *                                     uintxx_t val, TCGMemOpIdx oi,
+ *                                     uintxx_t val, MemOpIdx oi,
  *                                     uintptr_t ra)
  */
-static void * const qemu_st_helpers[4] = {
+static void * const qemu_st_helpers[MO_SIZE + 1] = {
     [MO_8] = helper_ret_stb_mmu,
 #ifdef HOST_WORDS_BIGENDIAN
     [MO_16] = helper_be_stw_mmu,
@@ -906,7 +906,7 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
 }

 static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
-                             TCGReg addrh, TCGMemOpIdx oi,
+                             TCGReg addrh, MemOpIdx oi,
                              tcg_insn_unit **label_ptr, bool is_load)
 {
     MemOp opc = get_memop(oi);
@@ -959,7 +959,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
         tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
 }

-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                 TCGType ext,
                                 TCGReg datalo, TCGReg datahi,
                                 TCGReg addrlo, TCGReg addrhi,
@@ -980,7 +980,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,

 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOpIdx oi = l->oi;
+    MemOpIdx oi = l->oi;
     MemOp opc = get_memop(oi);
     TCGReg a0 = tcg_target_call_iarg_regs[0];
     TCGReg a1 = tcg_target_call_iarg_regs[1];
@@ -1012,7 +1012,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)

 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOpIdx oi = l->oi;
+    MemOpIdx oi = l->oi;
     MemOp opc = get_memop(oi);
     MemOp s_bits = opc & MO_SIZE;
     TCGReg a0 = tcg_target_call_iarg_regs[0];
@@ -1104,7 +1104,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg addr_regl, addr_regh __attribute__((unused));
     TCGReg data_regl, data_regh;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     MemOp opc;
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[1];
@@ -1170,7 +1170,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg addr_regl, addr_regh __attribute__((unused));
     TCGReg data_regl, data_regh;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     MemOp opc;
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[1];
@@ -13,13 +13,20 @@ C_O0_I1(r)
 C_O0_I2(L, L)
 C_O0_I2(r, r)
 C_O0_I2(r, ri)
+C_O0_I2(v, r)
 C_O1_I1(r, L)
 C_O1_I1(r, r)
+C_O1_I1(v, r)
+C_O1_I1(v, v)
+C_O1_I1(v, vr)
 C_O1_I2(r, 0, ri)
 C_O1_I2(r, 0, rI)
 C_O1_I2(r, 0, rJ)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, rZ, r)
+C_O1_I2(v, v, r)
+C_O1_I2(v, v, v)
+C_O1_I3(v, v, v, v)
 C_O1_I4(r, r, ri, r, 0)
 C_O1_I4(r, r, ri, rI, 0)
 C_O2_I2(b, a, 0, r)
@@ -10,6 +10,7 @@
  */
 REGS('r', ALL_GENERAL_REGS)
 REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
+REGS('v', ALL_VECTOR_REGS)
 /*
  * A (single) even/odd pair for division.
  * TODO: Add something to the register allocator to allow

[diff suppressed: file too large]
@@ -32,39 +32,44 @@
 #define MAX_CODE_GEN_BUFFER_SIZE  (3 * GiB)

 typedef enum TCGReg {
-    TCG_REG_R0 = 0,
-    TCG_REG_R1,
-    TCG_REG_R2,
-    TCG_REG_R3,
-    TCG_REG_R4,
-    TCG_REG_R5,
-    TCG_REG_R6,
-    TCG_REG_R7,
-    TCG_REG_R8,
-    TCG_REG_R9,
-    TCG_REG_R10,
-    TCG_REG_R11,
-    TCG_REG_R12,
-    TCG_REG_R13,
-    TCG_REG_R14,
-    TCG_REG_R15
+    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3,
+    TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7,
+    TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11,
+    TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15,
+
+    TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
+    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
+    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
+    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
+    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
+    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
+    TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
+    TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
+
+    TCG_AREG0 = TCG_REG_R10,
+    TCG_REG_CALL_STACK = TCG_REG_R15
 } TCGReg;

-#define TCG_TARGET_NB_REGS 16
+#define TCG_TARGET_NB_REGS 64

 /* A list of relevant facilities used by this translator.  Some of these
    are required for proper operation, and these are checked at startup. */

-#define FACILITY_ZARCH_ACTIVE         (1ULL << (63 - 2))
-#define FACILITY_LONG_DISP            (1ULL << (63 - 18))
-#define FACILITY_EXT_IMM              (1ULL << (63 - 21))
-#define FACILITY_GEN_INST_EXT         (1ULL << (63 - 34))
-#define FACILITY_LOAD_ON_COND         (1ULL << (63 - 45))
+#define FACILITY_ZARCH_ACTIVE         2
+#define FACILITY_LONG_DISP            18
+#define FACILITY_EXT_IMM              21
+#define FACILITY_GEN_INST_EXT         34
+#define FACILITY_LOAD_ON_COND         45
 #define FACILITY_FAST_BCR_SER         FACILITY_LOAD_ON_COND
 #define FACILITY_DISTINCT_OPS         FACILITY_LOAD_ON_COND
-#define FACILITY_LOAD_ON_COND2        (1ULL << (63 - 53))
+#define FACILITY_LOAD_ON_COND2        53
+#define FACILITY_VECTOR               129
+#define FACILITY_VECTOR_ENH1          135

-extern uint64_t s390_facilities;
+extern uint64_t s390_facilities[3];
+
+#define HAVE_FACILITY(X) \
+    ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)

 /* optional instructions */
 #define TCG_TARGET_HAS_div2_i32       1
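Note: the facility constants change from precomputed single-word bit masks to the plain bit numbers the architecture manual uses, because the vector facilities (bits 129 and 135) no longer fit in one uint64_t. HAVE_FACILITY() recovers the word and the big-endian bit position. A sketch of the same indexing, assuming that bit numbering:

    #include <stdbool.h>
    #include <stdint.h>

    /* s390 facility bits are numbered big-endian within each 64-bit word. */
    static bool have_facility(const uint64_t fac[3], unsigned nr)
    {
        return (fac[nr / 64] >> (63 - nr % 64)) & 1;
    }
    /* e.g. have_facility(s390_facilities, 129) tests the vector facility. */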
@@ -85,8 +90,8 @@ extern uint64_t s390_facilities;
 #define TCG_TARGET_HAS_clz_i32        0
 #define TCG_TARGET_HAS_ctz_i32        0
 #define TCG_TARGET_HAS_ctpop_i32      0
-#define TCG_TARGET_HAS_deposit_i32    (s390_facilities & FACILITY_GEN_INST_EXT)
-#define TCG_TARGET_HAS_extract_i32    (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_deposit_i32    HAVE_FACILITY(GEN_INST_EXT)
+#define TCG_TARGET_HAS_extract_i32    HAVE_FACILITY(GEN_INST_EXT)
 #define TCG_TARGET_HAS_sextract_i32   0
 #define TCG_TARGET_HAS_extract2_i32   0
 #define TCG_TARGET_HAS_movcond_i32    1
@@ -98,7 +103,7 @@ extern uint64_t s390_facilities;
 #define TCG_TARGET_HAS_mulsh_i32      0
 #define TCG_TARGET_HAS_extrl_i64_i32  0
 #define TCG_TARGET_HAS_extrh_i64_i32  0
-#define TCG_TARGET_HAS_direct_jump    (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_direct_jump    HAVE_FACILITY(GEN_INST_EXT)
 #define TCG_TARGET_HAS_qemu_st8_i32   0

 #define TCG_TARGET_HAS_div2_i64       1
@@ -119,11 +124,11 @@ extern uint64_t s390_facilities;
 #define TCG_TARGET_HAS_eqv_i64        0
 #define TCG_TARGET_HAS_nand_i64       0
 #define TCG_TARGET_HAS_nor_i64        0
-#define TCG_TARGET_HAS_clz_i64        (s390_facilities & FACILITY_EXT_IMM)
+#define TCG_TARGET_HAS_clz_i64        HAVE_FACILITY(EXT_IMM)
 #define TCG_TARGET_HAS_ctz_i64        0
 #define TCG_TARGET_HAS_ctpop_i64      0
-#define TCG_TARGET_HAS_deposit_i64    (s390_facilities & FACILITY_GEN_INST_EXT)
-#define TCG_TARGET_HAS_extract_i64    (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_deposit_i64    HAVE_FACILITY(GEN_INST_EXT)
+#define TCG_TARGET_HAS_extract_i64    HAVE_FACILITY(GEN_INST_EXT)
 #define TCG_TARGET_HAS_sextract_i64   0
 #define TCG_TARGET_HAS_extract2_i64   0
 #define TCG_TARGET_HAS_movcond_i64    1
@@ -134,8 +139,28 @@ extern uint64_t s390_facilities;
 #define TCG_TARGET_HAS_muluh_i64      0
 #define TCG_TARGET_HAS_mulsh_i64      0

+#define TCG_TARGET_HAS_v64            HAVE_FACILITY(VECTOR)
+#define TCG_TARGET_HAS_v128           HAVE_FACILITY(VECTOR)
+#define TCG_TARGET_HAS_v256           0
+
+#define TCG_TARGET_HAS_andc_vec       1
+#define TCG_TARGET_HAS_orc_vec        HAVE_FACILITY(VECTOR_ENH1)
+#define TCG_TARGET_HAS_not_vec        1
+#define TCG_TARGET_HAS_neg_vec        1
+#define TCG_TARGET_HAS_abs_vec        1
+#define TCG_TARGET_HAS_roti_vec       1
+#define TCG_TARGET_HAS_rots_vec       1
+#define TCG_TARGET_HAS_rotv_vec       1
+#define TCG_TARGET_HAS_shi_vec        1
+#define TCG_TARGET_HAS_shs_vec        1
+#define TCG_TARGET_HAS_shv_vec        1
+#define TCG_TARGET_HAS_mul_vec        1
+#define TCG_TARGET_HAS_sat_vec        0
+#define TCG_TARGET_HAS_minmax_vec     1
+#define TCG_TARGET_HAS_bitsel_vec     1
+#define TCG_TARGET_HAS_cmpsel_vec     0

 /* used for function call generation */
-#define TCG_REG_CALL_STACK            TCG_REG_R15
 #define TCG_TARGET_STACK_ALIGN        8
 #define TCG_TARGET_CALL_STACK_OFFSET  160
@@ -144,10 +169,6 @@ extern uint64_t s390_facilities;

 #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)

-enum {
-    TCG_AREG0 = TCG_REG_R10,
-};
-
 static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                                             uintptr_t jmp_rw, uintptr_t addr)
 {
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021 Linaro
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion.  These will be
+ * emitted by tcg_expand_vec_op.  For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
+DEF(s390_vuph_vec, 1, 1, 0, IMPLVEC)
+DEF(s390_vupl_vec, 1, 1, 0, IMPLVEC)
+DEF(s390_vpks_vec, 1, 2, 0, IMPLVEC)
@@ -855,8 +855,8 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
 }

 #ifdef CONFIG_SOFTMMU
-static const tcg_insn_unit *qemu_ld_trampoline[16];
-static const tcg_insn_unit *qemu_st_trampoline[16];
+static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
+static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

 static void emit_extend(TCGContext *s, TCGReg r, int op)
 {
@@ -883,7 +883,7 @@ static void emit_extend(TCGContext *s, TCGReg r, int op)

 static void build_trampolines(TCGContext *s)
 {
-    static void * const qemu_ld_helpers[16] = {
+    static void * const qemu_ld_helpers[] = {
         [MO_UB] = helper_ret_ldub_mmu,
         [MO_SB] = helper_ret_ldsb_mmu,
         [MO_LEUW] = helper_le_lduw_mmu,
@@ -895,7 +895,7 @@ static void build_trampolines(TCGContext *s)
         [MO_BEUL] = helper_be_ldul_mmu,
         [MO_BEQ]  = helper_be_ldq_mmu,
     };
-    static void * const qemu_st_helpers[16] = {
+    static void * const qemu_st_helpers[] = {
         [MO_UB] = helper_ret_stb_mmu,
         [MO_LEUW] = helper_le_stw_mmu,
         [MO_LEUL] = helper_le_stl_mmu,
@@ -908,7 +908,7 @@ static void build_trampolines(TCGContext *s)
     int i;
     TCGReg ra;

-    for (i = 0; i < 16; ++i) {
+    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
         if (qemu_ld_helpers[i] == NULL) {
             continue;
         }
@@ -936,7 +936,7 @@ static void build_trampolines(TCGContext *s)
         tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
     }

-    for (i = 0; i < 16; ++i) {
+    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
         if (qemu_st_helpers[i] == NULL) {
             continue;
         }
@@ -1118,7 +1118,7 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
 }
 #endif /* CONFIG_SOFTMMU */

-static const int qemu_ld_opc[16] = {
+static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
     [MO_UB]   = LDUB,
     [MO_SB]   = LDSB,

@@ -1135,7 +1135,7 @@ static const int qemu_ld_opc[16] = {
     [MO_LEQ]  = LDX_LE,
 };

-static const int qemu_st_opc[16] = {
+static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
     [MO_UB]  = STB,

     [MO_BEUW] = STH,
@@ -1148,7 +1148,7 @@ static const int qemu_st_opc[16] = {
 };

 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
-                            TCGMemOpIdx oi, bool is_64)
+                            MemOpIdx oi, bool is_64)
 {
     MemOp memop = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
@@ -1230,7 +1230,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
 }

 static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
-                            TCGMemOpIdx oi)
+                            MemOpIdx oi)
 {
     MemOp memop = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
@@ -22,7 +22,7 @@

 typedef struct TCGLabelQemuLdst {
     bool is_ld;             /* qemu_ld: true, qemu_st: false */
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     TCGType type;           /* result type of a load */
     TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
     TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
@@ -119,6 +119,18 @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list,
             continue;
         }
         break;
+    case INDEX_op_usadd_vec:
+        if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece) ||
+            tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) {
+            continue;
+        }
+        break;
+    case INDEX_op_ussub_vec:
+        if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece) ||
+            tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) {
+            continue;
+        }
+        break;
     case INDEX_op_cmpsel_vec:
     case INDEX_op_smin_vec:
     case INDEX_op_smax_vec:
@@ -603,7 +615,18 @@ void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)

 void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
 {
-    do_op3_nofail(vece, r, a, b, INDEX_op_usadd_vec);
+    if (!do_op3(vece, r, a, b, INDEX_op_usadd_vec)) {
+        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
+        TCGv_vec t = tcg_temp_new_vec_matching(r);
+
+        /* usadd(a, b) = min(a, ~b) + b */
+        tcg_gen_not_vec(vece, t, b);
+        tcg_gen_umin_vec(vece, t, t, a);
+        tcg_gen_add_vec(vece, r, t, b);
+
+        tcg_temp_free_vec(t);
+        tcg_swap_vecop_list(hold_list);
+    }
 }

 void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
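Note: the fallback added above relies on the identity usadd(a, b) = min(a, ~b) + b, quoted in the hunk's comment: clamping a to ~b, the largest addend that cannot overflow b, makes the plain add saturate per element. A scalar check of the identity over all 8-bit inputs (standalone sketch, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t usadd8(uint8_t a, uint8_t b)      /* reference saturating add */
    {
        unsigned s = a + b;
        return s > 0xff ? 0xff : (uint8_t)s;
    }

    static uint8_t usadd8_via_min(uint8_t a, uint8_t b)
    {
        uint8_t t = (uint8_t)~b;      /* largest value that cannot overflow b */
        t = t < a ? t : a;            /* min(a, ~b) */
        return (uint8_t)(t + b);      /* can no longer wrap */
    }

    int main(void)
    {
        for (unsigned a = 0; a < 256; a++) {
            for (unsigned b = 0; b < 256; b++) {
                assert(usadd8((uint8_t)a, (uint8_t)b) ==
                       usadd8_via_min((uint8_t)a, (uint8_t)b));
            }
        }
        return 0;
    }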
@@ -613,7 +636,17 @@ void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)

 void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
 {
-    do_op3_nofail(vece, r, a, b, INDEX_op_ussub_vec);
+    if (!do_op3(vece, r, a, b, INDEX_op_ussub_vec)) {
+        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
+        TCGv_vec t = tcg_temp_new_vec_matching(r);
+
+        /* ussub(a, b) = max(a, b) - b */
+        tcg_gen_umax_vec(vece, t, a, b);
+        tcg_gen_sub_vec(vece, r, t, b);
+
+        tcg_temp_free_vec(t);
+        tcg_swap_vecop_list(hold_list);
+    }
 }

 static void do_minmax(unsigned vece, TCGv_vec r, TCGv_vec a,
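Note: the subtraction fallback uses the companion identity ussub(a, b) = max(a, b) - b, which clamps the result at zero instead of wrapping. Scalar check under the same assumptions as the sketch above:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t ussub8_via_max(uint8_t a, uint8_t b)
    {
        uint8_t t = a > b ? a : b;    /* max(a, b) */
        return (uint8_t)(t - b);      /* a - b, or 0 when a < b */
    }

    int main(void)
    {
        for (unsigned a = 0; a < 256; a++) {
            for (unsigned b = 0; b < 256; b++) {
                unsigned want = a > b ? a - b : 0;
                assert(ussub8_via_max((uint8_t)a, (uint8_t)b) == want);
            }
        }
        return 0;
    }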
tcg/tcg-op.c (60 lines changed)
@@ -28,7 +28,6 @@
 #include "tcg/tcg-op.h"
 #include "tcg/tcg-mo.h"
 #include "trace-tcg.h"
-#include "trace/mem.h"
 #include "exec/plugin-gen.h"

 /* Reduce the number of ifdefs below.  This assumes that all uses of
@@ -2780,10 +2779,13 @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
         }
         break;
     case MO_64:
-        if (!is64) {
-            tcg_abort();
+        if (is64) {
+            op &= ~MO_SIGN;
+            break;
         }
-        break;
+        /* fall through */
+    default:
+        g_assert_not_reached();
     }
     if (st) {
         op &= ~MO_SIGN;
@@ -2794,7 +2796,7 @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
 static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
                          MemOp memop, TCGArg idx)
 {
-    TCGMemOpIdx oi = make_memop_idx(memop, idx);
+    MemOpIdx oi = make_memop_idx(memop, idx);
 #if TARGET_LONG_BITS == 32
     tcg_gen_op3i_i32(opc, val, addr, oi);
 #else
@@ -2809,7 +2811,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
 static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
                          MemOp memop, TCGArg idx)
 {
-    TCGMemOpIdx oi = make_memop_idx(memop, idx);
+    MemOpIdx oi = make_memop_idx(memop, idx);
 #if TARGET_LONG_BITS == 32
     if (TCG_TARGET_REG_BITS == 32) {
         tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
@@ -2850,10 +2852,12 @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr)
     return vaddr;
 }

-static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
+static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi,
+                                     enum qemu_plugin_mem_rw rw)
 {
 #ifdef CONFIG_PLUGIN
     if (tcg_ctx->plugin_insn != NULL) {
+        qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw);
         plugin_gen_empty_mem_callback(vaddr, info);
         tcg_temp_free(vaddr);
     }
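Note: the plugin callback now receives the raw MemOpIdx plus an explicit read/write flag, and make_plugin_meminfo() presumably packs both into one qemu_plugin_meminfo_t. A sketch of that shape; the field layout here is an assumption for illustration, not taken from this diff:

    #include <stdint.h>

    typedef uint32_t memop_idx_t;        /* stand-in for QEMU's MemOpIdx */
    typedef uint32_t plugin_meminfo_t;   /* stand-in for qemu_plugin_meminfo_t */

    enum mem_rw { MEM_R = 1, MEM_W = 2, MEM_RW = 3 };

    /* Hypothetical packing: MemOpIdx in the low bits, rw flags above it. */
    static plugin_meminfo_t make_meminfo(memop_idx_t oi, enum mem_rw rw)
    {
        return oi | ((plugin_meminfo_t)rw << 16);
    }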
@@ -2863,11 +2867,12 @@ static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
 void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     MemOp orig_memop;
-    uint16_t info = trace_mem_get_info(memop, idx, 0);
+    MemOpIdx oi;

     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     memop = tcg_canonicalize_memop(memop, 0, 0);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
+    oi = make_memop_idx(memop, idx);
+    trace_guest_ld_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);

     orig_memop = memop;
     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2880,7 +2885,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)

     addr = plugin_prep_mem_callbacks(addr);
     gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
-    plugin_gen_mem_callbacks(addr, info);
+    plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R);

     if ((orig_memop ^ memop) & MO_BSWAP) {
         switch (orig_memop & MO_SIZE) {
@@ -2901,11 +2906,12 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     TCGv_i32 swap = NULL;
-    uint16_t info = trace_mem_get_info(memop, idx, 1);
+    MemOpIdx oi;

     tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 0, 1);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
+    oi = make_memop_idx(memop, idx);
+    trace_guest_st_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);

     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
         swap = tcg_temp_new_i32();
@@ -2929,7 +2935,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
     } else {
         gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
     }
-    plugin_gen_mem_callbacks(addr, info);
+    plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W);

     if (swap) {
         tcg_temp_free_i32(swap);
@@ -2939,7 +2945,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     MemOp orig_memop;
-    uint16_t info;
+    MemOpIdx oi;

     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
@@ -2953,8 +2959,8 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)

     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     memop = tcg_canonicalize_memop(memop, 1, 0);
-    info = trace_mem_get_info(memop, idx, 0);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
+    oi = make_memop_idx(memop, idx);
+    trace_guest_ld_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);

     orig_memop = memop;
     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2967,7 +2973,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)

     addr = plugin_prep_mem_callbacks(addr);
     gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
-    plugin_gen_mem_callbacks(addr, info);
+    plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R);

     if ((orig_memop ^ memop) & MO_BSWAP) {
         int flags = (orig_memop & MO_SIGN
@@ -2992,7 +2998,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
 void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     TCGv_i64 swap = NULL;
-    uint16_t info;
+    MemOpIdx oi;

     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
@@ -3001,8 +3007,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)

     tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 1, 1);
-    info = trace_mem_get_info(memop, idx, 1);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);
+    oi = make_memop_idx(memop, idx);
+    trace_guest_st_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi);

     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
         swap = tcg_temp_new_i64();
@@ -3025,7 +3031,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)

     addr = plugin_prep_mem_callbacks(addr);
     gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
-    plugin_gen_mem_callbacks(addr, info);
+    plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W);

     if (swap) {
         tcg_temp_free_i64(swap);
@@ -3095,7 +3101,7 @@ typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
 # define WITH_ATOMIC64(X)
 #endif

-static void * const table_cmpxchg[16] = {
+static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = {
     [MO_8] = gen_helper_atomic_cmpxchgb,
     [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le,
     [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be,
@@ -3129,7 +3135,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
         tcg_temp_free_i32(t1);
     } else {
         gen_atomic_cx_i32 gen;
-        TCGMemOpIdx oi;
+        MemOpIdx oi;

         gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
         tcg_debug_assert(gen != NULL);
@@ -3168,7 +3174,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
     } else if ((memop & MO_SIZE) == MO_64) {
 #ifdef CONFIG_ATOMIC64
         gen_atomic_cx_i64 gen;
-        TCGMemOpIdx oi;
+        MemOpIdx oi;

         gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
         tcg_debug_assert(gen != NULL);
@@ -3224,7 +3230,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                              TCGArg idx, MemOp memop, void * const table[])
 {
     gen_atomic_op_i32 gen;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;

     memop = tcg_canonicalize_memop(memop, 0, 0);

@@ -3266,7 +3272,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
     if ((memop & MO_SIZE) == MO_64) {
 #ifdef CONFIG_ATOMIC64
         gen_atomic_op_i64 gen;
-        TCGMemOpIdx oi;
+        MemOpIdx oi;

         gen = table[memop & (MO_SIZE | MO_BSWAP)];
         tcg_debug_assert(gen != NULL);
@@ -3297,7 +3303,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
 }

 #define GEN_ATOMIC_HELPER(NAME, OP, NEW)                                \
-static void * const table_##NAME[16] = {                                \
+static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = {          \
     [MO_8] = gen_helper_atomic_##NAME##b,                               \
     [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le,                   \
     [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be,                   \
@@ -1910,7 +1910,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
         case INDEX_op_qemu_ld_i64:
         case INDEX_op_qemu_st_i64:
             {
-                TCGMemOpIdx oi = op->args[k++];
+                MemOpIdx oi = op->args[k++];
                 MemOp op = get_memop(oi);
                 unsigned ix = get_mmuidx(oi);
tcg/tci.c (14 lines changed)
@@ -61,7 +61,7 @@ static uint64_t tci_uint64(uint32_t high, uint32_t low)
  *   i = immediate (uint32_t)
  *   I = immediate (tcg_target_ulong)
  *   l = label or pointer
- *   m = immediate (TCGMemOpIdx)
+ *   m = immediate (MemOpIdx)
  *   n = immediate (call return length)
  *   r = register
  *   s = signed ldst offset
@@ -105,7 +105,7 @@ static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
 }

 static void tci_args_rrm(uint32_t insn, TCGReg *r0,
-                         TCGReg *r1, TCGMemOpIdx *m2)
+                         TCGReg *r1, MemOpIdx *m2)
 {
     *r0 = extract32(insn, 8, 4);
     *r1 = extract32(insn, 12, 4);
@@ -145,7 +145,7 @@ static void tci_args_rrrc(uint32_t insn,
 }

 static void tci_args_rrrm(uint32_t insn,
-                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
+                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
 {
     *r0 = extract32(insn, 8, 4);
     *r1 = extract32(insn, 12, 4);
@@ -289,7 +289,7 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
 }

 static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
-                            TCGMemOpIdx oi, const void *tb_ptr)
+                            MemOpIdx oi, const void *tb_ptr)
 {
     MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
     uintptr_t ra = (uintptr_t)tb_ptr;
@@ -374,7 +374,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
 }

 static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
-                        TCGMemOpIdx oi, const void *tb_ptr)
+                        MemOpIdx oi, const void *tb_ptr)
 {
     MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
     uintptr_t ra = (uintptr_t)tb_ptr;
@@ -482,7 +482,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
     uint32_t tmp32;
     uint64_t tmp64;
     uint64_t T1, T2;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     int32_t ofs;
     void *ptr;

@@ -1148,7 +1148,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info)
     tcg_target_ulong i1;
     int32_t s2;
     TCGCond c;
-    TCGMemOpIdx oi;
+    MemOpIdx oi;
     uint8_t pos, len;
     void *ptr;
@@ -184,7 +184,7 @@ DOCKER_PARTIAL_IMAGES += debian-riscv64-cross
 DOCKER_PARTIAL_IMAGES += debian-sh4-cross debian-sparc64-cross
 DOCKER_PARTIAL_IMAGES += debian-tricore-cross
 DOCKER_PARTIAL_IMAGES += debian-xtensa-cross
-DOCKER_PARTIAL_IMAGES += fedora-i386-cross fedora-cris-cross
+DOCKER_PARTIAL_IMAGES += fedora-cris-cross

 # Rules for building linux-user powered images
 #
|
@ -18,13 +18,14 @@ ENV PACKAGES \
|
||||||
glibc-static.i686 \
|
glibc-static.i686 \
|
||||||
gnutls-devel.i686 \
|
gnutls-devel.i686 \
|
||||||
nettle-devel.i686 \
|
nettle-devel.i686 \
|
||||||
|
pcre-devel.i686 \
|
||||||
perl-Test-Harness \
|
perl-Test-Harness \
|
||||||
pixman-devel.i686 \
|
pixman-devel.i686 \
|
||||||
sysprof-capture-devel.i686 \
|
sysprof-capture-devel.i686 \
|
||||||
zlib-devel.i686
|
zlib-devel.i686
|
||||||
|
|
||||||
ENV QEMU_CONFIGURE_OPTS --extra-cflags=-m32 --disable-vhost-user
|
ENV QEMU_CONFIGURE_OPTS --cpu=i386 --disable-vhost-user
|
||||||
ENV PKG_CONFIG_PATH /usr/lib/pkgconfig
|
ENV PKG_CONFIG_LIBDIR /usr/lib/pkgconfig
|
||||||
|
|
||||||
RUN dnf update -y && dnf install -y $PACKAGES
|
RUN dnf update -y && dnf install -y $PACKAGES
|
||||||
RUN rpm -q $PACKAGES | sort > /packages.txt
|
RUN rpm -q $PACKAGES | sort > /packages.txt
|
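Note: two behavioural fixes hide in this hunk. --cpu=i386 tells configure this is a genuine i386 target rather than an x86_64 build with -m32 bolted on, and PKG_CONFIG_LIBDIR, unlike PKG_CONFIG_PATH (which only prepends extra search directories), replaces pkg-config's default search path entirely, so only the .i686 libraries can ever be found.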
trace-events (18 lines changed)
|
@ -120,26 +120,16 @@ vcpu guest_cpu_reset(void)
|
||||||
# tcg/tcg-op.c
|
# tcg/tcg-op.c
|
||||||
|
|
||||||
# @vaddr: Access' virtual address.
|
# @vaddr: Access' virtual address.
|
||||||
# @info : Access' information (see below).
|
# @memopidx: Access' information (see below).
|
||||||
#
|
#
|
||||||
# Start virtual memory access (before any potential access violation).
|
# Start virtual memory access (before any potential access violation).
|
||||||
#
|
|
||||||
# Does not include memory accesses performed by devices.
|
# Does not include memory accesses performed by devices.
|
||||||
#
|
#
|
||||||
# Access information can be parsed as:
|
|
||||||
#
|
|
||||||
# struct mem_info {
|
|
||||||
# uint8_t size_shift : 4; /* interpreted as "1 << size_shift" bytes */
|
|
||||||
# bool sign_extend: 1; /* sign-extended */
|
|
||||||
# uint8_t endianness : 1; /* 0: little, 1: big */
|
|
||||||
# bool store : 1; /* whether it is a store operation */
|
|
||||||
# pad : 1;
|
|
||||||
# uint8_t mmuidx : 4; /* mmuidx (softmmu only) */
|
|
||||||
# };
|
|
||||||
#
|
|
||||||
# Mode: user, softmmu
|
# Mode: user, softmmu
|
||||||
# Targets: TCG(all)
|
# Targets: TCG(all)
|
||||||
vcpu tcg guest_mem_before(TCGv vaddr, uint16_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d"
|
vcpu tcg guest_ld_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x"
|
||||||
|
vcpu tcg guest_st_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x"
|
||||||
|
vcpu tcg guest_rmw_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x"
|
||||||
|
|
||||||
# include/user/syscall-trace.h
|
# include/user/syscall-trace.h
|
||||||
|
|
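Note: the new trace arguments carry the same MemOpIdx that make_memop_idx() builds in tcg/tcg-op.c above and that get_memop()/get_mmuidx() take apart elsewhere in this series. A sketch of the packing those accessors imply; the 4-bit mmu-index layout is an assumption for illustration:

    #include <stdint.h>

    typedef unsigned memop_t;        /* stand-in for MemOp */
    typedef uint32_t memop_idx_t;    /* stand-in for MemOpIdx */

    static memop_idx_t make_memop_idx(memop_t op, unsigned mmu_idx)
    {
        return (op << 4) | mmu_idx;  /* assumed: mmu index in the low 4 bits */
    }

    static memop_t get_memop(memop_idx_t oi) { return oi >> 4; }
    static unsigned get_mmuidx(memop_idx_t oi) { return oi & 15; }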
trace/mem.h (63 lines, file removed)
@@ -1,63 +0,0 @@
-/*
- * Helper functions for guest memory tracing
- *
- * Copyright (C) 2016 Lluís Vilanova <vilanova@ac.upc.edu>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#ifndef TRACE__MEM_H
-#define TRACE__MEM_H
-
-#include "tcg/tcg.h"
-
-#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */
-#define TRACE_MEM_SE (1ULL << 4)    /* sign extended (y/n) */
-#define TRACE_MEM_BE (1ULL << 5)    /* big endian (y/n) */
-#define TRACE_MEM_ST (1ULL << 6)    /* store (y/n) */
-#define TRACE_MEM_MMU_SHIFT 8       /* mmu idx */
-
-/**
- * trace_mem_build_info:
- *
- * Return a value for the 'info' argument in guest memory access traces.
- */
-static inline uint16_t trace_mem_build_info(int size_shift, bool sign_extend,
-                                            MemOp endianness, bool store,
-                                            unsigned int mmu_idx)
-{
-    uint16_t res;
-
-    res = size_shift & TRACE_MEM_SZ_SHIFT_MASK;
-    if (sign_extend) {
-        res |= TRACE_MEM_SE;
-    }
-    if (endianness == MO_BE) {
-        res |= TRACE_MEM_BE;
-    }
-    if (store) {
-        res |= TRACE_MEM_ST;
-    }
-#ifdef CONFIG_SOFTMMU
-    res |= mmu_idx << TRACE_MEM_MMU_SHIFT;
-#endif
-    return res;
-}
-
-/**
- * trace_mem_get_info:
- *
- * Return a value for the 'info' argument in guest memory access traces.
- */
-static inline uint16_t trace_mem_get_info(MemOp op,
-                                          unsigned int mmu_idx,
-                                          bool store)
-{
-    return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN),
-                                op & MO_BSWAP, store,
-                                mmu_idx);
-}
-
-#endif /* TRACE__MEM_H */