mirror of https://gitee.com/openkylin/qemu.git
target/arm: Adjust gen_aa32_{ld,st}_i32 for align+endianness
Create a finalize_memop function that computes alignment and endianness
and returns the final MemOp for the operation.

Split out gen_aa32_{ld,st}_internal_i32 which bypasses any special
handling of endianness or alignment. Adjust gen_aa32_{ld,st}_i32 so that
s->be_data is not added by the callers.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210419202257.161730-12-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
parent 4479ec30c9
commit 9d486b40e8
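As a rough orientation before the diff itself, the shape of the refactoring can be sketched as a small standalone C program. The types and flag values below (MemOp, DisasContext, the gen_aa32_* stubs) are simplified stand-ins for illustration only, not QEMU's actual definitions:

/* Minimal sketch of the refactoring, with simplified stand-in types. */
#include <stdio.h>

typedef enum {                  /* simplified stand-in for MemOp flags */
    MO_UB    = 0,
    MO_UW    = 1,
    MO_UL    = 2,
    MO_BE    = 1 << 3,          /* big-endian data access */
    MO_ALIGN = 1 << 4,          /* require natural alignment */
    MO_AMASK = 1 << 4,          /* mask of alignment bits (simplified) */
} MemOp;

typedef struct {
    int align_mem;              /* alignment-check configuration bit */
    MemOp be_data;              /* MO_BE or 0: endianness of data accesses */
} DisasContext;

/* New helper: fold alignment and endianness into the final MemOp once. */
static MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | s->be_data;
}

/* "Internal" routine: takes a fully-specified MemOp and adds nothing. */
static void gen_aa32_ld_internal_i32(DisasContext *s, MemOp opc)
{
    printf("emit load with MemOp %#x\n", (unsigned)opc);
}

/* Public routine: callers now pass only size/sign; endianness and
 * alignment are applied here via finalize_memop(). */
static void gen_aa32_ld_i32(DisasContext *s, MemOp opc)
{
    gen_aa32_ld_internal_i32(s, finalize_memop(s, opc));
}

int main(void)
{
    DisasContext s = { .align_mem = 1, .be_data = MO_BE };
    /* Before this commit a caller wrote gen_aa32_ld_i32(&s, MO_UL | s.be_data);
     * after it, the caller simply writes: */
    gen_aa32_ld_i32(&s, MO_UL);
    return 0;
}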
@@ -559,8 +559,7 @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
     addr = tcg_temp_new_i32();
     load_reg_var(s, addr, a->rn);
     for (reg = 0; reg < nregs; reg++) {
-        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                        s->be_data | size);
+        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), size);
         if ((vd & 1) && vec_size == 16) {
             /*
              * We cannot write 16 bytes at once because the
@@ -650,13 +649,11 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
      */
     for (reg = 0; reg < nregs; reg++) {
         if (a->l) {
-            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                            s->be_data | a->size);
+            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), a->size);
             neon_store_element(vd, a->reg_idx, a->size, tmp);
         } else { /* Store */
             neon_load_element(tmp, vd, a->reg_idx, a->size);
-            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
-                            s->be_data | a->size);
+            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), a->size);
         }
         vd += a->stride;
         tcg_gen_addi_i32(addr, addr, 1 << a->size);
@@ -908,7 +908,8 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
 #define IS_USER_ONLY 0
 #endif

-/* Abstractions of "generate code to do a guest load/store for
+/*
+ * Abstractions of "generate code to do a guest load/store for
  * AArch32", where a vaddr is always 32 bits (and is zero
  * extended if we're a 64 bit core) and data is also
  * 32 bits unless specifically doing a 64 bit access.
@@ -916,7 +917,7 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
  * that the address argument is TCGv_i32 rather than TCGv.
  */

-static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
+static TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
 {
     TCGv addr = tcg_temp_new();
     tcg_gen_extu_i32_tl(addr, a32);
@@ -928,47 +929,51 @@ static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
     return addr;
 }

+/*
+ * Internal routines are used for NEON cases where the endianness
+ * and/or alignment has already been taken into account and manipulated.
+ */
+static void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+    tcg_gen_qemu_ld_i32(val, addr, index, opc);
+    tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+                                     TCGv_i32 a32, int index, MemOp opc)
+{
+    TCGv addr = gen_aa32_addr(s, a32, opc);
+    tcg_gen_qemu_st_i32(val, addr, index, opc);
+    tcg_temp_free(addr);
+}
+
 static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
-    TCGv addr;
-
-    if (s->align_mem) {
-        opc |= MO_ALIGN;
-    }
-
-    addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_ld_i32(val, addr, index, opc);
-    tcg_temp_free(addr);
+    gen_aa32_ld_internal_i32(s, val, a32, index, finalize_memop(s, opc));
 }

 static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                             int index, MemOp opc)
 {
-    TCGv addr;
-
-    if (s->align_mem) {
-        opc |= MO_ALIGN;
-    }
-
-    addr = gen_aa32_addr(s, a32, opc);
-    tcg_gen_qemu_st_i32(val, addr, index, opc);
-    tcg_temp_free(addr);
+    gen_aa32_st_internal_i32(s, val, a32, index, finalize_memop(s, opc));
 }

 #define DO_GEN_LD(SUFF, OPC)                                            \
 static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,     \
                                      TCGv_i32 a32, int index)           \
 {                                                                       \
-    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);              \
+    gen_aa32_ld_i32(s, val, a32, index, OPC);                           \
 }

 #define DO_GEN_ST(SUFF, OPC)                                            \
 static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,     \
                                      TCGv_i32 a32, int index)           \
 {                                                                       \
-    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);              \
+    gen_aa32_st_i32(s, val, a32, index, OPC);                           \
 }

 static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
 {
@@ -6456,7 +6461,7 @@ static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
     addr = op_addr_rr_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);

     /*
@@ -6485,7 +6490,7 @@ static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
     tcg_temp_free_i32(tmp);

@@ -6508,13 +6513,13 @@ static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt + 1, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -6537,13 +6542,13 @@ static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
     addr = op_addr_rr_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, a->rt + 1);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     op_addr_rr_post(s, a, addr, -4);
@@ -6608,7 +6613,7 @@ static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
     addr = op_addr_ri_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);

     /*
@@ -6637,7 +6642,7 @@ static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, mop);
     disas_set_da_iss(s, mop, issinfo);
     tcg_temp_free_i32(tmp);

@@ -6653,13 +6658,13 @@ static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, a->rt, tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL);
     store_reg(s, rt2, tmp);

     /* LDRD w/ base writeback is undefined if the registers overlap. */
@@ -6692,13 +6697,13 @@ static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
     addr = op_addr_ri_pre(s, a);

     tmp = load_reg(s, a->rt);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     tcg_gen_addi_i32(addr, addr, 4);

     tmp = load_reg(s, rt2);
-    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL);
     tcg_temp_free_i32(tmp);

     op_addr_ri_post(s, a, addr, -4);
@@ -6924,7 +6929,7 @@ static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
     addr = load_reg(s, a->rn);
     tmp = load_reg(s, a->rt);
     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
-    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+    gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);

     tcg_temp_free_i32(tmp);
@@ -7080,7 +7085,7 @@ static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)

     addr = load_reg(s, a->rn);
     tmp = tcg_temp_new_i32();
-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
     disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
     tcg_temp_free_i32(addr);

@@ -8264,8 +8269,7 @@ static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
     addr = load_reg(s, a->rn);
     tcg_gen_add_i32(addr, addr, tmp);

-    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
-                    half ? MO_UW | s->be_data : MO_UB);
+    gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), half ? MO_UW : MO_UB);
     tcg_temp_free_i32(addr);

     tcg_gen_add_i32(tmp, tmp, tmp);
@@ -459,4 +459,28 @@ static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
     return statusptr;
 }

+/**
+ * finalize_memop:
+ * @s: DisasContext
+ * @opc: size+sign+align of the memory operation
+ *
+ * Build the complete MemOp for a memory operation, including alignment
+ * and endianness.
+ *
+ * If (op & MO_AMASK) then the operation already contains the required
+ * alignment, e.g. for AccType_ATOMIC.  Otherwise, this is an optionally
+ * unaligned operation, e.g. for AccType_NORMAL.
+ *
+ * In the latter case, there are configuration bits that require alignment,
+ * and this is applied here.  Note that there is no way to indicate that
+ * no alignment should ever be enforced; this must be handled manually.
+ */
+static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
+{
+    if (s->align_mem && !(opc & MO_AMASK)) {
+        opc |= MO_ALIGN;
+    }
+    return opc | s->be_data;
+}
+
 #endif /* TARGET_ARM_TRANSLATE_H */
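To make the MO_AMASK rule in the new doc comment concrete, here is a small self-contained C check using the same simplified stand-in flag values as the sketch near the top of this page (not QEMU's real MemOp encoding): an operation that already carries an alignment requirement is left alone, while a plain access picks up MO_ALIGN when the alignment-check bit is set, and both get the endianness folded in.

#include <assert.h>

typedef enum {
    MO_UL    = 2,
    MO_BE    = 1 << 3,      /* big-endian data access (simplified) */
    MO_ALIGN = 1 << 4,      /* require natural alignment (simplified) */
    MO_AMASK = 1 << 4,      /* mask of alignment bits (simplified) */
} MemOp;

typedef struct {
    int align_mem;          /* alignment-check configuration bit */
    MemOp be_data;          /* MO_BE or 0 */
} DisasContext;

static MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | s->be_data;
}

int main(void)
{
    DisasContext s = { .align_mem = 1, .be_data = MO_BE };

    /* AccType_NORMAL-style access: alignment is added because align_mem is set. */
    assert(finalize_memop(&s, MO_UL) == (MO_UL | MO_ALIGN | MO_BE));

    /* Access that already specifies alignment (e.g. AccType_ATOMIC-style):
     * only the endianness bit is added. */
    assert(finalize_memop(&s, MO_UL | MO_ALIGN) == (MO_UL | MO_ALIGN | MO_BE));
    return 0;
}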