diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index 323582420f..7ff4be7663 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -669,11 +669,6 @@ static void tcg_out_ldst(TCGContext *s, AArch64Insn insn,
 {
     TCGMemOp size = (uint32_t)insn >> 30;
 
-    if (offset >= -256 && offset < 256) {
-        tcg_out_insn_3312(s, insn, rd, rn, offset);
-        return;
-    }
-
     /* If the offset is naturally aligned and in range, then we can
        use the scaled uimm12 encoding */
     if (offset >= 0 && !(offset & ((1 << size) - 1))) {
@@ -684,6 +679,12 @@ static void tcg_out_ldst(TCGContext *s, AArch64Insn insn,
         }
     }
 
+    /* Small signed offsets can use the unscaled encoding. */
+    if (offset >= -256 && offset < 256) {
+        tcg_out_insn_3312(s, insn, rd, rn, offset);
+        return;
+    }
+
     /* Worst-case scenario, move offset to temp register, use reg offset. */
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
     tcg_out_ldst_r(s, insn, rd, rn, TCG_REG_TMP);
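
The patch moves the unscaled simm9 check after the scaled uimm12 check, so small naturally aligned offsets now prefer the scaled encoding and the unscaled form is left for offsets that are negative or misaligned. Below is a minimal standalone C sketch (not QEMU code) of that selection order; the helper name pick_encoding is invented for illustration, and the 12-bit bound on the scaled immediate is assumed from the standard uimm12 range, since the hunk cuts off before the bound check in tcg_out_ldst.

/* Sketch of the post-patch encoding choice, under the assumptions above. */
#include <stdio.h>
#include <stdint.h>

static const char *pick_encoding(intptr_t offset, unsigned size_log2)
{
    /* 1. Scaled uimm12: non-negative, naturally aligned, and the scaled
       value fits in 12 bits (assumed bound). */
    if (offset >= 0 && !(offset & ((1 << size_log2) - 1))) {
        uintptr_t scaled = (uintptr_t)offset >> size_log2;
        if (scaled < 0x1000) {
            return "scaled uimm12";
        }
    }
    /* 2. Unscaled simm9: any small signed offset, same bounds as the patch. */
    if (offset >= -256 && offset < 256) {
        return "unscaled simm9";
    }
    /* 3. Fallback: materialize the offset in a temp register. */
    return "register offset via temp";
}

int main(void)
{
    /* Illustrative offsets for an 8-byte access (size_log2 == 3).
       Note that +8 now takes the scaled path instead of the unscaled one. */
    intptr_t cases[] = { 8, -16, 9, 32768, 40000 };
    for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
        printf("%6ld -> %s\n", (long)cases[i], pick_encoding(cases[i], 3));
    }
    return 0;
}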