TCG backend constraints cleanup

-----BEGIN PGP SIGNATURE-----
 
 iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmAZ2UcdHHJpY2hhcmQu
 aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/2lggAi8Es/LyFve8gHcIf
 A18SuZ46SqDrUYFHUZNWYxbsoIlLGdDTWYxGJHN90Zeo7Gjzhm2zA3UOEP5E+vnF
 bDNZAMCiQqo4imAZMMS6LAebTG+D3COJVQ6HBiGoIccgGIPXwu0GCqObnXinRcP+
 EA7WQcxujEbELIIKIVpuoC5QV949MSA4npGrfJyX0AaEtu6womPQxaPmF0xqqwJP
 HDxH2l9D9oqWdrFOo/TPyma9VXLTaMvxNXPNdnhPwmjwh1139TenZW1lElyMKPYD
 KfvVz2v808kC2WShZFAi/7Gl3+f24IqMBNIGuethP9APwcRgV8zTCeAlZTo2mdND
 6ohENA==
 =pliz
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210202' into staging

TCG backend constraints cleanup

# gpg: Signature made Tue 02 Feb 2021 22:59:19 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth-gitlab/tags/pull-tcg-20210202: (24 commits)
  tcg: Remove TCG_TARGET_CON_SET_H
  tcg/tci: Split out constraint sets to tcg-target-con-set.h
  tcg/sparc: Split out constraint sets to tcg-target-con-set.h
  tcg/s390: Split out constraint sets to tcg-target-con-set.h
  tcg/riscv: Split out constraint sets to tcg-target-con-set.h
  tcg/ppc: Split out constraint sets to tcg-target-con-set.h
  tcg/mips: Split out constraint sets to tcg-target-con-set.h
  tcg/arm: Split out constraint sets to tcg-target-con-set.h
  tcg/aarch64: Split out constraint sets to tcg-target-con-set.h
  tcg/i386: Split out constraint sets to tcg-target-con-set.h
  tcg: Remove TCG_TARGET_CON_STR_H
  tcg/sparc: Split out target constraints to tcg-target-con-str.h
  tcg/s390: Split out target constraints to tcg-target-con-str.h
  tcg/riscv: Split out target constraints to tcg-target-con-str.h
  tcg/mips: Split out target constraints to tcg-target-con-str.h
  tcg/tci: Split out target constraints to tcg-target-con-str.h
  tcg/ppc: Split out target constraints to tcg-target-con-str.h
  tcg/aarch64: Split out target constraints to tcg-target-con-str.h
  tcg/arm: Split out target constraints to tcg-target-con-str.h
  tcg/i386: Split out target constraints to tcg-target-con-str.h
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 2021-02-03 19:35:57 +00:00
commit db754f8cca
29 changed files with 1244 additions and 1260 deletions

tcg/aarch64/tcg-target-con-set.h
@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Define AArch64 target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
C_O0_I2(lZ, l)
C_O0_I2(r, rA)
C_O0_I2(rZ, r)
C_O0_I2(w, r)
C_O1_I1(r, l)
C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
C_O1_I1(w, wr)
C_O1_I2(r, 0, rZ)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rA)
C_O1_I2(r, r, rAL)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rL)
C_O1_I2(r, rZ, rZ)
C_O1_I2(w, 0, w)
C_O1_I2(w, w, w)
C_O1_I2(w, w, wN)
C_O1_I2(w, w, wO)
C_O1_I2(w, w, wZ)
C_O1_I3(w, w, w, w)
C_O1_I4(r, r, rA, rZ, rZ)
C_O2_I4(r, r, rZ, rZ, rA, rMZ)
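
These con-set headers are X-macro lists: common code can include tcg-target-con-set.h several times, redefining the C_On_Im() macros before each inclusion, to derive both an enum of constraint-set indexes and a parallel table of constraint strings. A minimal sketch of that include-twice pattern, assuming macro shapes consistent with the comment above (the real definitions live in tcg/tcg.c and may differ in detail):

/* Pass 1: one enumerator per line, e.g. C_O0_I1(r) -> c_o0_i1_r. */
#define C_O0_I1(I1)          c_o0_i1_##I1,
#define C_O1_I2(O1, I1, I2)  c_o1_i2_##O1##_##I1##_##I2,
/* ... one such macro per arity used by the backends ... */

typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;

#undef C_O0_I1
#undef C_O1_I2

/* Pass 2: the matching constraint strings, in the same order. */
#define C_O0_I1(I1)          { .args_ct_str = { #I1 } },
#define C_O1_I2(O1, I1, I2)  { .args_ct_str = { #O1, #I1, #I2 } },

static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};

Because both passes walk the same list, the enumerator c_o1_i2_r_r_rA and the table entry { "r", "r", "rA" } stay in lockstep by construction.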

tcg/aarch64/tcg-target-con-str.h
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Define AArch64 target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('l', ALL_QLDST_REGS)
REGS('w', ALL_VECTOR_REGS)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('A', TCG_CT_CONST_AIMM)
CONST('L', TCG_CT_CONST_LIMM)
CONST('M', TCG_CT_CONST_MONE)
CONST('O', TCG_CT_CONST_ORRI)
CONST('N', TCG_CT_CONST_ANDI)
CONST('Z', TCG_CT_CONST_ZERO)
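
tcg-target-con-str.h is consumed the same way: each REGS()/CONST() line can expand into one case of the switch that parses a constraint letter, which is what lets the per-backend target_parse_constraint() functions below be deleted. A hedged sketch, with ct->regs and ct->ct following the TCGArgConstraint usage visible in the removed code:

switch (*ct_str) {
#define REGS(CASE, MASK)   case CASE: ct->regs |= (MASK); break;
#define CONST(CASE, MASK)  case CASE: ct->ct |= (MASK); break;
#include "tcg-target-con-str.h"
#undef REGS
#undef CONST
default:
    /* Any letter a backend emits must be defined in its header. */
    g_assert_not_reached();
}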

tcg/aarch64/tcg-target.c.inc
@@ -126,51 +126,16 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'r': /* general registers */
ct->regs |= 0xffffffffu;
break;
case 'w': /* advsimd registers */
ct->regs |= 0xffffffff00000000ull;
break;
case 'l': /* qemu_ld / qemu_st address, data_reg */
ct->regs = 0xffffffffu;
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
#ifdef CONFIG_SOFTMMU
/* x0 and x1 will be overwritten when reading the tlb entry,
and x2, and x3 for helper args, better to avoid using them. */
tcg_regset_reset_reg(ct->regs, TCG_REG_X0);
tcg_regset_reset_reg(ct->regs, TCG_REG_X1);
tcg_regset_reset_reg(ct->regs, TCG_REG_X2);
tcg_regset_reset_reg(ct->regs, TCG_REG_X3);
#define ALL_QLDST_REGS \
(ALL_GENERAL_REGS & ~((1 << TCG_REG_X0) | (1 << TCG_REG_X1) | \
(1 << TCG_REG_X2) | (1 << TCG_REG_X3)))
#else
#define ALL_QLDST_REGS ALL_GENERAL_REGS
#endif
break;
case 'A': /* Valid for arithmetic immediate (positive or negative). */
ct->ct |= TCG_CT_CONST_AIMM;
break;
case 'L': /* Valid for logical immediate. */
ct->ct |= TCG_CT_CONST_LIMM;
break;
case 'M': /* minus one */
ct->ct |= TCG_CT_CONST_MONE;
break;
case 'O': /* vector orr/bic immediate */
ct->ct |= TCG_CT_CONST_ORRI;
break;
case 'N': /* vector orr/bic immediate, inverted */
ct->ct |= TCG_CT_CONST_ANDI;
break;
case 'Z': /* zero */
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
return NULL;
}
return ct_str;
}
/* Match a constant valid for addition (12-bit, optionally shifted). */
static inline bool is_aimm(uint64_t val)
@@ -2582,42 +2547,11 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
va_end(va);
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef w_w = { .args_ct_str = { "w", "w" } };
static const TCGTargetOpDef w_r = { .args_ct_str = { "w", "r" } };
static const TCGTargetOpDef w_wr = { .args_ct_str = { "w", "wr" } };
static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } };
static const TCGTargetOpDef r_rA = { .args_ct_str = { "r", "rA" } };
static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } };
static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
static const TCGTargetOpDef w_w_w = { .args_ct_str = { "w", "w", "w" } };
static const TCGTargetOpDef w_0_w = { .args_ct_str = { "w", "0", "w" } };
static const TCGTargetOpDef w_w_wO = { .args_ct_str = { "w", "w", "wO" } };
static const TCGTargetOpDef w_w_wN = { .args_ct_str = { "w", "w", "wN" } };
static const TCGTargetOpDef w_w_wZ = { .args_ct_str = { "w", "w", "wZ" } };
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_rA = { .args_ct_str = { "r", "r", "rA" } };
static const TCGTargetOpDef r_r_rL = { .args_ct_str = { "r", "r", "rL" } };
static const TCGTargetOpDef r_r_rAL
= { .args_ct_str = { "r", "r", "rAL" } };
static const TCGTargetOpDef dep
= { .args_ct_str = { "r", "0", "rZ" } };
static const TCGTargetOpDef ext2
= { .args_ct_str = { "r", "rZ", "rZ" } };
static const TCGTargetOpDef movc
= { .args_ct_str = { "r", "r", "rA", "rZ", "rZ" } };
static const TCGTargetOpDef add2
= { .args_ct_str = { "r", "r", "rZ", "rZ", "rA", "rMZ" } };
static const TCGTargetOpDef w_w_w_w
= { .args_ct_str = { "w", "w", "w", "w" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
@@ -2656,7 +2590,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_extract_i64:
case INDEX_op_sextract_i32:
case INDEX_op_sextract_i64:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
@@ -2665,7 +2599,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
return &rZ_r;
return C_O0_I2(rZ, r);
case INDEX_op_add_i32:
case INDEX_op_add_i64:
@@ -2673,7 +2607,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_sub_i64:
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
return &r_r_rA;
return C_O1_I2(r, r, rA);
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
@@ -2687,7 +2621,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_remu_i64:
case INDEX_op_muluh_i64:
case INDEX_op_mulsh_i64:
return &r_r_r;
return C_O1_I2(r, r, r);
case INDEX_op_and_i32:
case INDEX_op_and_i64:
@@ -2701,7 +2635,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_orc_i64:
case INDEX_op_eqv_i32:
case INDEX_op_eqv_i64:
return &r_r_rL;
return C_O1_I2(r, r, rL);
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
@@ -2713,42 +2647,42 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_sar_i64:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i64:
return &r_r_ri;
return C_O1_I2(r, r, ri);
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
case INDEX_op_clz_i64:
case INDEX_op_ctz_i64:
return &r_r_rAL;
return C_O1_I2(r, r, rAL);
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return &r_rA;
return C_O0_I2(r, rA);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return &movc;
return C_O1_I4(r, r, rA, rZ, rZ);
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
return &r_l;
return C_O1_I1(r, l);
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st_i64:
return &lZ_l;
return C_O0_I2(lZ, l);
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return &dep;
return C_O1_I2(r, 0, rZ);
case INDEX_op_extract2_i32:
case INDEX_op_extract2_i64:
return &ext2;
return C_O1_I2(r, rZ, rZ);
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
case INDEX_op_sub2_i64:
return &add2;
return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
@@ -2766,35 +2700,36 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_shrv_vec:
case INDEX_op_sarv_vec:
case INDEX_op_aa64_sshl_vec:
return &w_w_w;
return C_O1_I2(w, w, w);
case INDEX_op_not_vec:
case INDEX_op_neg_vec:
case INDEX_op_abs_vec:
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
return &w_w;
return C_O1_I1(w, w);
case INDEX_op_ld_vec:
case INDEX_op_st_vec:
case INDEX_op_dupm_vec:
return &w_r;
return C_O1_I1(w, r);
case INDEX_op_st_vec:
return C_O0_I2(w, r);
case INDEX_op_dup_vec:
return &w_wr;
return C_O1_I1(w, wr);
case INDEX_op_or_vec:
case INDEX_op_andc_vec:
return &w_w_wO;
return C_O1_I2(w, w, wO);
case INDEX_op_and_vec:
case INDEX_op_orc_vec:
return &w_w_wN;
return C_O1_I2(w, w, wN);
case INDEX_op_cmp_vec:
return &w_w_wZ;
return C_O1_I2(w, w, wZ);
case INDEX_op_bitsel_vec:
return &w_w_w_w;
return C_O1_I3(w, w, w, w);
case INDEX_op_aa64_sli_vec:
return &w_0_w;
return C_O1_I2(w, 0, w);
default:
return NULL;
g_assert_not_reached();
}
}
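
Note the shape change in tcg_target_op_def() above: it now returns an index instead of a pointer to a function-local static struct, and an unhandled opcode is a programming error (g_assert_not_reached()) rather than a NULL return. On the tcg.c side the lookup plausibly reduces to an array access, sketched here against the constraint_sets[] table assumed earlier (names are from that sketch, not copied from the patch):

/* Sketch: resolve an opcode's constraint set through the shared table. */
TCGConstraintSetIndex con_set = tcg_target_op_def(op);
const TCGTargetOpDef *tdefs;

tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
tdefs = &constraint_sets[con_set];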

tcg/arm/tcg-target-con-set.h
@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: MIT */
/*
* Define Arm target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
C_O0_I2(r, r)
C_O0_I2(r, rIN)
C_O0_I2(s, s)
C_O0_I3(s, s, s)
C_O0_I4(r, r, rI, rI)
C_O0_I4(s, s, s, s)
C_O1_I1(r, l)
C_O1_I1(r, r)
C_O1_I2(r, 0, rZ)
C_O1_I2(r, l, l)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rIN)
C_O1_I2(r, r, ri)
C_O1_I2(r, rZ, rZ)
C_O1_I4(r, r, r, rI, rI)
C_O1_I4(r, r, rIN, rIK, 0)
C_O2_I1(r, r, l)
C_O2_I2(r, r, l, l)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, r, r, rIN, rIK)
C_O2_I4(r, r, rI, rI, rIN, rIK)

tcg/arm/tcg-target-con-str.h
@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: MIT */
/*
* Define Arm target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('l', ALL_QLOAD_REGS)
REGS('s', ALL_QSTORE_REGS)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('I', TCG_CT_CONST_ARM)
CONST('K', TCG_CT_CONST_INV)
CONST('N', TCG_CT_CONST_NEG)
CONST('Z', TCG_CT_CONST_ZERO)

tcg/arm/tcg-target.c.inc
@@ -237,65 +237,27 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_NEG 0x400
#define TCG_CT_CONST_ZERO 0x800
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'I':
ct->ct |= TCG_CT_CONST_ARM;
break;
case 'K':
ct->ct |= TCG_CT_CONST_INV;
break;
case 'N': /* The gcc constraint letter is L, already used here. */
ct->ct |= TCG_CT_CONST_NEG;
break;
case 'Z':
ct->ct |= TCG_CT_CONST_ZERO;
break;
#define ALL_GENERAL_REGS 0xffffu
case 'r':
ct->regs = 0xffff;
break;
/* qemu_ld address */
case 'l':
ct->regs = 0xffff;
/*
* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
* and r0-r1 doing the byte swapping, so don't use these.
* r3 is removed for softmmu to avoid clashes with helper arguments.
*/
#ifdef CONFIG_SOFTMMU
/* r0-r2,lr will be overwritten when reading the tlb entry,
so don't use these. */
tcg_regset_reset_reg(ct->regs, TCG_REG_R0);
tcg_regset_reset_reg(ct->regs, TCG_REG_R1);
tcg_regset_reset_reg(ct->regs, TCG_REG_R2);
tcg_regset_reset_reg(ct->regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->regs, TCG_REG_R14);
#define ALL_QLOAD_REGS \
(ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
(1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
(1 << TCG_REG_R14)))
#define ALL_QSTORE_REGS \
(ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
(1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
#define ALL_QLOAD_REGS ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
(ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif
break;
/* qemu_st address & data */
case 's':
ct->regs = 0xffff;
/* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
and r0-r1 doing the byte swapping, so don't use these. */
tcg_regset_reset_reg(ct->regs, TCG_REG_R0);
tcg_regset_reset_reg(ct->regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
/* Avoid clashes with registers being used for helper args */
tcg_regset_reset_reg(ct->regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
/* Avoid clashes with registers being used for helper args */
tcg_regset_reset_reg(ct->regs, TCG_REG_R3);
#endif
tcg_regset_reset_reg(ct->regs, TCG_REG_R14);
#endif
break;
default:
return NULL;
}
return ct_str;
}
static inline uint32_t rotl(uint32_t val, int n)
{
@@ -2074,57 +2036,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef s_s = { .args_ct_str = { "s", "s" } };
static const TCGTargetOpDef r_l = { .args_ct_str = { "r", "l" } };
static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
static const TCGTargetOpDef r_r_l = { .args_ct_str = { "r", "r", "l" } };
static const TCGTargetOpDef r_l_l = { .args_ct_str = { "r", "l", "l" } };
static const TCGTargetOpDef s_s_s = { .args_ct_str = { "s", "s", "s" } };
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
static const TCGTargetOpDef r_r_rIN
= { .args_ct_str = { "r", "r", "rIN" } };
static const TCGTargetOpDef r_r_rIK
= { .args_ct_str = { "r", "r", "rIK" } };
static const TCGTargetOpDef r_r_r_r
= { .args_ct_str = { "r", "r", "r", "r" } };
static const TCGTargetOpDef r_r_l_l
= { .args_ct_str = { "r", "r", "l", "l" } };
static const TCGTargetOpDef s_s_s_s
= { .args_ct_str = { "s", "s", "s", "s" } };
static const TCGTargetOpDef br
= { .args_ct_str = { "r", "rIN" } };
static const TCGTargetOpDef ext2
= { .args_ct_str = { "r", "rZ", "rZ" } };
static const TCGTargetOpDef dep
= { .args_ct_str = { "r", "0", "rZ" } };
static const TCGTargetOpDef movc
= { .args_ct_str = { "r", "r", "rIN", "rIK", "0" } };
static const TCGTargetOpDef add2
= { .args_ct_str = { "r", "r", "r", "r", "rIN", "rIK" } };
static const TCGTargetOpDef sub2
= { .args_ct_str = { "r", "r", "rI", "rI", "rIN", "rIK" } };
static const TCGTargetOpDef br2
= { .args_ct_str = { "r", "r", "rI", "rI" } };
static const TCGTargetOpDef setc2
= { .args_ct_str = { "r", "r", "r", "rI", "rI" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
case INDEX_op_neg_i32:
case INDEX_op_not_i32:
case INDEX_op_bswap16_i32:
@@ -2134,62 +2056,72 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_ext16u_i32:
case INDEX_op_extract_i32:
case INDEX_op_sextract_i32:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
return C_O0_I2(r, r);
case INDEX_op_add_i32:
case INDEX_op_sub_i32:
case INDEX_op_setcond_i32:
return &r_r_rIN;
return C_O1_I2(r, r, rIN);
case INDEX_op_and_i32:
case INDEX_op_andc_i32:
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
return &r_r_rIK;
return C_O1_I2(r, r, rIK);
case INDEX_op_mul_i32:
case INDEX_op_div_i32:
case INDEX_op_divu_i32:
return &r_r_r;
return C_O1_I2(r, r, r);
case INDEX_op_mulu2_i32:
case INDEX_op_muls2_i32:
return &r_r_r_r;
return C_O2_I2(r, r, r, r);
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
return &r_r_rI;
return C_O1_I2(r, r, rI);
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
case INDEX_op_rotl_i32:
case INDEX_op_rotr_i32:
return &r_r_ri;
return C_O1_I2(r, r, ri);
case INDEX_op_brcond_i32:
return &br;
return C_O0_I2(r, rIN);
case INDEX_op_deposit_i32:
return &dep;
return C_O1_I2(r, 0, rZ);
case INDEX_op_extract2_i32:
return &ext2;
return C_O1_I2(r, rZ, rZ);
case INDEX_op_movcond_i32:
return &movc;
return C_O1_I4(r, r, rIN, rIK, 0);
case INDEX_op_add2_i32:
return &add2;
return C_O2_I4(r, r, r, r, rIN, rIK);
case INDEX_op_sub2_i32:
return &sub2;
return C_O2_I4(r, r, rI, rI, rIN, rIK);
case INDEX_op_brcond2_i32:
return &br2;
return C_O0_I4(r, r, rI, rI);
case INDEX_op_setcond2_i32:
return &setc2;
return C_O1_I4(r, r, r, rI, rI);
case INDEX_op_qemu_ld_i32:
return TARGET_LONG_BITS == 32 ? &r_l : &r_l_l;
return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
case INDEX_op_qemu_ld_i64:
return TARGET_LONG_BITS == 32 ? &r_r_l : &r_r_l_l;
return TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, l) : C_O2_I2(r, r, l, l);
case INDEX_op_qemu_st_i32:
return TARGET_LONG_BITS == 32 ? &s_s : &s_s_s;
return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
case INDEX_op_qemu_st_i64:
return TARGET_LONG_BITS == 32 ? &s_s_s : &s_s_s_s;
return TARGET_LONG_BITS == 32 ? C_O0_I3(s, s, s) : C_O0_I4(s, s, s, s);
default:
return NULL;
g_assert_not_reached();
}
}

tcg/i386/tcg-target-con-set.h
@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: MIT */
/*
* Define i386 target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*
* C_N1_Im(...) defines a constraint set with 1 output and <m> inputs,
* except that the output must use a new register.
*/
C_O0_I1(r)
C_O0_I2(L, L)
C_O0_I2(qi, r)
C_O0_I2(re, r)
C_O0_I2(ri, r)
C_O0_I2(r, re)
C_O0_I2(s, L)
C_O0_I2(x, r)
C_O0_I3(L, L, L)
C_O0_I3(s, L, L)
C_O0_I4(L, L, L, L)
C_O0_I4(r, r, ri, ri)
C_O1_I1(r, 0)
C_O1_I1(r, L)
C_O1_I1(r, q)
C_O1_I1(r, r)
C_O1_I1(x, r)
C_O1_I1(x, x)
C_O1_I2(Q, 0, Q)
C_O1_I2(q, r, re)
C_O1_I2(r, 0, ci)
C_O1_I2(r, 0, r)
C_O1_I2(r, 0, re)
C_O1_I2(r, 0, reZ)
C_O1_I2(r, 0, ri)
C_O1_I2(r, 0, rI)
C_O1_I2(r, L, L)
C_O1_I2(r, r, re)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(x, x, x)
C_N1_I2(r, r, r)
C_N1_I2(r, r, rW)
C_O1_I3(x, x, x, x)
C_O1_I4(r, r, re, r, 0)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, L)
C_O2_I2(a, d, a, r)
C_O2_I2(r, r, L, L)
C_O2_I3(a, d, 0, 1, r)
C_O2_I4(r, r, 0, 1, re, re)
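
C_N1_Im() covers the earlyclobber case that this backend previously spelled as "&r": the output must be allocated to a register distinct from all inputs. One plausible expansion is the ordinary C_O1_Im() form with '&' prepended to the stringified output, matching the "&r" strings deleted from the ctz/clz cases below (an assumption, not copied from tcg.c):

/* Sketch: a "new register" output is the plain form plus '&'. */
#define C_N1_I2(O1, I1, I2)  { .args_ct_str = { "&" #O1, #I1, #I2 } },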

tcg/i386/tcg-target-con-str.h
@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: MIT */
/*
* Define i386 target-specific operand constraints.
* Copyright (c) 2021 Linaro
*
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('a', 1u << TCG_REG_EAX)
REGS('b', 1u << TCG_REG_EBX)
REGS('c', 1u << TCG_REG_ECX)
REGS('d', 1u << TCG_REG_EDX)
REGS('S', 1u << TCG_REG_ESI)
REGS('D', 1u << TCG_REG_EDI)
REGS('r', ALL_GENERAL_REGS)
REGS('x', ALL_VECTOR_REGS)
REGS('q', ALL_BYTEL_REGS) /* regs that can be used as a byte operand */
REGS('Q', ALL_BYTEH_REGS) /* regs with a second byte (e.g. %ah) */
REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_ld/st */
REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('e', TCG_CT_CONST_S32)
CONST('I', TCG_CT_CONST_I32)
CONST('W', TCG_CT_CONST_WSZ)
CONST('Z', TCG_CT_CONST_U32)

tcg/i386/tcg-target.c.inc
@@ -132,6 +132,22 @@ static const int tcg_target_call_oarg_regs[] = {
# define TCG_REG_L1 TCG_REG_EDX
#endif
#define ALL_BYTEH_REGS 0x0000000fu
#if TCG_TARGET_REG_BITS == 64
# define ALL_GENERAL_REGS 0x0000ffffu
# define ALL_VECTOR_REGS 0xffff0000u
# define ALL_BYTEL_REGS ALL_GENERAL_REGS
#else
# define ALL_GENERAL_REGS 0x000000ffu
# define ALL_VECTOR_REGS 0x00ff0000u
# define ALL_BYTEL_REGS ALL_BYTEH_REGS
#endif
#ifdef CONFIG_SOFTMMU
# define SOFTMMU_RESERVE_REGS ((1 << TCG_REG_L0) | (1 << TCG_REG_L1))
#else
# define SOFTMMU_RESERVE_REGS 0
#endif
/* The host compiler should supply <cpuid.h> to enable runtime features
detection, as we're not going to go so far as our own inline assembly.
If not available, default values will be assumed. */
@@ -193,91 +209,6 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
return true;
}
#if TCG_TARGET_REG_BITS == 64
#define ALL_GENERAL_REGS 0x0000ffffu
#define ALL_VECTOR_REGS 0xffff0000u
#else
#define ALL_GENERAL_REGS 0x000000ffu
#define ALL_VECTOR_REGS 0x00ff0000u
#endif
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch(*ct_str++) {
case 'a':
tcg_regset_set_reg(ct->regs, TCG_REG_EAX);
break;
case 'b':
tcg_regset_set_reg(ct->regs, TCG_REG_EBX);
break;
case 'c':
tcg_regset_set_reg(ct->regs, TCG_REG_ECX);
break;
case 'd':
tcg_regset_set_reg(ct->regs, TCG_REG_EDX);
break;
case 'S':
tcg_regset_set_reg(ct->regs, TCG_REG_ESI);
break;
case 'D':
tcg_regset_set_reg(ct->regs, TCG_REG_EDI);
break;
case 'q':
/* A register that can be used as a byte operand. */
ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xf;
break;
case 'Q':
/* A register with an addressable second byte (e.g. %ah). */
ct->regs = 0xf;
break;
case 'r':
/* A general register. */
ct->regs |= ALL_GENERAL_REGS;
break;
case 'W':
/* With TZCNT/LZCNT, we can have operand-size as an input. */
ct->ct |= TCG_CT_CONST_WSZ;
break;
case 'x':
/* A vector register. */
ct->regs |= ALL_VECTOR_REGS;
break;
case 'L':
/* qemu_ld/st data+address constraint */
ct->regs = TCG_TARGET_REG_BITS == 64 ? 0xffff : 0xff;
#ifdef CONFIG_SOFTMMU
tcg_regset_reset_reg(ct->regs, TCG_REG_L0);
tcg_regset_reset_reg(ct->regs, TCG_REG_L1);
#endif
break;
case 's':
/* qemu_st8_i32 data constraint */
ct->regs = 0xf;
#ifdef CONFIG_SOFTMMU
tcg_regset_reset_reg(ct->regs, TCG_REG_L0);
tcg_regset_reset_reg(ct->regs, TCG_REG_L1);
#endif
break;
case 'e':
ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32);
break;
case 'Z':
ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32);
break;
case 'I':
ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32);
break;
default:
return NULL;
}
return ct_str;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
@@ -286,14 +217,20 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
if (ct & TCG_CT_CONST) {
return 1;
}
if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
return 1;
}
if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
return 1;
}
if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
return 1;
if (type == TCG_TYPE_I32) {
if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 | TCG_CT_CONST_I32)) {
return 1;
}
} else {
if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
return 1;
}
if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
return 1;
}
if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
return 1;
}
}
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
return 1;
@@ -2957,41 +2894,11 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
}
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
static const TCGTargetOpDef r_0_r = { .args_ct_str = { "r", "0", "r" } };
static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
static const TCGTargetOpDef s_L = { .args_ct_str = { "s", "L" } };
static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
static const TCGTargetOpDef s_L_L = { .args_ct_str = { "s", "L", "L" } };
static const TCGTargetOpDef r_r_L_L
= { .args_ct_str = { "r", "r", "L", "L" } };
static const TCGTargetOpDef L_L_L_L
= { .args_ct_str = { "L", "L", "L", "L" } };
static const TCGTargetOpDef x_x = { .args_ct_str = { "x", "x" } };
static const TCGTargetOpDef x_x_x = { .args_ct_str = { "x", "x", "x" } };
static const TCGTargetOpDef x_x_x_x
= { .args_ct_str = { "x", "x", "x", "x" } };
static const TCGTargetOpDef x_r = { .args_ct_str = { "x", "r" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
@@ -3005,22 +2912,25 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_ld32u_i64:
case INDEX_op_ld32s_i64:
case INDEX_op_ld_i64:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st8_i64:
return &qi_r;
return C_O0_I2(qi, r);
case INDEX_op_st16_i32:
case INDEX_op_st16_i64:
case INDEX_op_st_i32:
case INDEX_op_st32_i64:
return &ri_r;
return C_O0_I2(ri, r);
case INDEX_op_st_i64:
return &re_r;
return C_O0_I2(re, r);
case INDEX_op_add_i32:
case INDEX_op_add_i64:
return &r_r_re;
return C_O1_I2(r, r, re);
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
@@ -3029,24 +2939,15 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
return &r_0_re;
return C_O1_I2(r, 0, re);
case INDEX_op_and_i32:
case INDEX_op_and_i64:
{
static const TCGTargetOpDef and
= { .args_ct_str = { "r", "0", "reZ" } };
return &and;
}
break;
return C_O1_I2(r, 0, reZ);
case INDEX_op_andc_i32:
case INDEX_op_andc_i64:
{
static const TCGTargetOpDef andc
= { .args_ct_str = { "r", "r", "rI" } };
return &andc;
}
break;
return C_O1_I2(r, r, rI);
case INDEX_op_shl_i32:
case INDEX_op_shl_i64:
@@ -3054,16 +2955,17 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_shr_i64:
case INDEX_op_sar_i32:
case INDEX_op_sar_i64:
return have_bmi2 ? &r_r_ri : &r_0_ci;
return have_bmi2 ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ci);
case INDEX_op_rotl_i32:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i32:
case INDEX_op_rotr_i64:
return &r_0_ci;
return C_O1_I2(r, 0, ci);
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return &r_re;
return C_O0_I2(r, re);
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
@@ -3075,13 +2977,14 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_not_i32:
case INDEX_op_not_i64:
case INDEX_op_extrh_i64_i32:
return &r_0;
return C_O1_I1(r, 0);
case INDEX_op_ext8s_i32:
case INDEX_op_ext8s_i64:
case INDEX_op_ext8u_i32:
case INDEX_op_ext8u_i64:
return &r_q;
return C_O1_I1(r, q);
case INDEX_op_ext16s_i32:
case INDEX_op_ext16s_i64:
case INDEX_op_ext16u_i32:
@@ -3096,110 +2999,83 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_sextract_i32:
case INDEX_op_ctpop_i32:
case INDEX_op_ctpop_i64:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_extract2_i32:
case INDEX_op_extract2_i64:
return &r_0_r;
return C_O1_I2(r, 0, r);
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
{
static const TCGTargetOpDef dep
= { .args_ct_str = { "Q", "0", "Q" } };
return &dep;
}
return C_O1_I2(Q, 0, Q);
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
{
static const TCGTargetOpDef setc
= { .args_ct_str = { "q", "r", "re" } };
return &setc;
}
return C_O1_I2(q, r, re);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
{
static const TCGTargetOpDef movc
= { .args_ct_str = { "r", "r", "re", "r", "0" } };
return &movc;
}
return C_O1_I4(r, r, re, r, 0);
case INDEX_op_div2_i32:
case INDEX_op_div2_i64:
case INDEX_op_divu2_i32:
case INDEX_op_divu2_i64:
{
static const TCGTargetOpDef div2
= { .args_ct_str = { "a", "d", "0", "1", "r" } };
return &div2;
}
return C_O2_I3(a, d, 0, 1, r);
case INDEX_op_mulu2_i32:
case INDEX_op_mulu2_i64:
case INDEX_op_muls2_i32:
case INDEX_op_muls2_i64:
{
static const TCGTargetOpDef mul2
= { .args_ct_str = { "a", "d", "a", "r" } };
return &mul2;
}
return C_O2_I2(a, d, a, r);
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
case INDEX_op_sub2_i64:
{
static const TCGTargetOpDef arith2
= { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
return &arith2;
}
return C_O2_I4(r, r, 0, 1, re, re);
case INDEX_op_ctz_i32:
case INDEX_op_ctz_i64:
{
static const TCGTargetOpDef ctz[2] = {
{ .args_ct_str = { "&r", "r", "r" } },
{ .args_ct_str = { "&r", "r", "rW" } },
};
return &ctz[have_bmi1];
}
return have_bmi1 ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
case INDEX_op_clz_i32:
case INDEX_op_clz_i64:
{
static const TCGTargetOpDef clz[2] = {
{ .args_ct_str = { "&r", "r", "r" } },
{ .args_ct_str = { "&r", "r", "rW" } },
};
return &clz[have_lzcnt];
}
return have_lzcnt ? C_N1_I2(r, r, rW) : C_N1_I2(r, r, r);
case INDEX_op_qemu_ld_i32:
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O1_I1(r, L) : C_O1_I2(r, L, L));
case INDEX_op_qemu_st_i32:
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O0_I2(L, L) : C_O0_I3(L, L, L));
case INDEX_op_qemu_st8_i32:
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &s_L : &s_L_L;
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O0_I2(s, L) : C_O0_I3(s, L, L));
case INDEX_op_qemu_ld_i64:
return (TCG_TARGET_REG_BITS == 64 ? &r_L
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
: &r_r_L_L);
return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
: C_O2_I2(r, r, L, L));
case INDEX_op_qemu_st_i64:
return (TCG_TARGET_REG_BITS == 64 ? &L_L
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L
: &L_L_L_L);
return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(L, L, L)
: C_O0_I4(L, L, L, L));
case INDEX_op_brcond2_i32:
{
static const TCGTargetOpDef b2
= { .args_ct_str = { "r", "r", "ri", "ri" } };
return &b2;
}
return C_O0_I4(r, r, ri, ri);
case INDEX_op_setcond2_i32:
{
static const TCGTargetOpDef s2
= { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
return &s2;
}
return C_O1_I4(r, r, r, ri, ri);
case INDEX_op_ld_vec:
case INDEX_op_st_vec:
case INDEX_op_dupm_vec:
return &x_r;
return C_O1_I1(x, r);
case INDEX_op_st_vec:
return C_O0_I2(x, r);
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
@@ -3234,21 +3110,22 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_dup2_vec:
#endif
return &x_x_x;
return C_O1_I2(x, x, x);
case INDEX_op_abs_vec:
case INDEX_op_dup_vec:
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
case INDEX_op_x86_psrldq_vec:
return &x_x;
return C_O1_I1(x, x);
case INDEX_op_x86_vpblendvb_vec:
return &x_x_x_x;
return C_O1_I3(x, x, x, x);
default:
break;
g_assert_not_reached();
}
return NULL;
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)

tcg/mips/tcg-target-con-set.h
@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: MIT */
/*
* Define MIPS target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
C_O0_I2(rZ, r)
C_O0_I2(rZ, rZ)
C_O0_I2(SZ, S)
C_O0_I3(SZ, S, S)
C_O0_I3(SZ, SZ, S)
C_O0_I4(rZ, rZ, rZ, rZ)
C_O0_I4(SZ, SZ, S, S)
C_O1_I1(r, L)
C_O1_I1(r, r)
C_O1_I2(r, 0, rZ)
C_O1_I2(r, L, L)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rIK)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rWZ)
C_O1_I2(r, rZ, rN)
C_O1_I2(r, rZ, rZ)
C_O1_I4(r, rZ, rZ, rZ, 0)
C_O1_I4(r, rZ, rZ, rZ, rZ)
C_O2_I1(r, r, L)
C_O2_I2(r, r, L, L)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, rZ, rZ, rN, rN)

tcg/mips/tcg-target-con-str.h
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: MIT */
/*
* Define MIPS target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('L', ALL_QLOAD_REGS)
REGS('S', ALL_QSTORE_REGS)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('I', TCG_CT_CONST_U16)
CONST('J', TCG_CT_CONST_S16)
CONST('K', TCG_CT_CONST_P2M1)
CONST('N', TCG_CT_CONST_N16)
CONST('W', TCG_CT_CONST_WSZ)
CONST('Z', TCG_CT_CONST_ZERO)

tcg/mips/tcg-target.c.inc
@@ -171,67 +171,27 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */
#define TCG_CT_CONST_WSZ 0x2000 /* word size */
#define ALL_GENERAL_REGS 0xffffffffu
#define NOA0_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_A0))
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
(NOA0_REGS & ~((TCG_TARGET_REG_BITS < TARGET_LONG_BITS) << TCG_REG_A2))
#define ALL_QSTORE_REGS \
(NOA0_REGS & ~(TCG_TARGET_REG_BITS < TARGET_LONG_BITS \
? (1 << TCG_REG_A2) | (1 << TCG_REG_A3) \
: (1 << TCG_REG_A1)))
#else
#define ALL_QLOAD_REGS NOA0_REGS
#define ALL_QSTORE_REGS NOA0_REGS
#endif
static inline bool is_p2m1(tcg_target_long val)
{
return val && ((val + 1) & val) == 0;
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch(*ct_str++) {
case 'r':
ct->regs = 0xffffffff;
break;
case 'L': /* qemu_ld input arg constraint */
ct->regs = 0xffffffff;
tcg_regset_reset_reg(ct->regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
tcg_regset_reset_reg(ct->regs, TCG_REG_A2);
}
#endif
break;
case 'S': /* qemu_st constraint */
ct->regs = 0xffffffff;
tcg_regset_reset_reg(ct->regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
tcg_regset_reset_reg(ct->regs, TCG_REG_A2);
tcg_regset_reset_reg(ct->regs, TCG_REG_A3);
} else {
tcg_regset_reset_reg(ct->regs, TCG_REG_A1);
}
#endif
break;
case 'I':
ct->ct |= TCG_CT_CONST_U16;
break;
case 'J':
ct->ct |= TCG_CT_CONST_S16;
break;
case 'K':
ct->ct |= TCG_CT_CONST_P2M1;
break;
case 'N':
ct->ct |= TCG_CT_CONST_N16;
break;
case 'W':
ct->ct |= TCG_CT_CONST_WSZ;
break;
case 'Z':
/* We are cheating a bit here, using the fact that the register
ZERO is also the register number 0. Hence there is no need
to check for const_args in each instruction. */
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
return NULL;
}
return ct_str;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
@@ -1697,6 +1657,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0, a1, a2;
int c2;
/*
* Note that many operands use the constraint set "rZ".
* We make use of the fact that 0 is the ZERO register,
* and hence such cases need not check for const_args.
*/
a0 = args[0];
a1 = args[1];
a2 = args[2];
@@ -2147,52 +2112,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
static const TCGTargetOpDef SZ_S = { .args_ct_str = { "SZ", "S" } };
static const TCGTargetOpDef rZ_rZ = { .args_ct_str = { "rZ", "rZ" } };
static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
static const TCGTargetOpDef r_r_rJ = { .args_ct_str = { "r", "r", "rJ" } };
static const TCGTargetOpDef SZ_S_S = { .args_ct_str = { "SZ", "S", "S" } };
static const TCGTargetOpDef SZ_SZ_S
= { .args_ct_str = { "SZ", "SZ", "S" } };
static const TCGTargetOpDef SZ_SZ_S_S
= { .args_ct_str = { "SZ", "SZ", "S", "S" } };
static const TCGTargetOpDef r_rZ_rN
= { .args_ct_str = { "r", "rZ", "rN" } };
static const TCGTargetOpDef r_rZ_rZ
= { .args_ct_str = { "r", "rZ", "rZ" } };
static const TCGTargetOpDef r_r_rIK
= { .args_ct_str = { "r", "r", "rIK" } };
static const TCGTargetOpDef r_r_rWZ
= { .args_ct_str = { "r", "r", "rWZ" } };
static const TCGTargetOpDef r_r_r_r
= { .args_ct_str = { "r", "r", "r", "r" } };
static const TCGTargetOpDef r_r_L_L
= { .args_ct_str = { "r", "r", "L", "L" } };
static const TCGTargetOpDef dep
= { .args_ct_str = { "r", "0", "rZ" } };
static const TCGTargetOpDef movc
= { .args_ct_str = { "r", "rZ", "rZ", "rZ", "0" } };
static const TCGTargetOpDef movc_r6
= { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
static const TCGTargetOpDef add2
= { .args_ct_str = { "r", "r", "rZ", "rZ", "rN", "rN" } };
static const TCGTargetOpDef br2
= { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
static const TCGTargetOpDef setc2
= { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
@@ -2225,7 +2149,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
case INDEX_op_extract_i64:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
@@ -2234,14 +2158,14 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
return &rZ_r;
return C_O0_I2(rZ, r);
case INDEX_op_add_i32:
case INDEX_op_add_i64:
return &r_r_rJ;
return C_O1_I2(r, r, rJ);
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
return &r_rZ_rN;
return C_O1_I2(r, rZ, rN);
case INDEX_op_mul_i32:
case INDEX_op_mulsh_i32:
case INDEX_op_muluh_i32:
@@ -2260,20 +2184,20 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_remu_i64:
case INDEX_op_nor_i64:
case INDEX_op_setcond_i64:
return &r_rZ_rZ;
return C_O1_I2(r, rZ, rZ);
case INDEX_op_muls2_i32:
case INDEX_op_mulu2_i32:
case INDEX_op_muls2_i64:
case INDEX_op_mulu2_i64:
return &r_r_r_r;
return C_O2_I2(r, r, r, r);
case INDEX_op_and_i32:
case INDEX_op_and_i64:
return &r_r_rIK;
return C_O1_I2(r, r, rIK);
case INDEX_op_or_i32:
case INDEX_op_xor_i32:
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return &r_r_rI;
return C_O1_I2(r, r, rI);
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
@@ -2284,44 +2208,47 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_sar_i64:
case INDEX_op_rotr_i64:
case INDEX_op_rotl_i64:
return &r_r_ri;
return C_O1_I2(r, r, ri);
case INDEX_op_clz_i32:
case INDEX_op_clz_i64:
return &r_r_rWZ;
return C_O1_I2(r, r, rWZ);
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return &dep;
return C_O1_I2(r, 0, rZ);
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return &rZ_rZ;
return C_O0_I2(rZ, rZ);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return use_mips32r6_instructions ? &movc_r6 : &movc;
return (use_mips32r6_instructions
? C_O1_I4(r, rZ, rZ, rZ, rZ)
: C_O1_I4(r, rZ, rZ, rZ, 0));
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return &add2;
return C_O2_I4(r, r, rZ, rZ, rN, rN);
case INDEX_op_setcond2_i32:
return &setc2;
return C_O1_I4(r, rZ, rZ, rZ, rZ);
case INDEX_op_brcond2_i32:
return &br2;
return C_O0_I4(rZ, rZ, rZ, rZ);
case INDEX_op_qemu_ld_i32:
return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
? &r_L : &r_L_L);
? C_O1_I1(r, L) : C_O1_I2(r, L, L));
case INDEX_op_qemu_st_i32:
return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
? &SZ_S : &SZ_S_S);
? C_O0_I2(SZ, S) : C_O0_I3(SZ, S, S));
case INDEX_op_qemu_ld_i64:
return (TCG_TARGET_REG_BITS == 64 ? &r_L
: TARGET_LONG_BITS == 32 ? &r_r_L : &r_r_L_L);
return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
: TARGET_LONG_BITS == 32 ? C_O2_I1(r, r, L)
: C_O2_I2(r, r, L, L));
case INDEX_op_qemu_st_i64:
return (TCG_TARGET_REG_BITS == 64 ? &SZ_S
: TARGET_LONG_BITS == 32 ? &SZ_SZ_S : &SZ_SZ_S_S);
return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(SZ, S)
: TARGET_LONG_BITS == 32 ? C_O0_I3(SZ, SZ, S)
: C_O0_I4(SZ, SZ, S, S));
default:
return NULL;
g_assert_not_reached();
}
}

tcg/ppc/tcg-target-con-set.h
@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: MIT */
/*
* Define PowerPC target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
C_O0_I2(r, r)
C_O0_I2(r, ri)
C_O0_I2(S, S)
C_O0_I2(v, r)
C_O0_I3(S, S, S)
C_O0_I4(r, r, ri, ri)
C_O0_I4(S, S, S, S)
C_O1_I1(r, L)
C_O1_I1(r, r)
C_O1_I1(v, r)
C_O1_I1(v, v)
C_O1_I1(v, vr)
C_O1_I2(r, 0, rZ)
C_O1_I2(r, L, L)
C_O1_I2(r, rI, ri)
C_O1_I2(r, rI, rT)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rT)
C_O1_I2(r, r, rU)
C_O1_I2(r, r, rZW)
C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
C_O1_I4(r, r, ri, rZ, rZ)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(L, L, L)
C_O2_I2(L, L, L, L)
C_O2_I4(r, r, rI, rZM, r, r)
C_O2_I4(r, r, r, r, rI, rZM)

tcg/ppc/tcg-target-con-str.h
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: MIT */
/*
* Define PowerPC target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('v', ALL_VECTOR_REGS)
REGS('A', 1u << TCG_REG_R3)
REGS('B', 1u << TCG_REG_R4)
REGS('C', 1u << TCG_REG_R5)
REGS('D', 1u << TCG_REG_R6)
REGS('L', ALL_QLOAD_REGS)
REGS('S', ALL_QSTORE_REGS)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('I', TCG_CT_CONST_S16)
CONST('J', TCG_CT_CONST_U16)
CONST('M', TCG_CT_CONST_MONE)
CONST('T', TCG_CT_CONST_S32)
CONST('U', TCG_CT_CONST_U32)
CONST('W', TCG_CT_CONST_WSZ)
CONST('Z', TCG_CT_CONST_ZERO)

tcg/ppc/tcg-target.c.inc
@@ -62,6 +62,21 @@
#define TCG_CT_CONST_MONE 0x2000
#define TCG_CT_CONST_WSZ 0x4000
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
(ALL_GENERAL_REGS & \
~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
#define ALL_QSTORE_REGS \
(ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
(1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
#else
#define ALL_QLOAD_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
#define ALL_QSTORE_REGS ALL_QLOAD_REGS
#endif
TCGPowerISA have_isa;
static bool have_isel;
bool have_altivec;
@@ -222,64 +237,6 @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
return false;
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'A': case 'B': case 'C': case 'D':
tcg_regset_set_reg(ct->regs, 3 + ct_str[0] - 'A');
break;
case 'r':
ct->regs = 0xffffffff;
break;
case 'v':
ct->regs = 0xffffffff00000000ull;
break;
case 'L': /* qemu_ld constraint */
ct->regs = 0xffffffff;
tcg_regset_reset_reg(ct->regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
tcg_regset_reset_reg(ct->regs, TCG_REG_R4);
tcg_regset_reset_reg(ct->regs, TCG_REG_R5);
#endif
break;
case 'S': /* qemu_st constraint */
ct->regs = 0xffffffff;
tcg_regset_reset_reg(ct->regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
tcg_regset_reset_reg(ct->regs, TCG_REG_R4);
tcg_regset_reset_reg(ct->regs, TCG_REG_R5);
tcg_regset_reset_reg(ct->regs, TCG_REG_R6);
#endif
break;
case 'I':
ct->ct |= TCG_CT_CONST_S16;
break;
case 'J':
ct->ct |= TCG_CT_CONST_U16;
break;
case 'M':
ct->ct |= TCG_CT_CONST_MONE;
break;
case 'T':
ct->ct |= TCG_CT_CONST_S32;
break;
case 'U':
ct->ct |= TCG_CT_CONST_U32;
break;
case 'W':
ct->ct |= TCG_CT_CONST_WSZ;
break;
case 'Z':
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
return NULL;
}
return ct_str;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
@@ -3499,62 +3456,17 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
va_end(va);
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
static const TCGTargetOpDef S_S = { .args_ct_str = { "S", "S" } };
static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
static const TCGTargetOpDef S_S_S = { .args_ct_str = { "S", "S", "S" } };
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_rI = { .args_ct_str = { "r", "r", "rI" } };
static const TCGTargetOpDef r_r_rT = { .args_ct_str = { "r", "r", "rT" } };
static const TCGTargetOpDef r_r_rU = { .args_ct_str = { "r", "r", "rU" } };
static const TCGTargetOpDef r_rI_ri
= { .args_ct_str = { "r", "rI", "ri" } };
static const TCGTargetOpDef r_rI_rT
= { .args_ct_str = { "r", "rI", "rT" } };
static const TCGTargetOpDef r_r_rZW
= { .args_ct_str = { "r", "r", "rZW" } };
static const TCGTargetOpDef L_L_L_L
= { .args_ct_str = { "L", "L", "L", "L" } };
static const TCGTargetOpDef S_S_S_S
= { .args_ct_str = { "S", "S", "S", "S" } };
static const TCGTargetOpDef movc
= { .args_ct_str = { "r", "r", "ri", "rZ", "rZ" } };
static const TCGTargetOpDef dep
= { .args_ct_str = { "r", "0", "rZ" } };
static const TCGTargetOpDef br2
= { .args_ct_str = { "r", "r", "ri", "ri" } };
static const TCGTargetOpDef setc2
= { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
static const TCGTargetOpDef add2
= { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } };
static const TCGTargetOpDef sub2
= { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } };
static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } };
static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } };
static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } };
static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } };
static const TCGTargetOpDef v_v_v_v
= { .args_ct_str = { "v", "v", "v", "v" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
case INDEX_op_ctpop_i32:
case INDEX_op_neg_i32:
case INDEX_op_not_i32:
@@ -3570,10 +3482,6 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_ld32u_i64:
case INDEX_op_ld32s_i64:
case INDEX_op_ld_i64:
case INDEX_op_st8_i64:
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
case INDEX_op_ctpop_i64:
case INDEX_op_neg_i64:
case INDEX_op_not_i64:
@@ -3586,7 +3494,16 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
case INDEX_op_extract_i64:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
case INDEX_op_st8_i64:
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
return C_O0_I2(r, r);
case INDEX_op_add_i32:
case INDEX_op_and_i32:
@@ -3609,10 +3526,12 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i64:
case INDEX_op_setcond_i64:
return &r_r_ri;
return C_O1_I2(r, r, ri);
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
return &r_r_rI;
return C_O1_I2(r, r, rI);
case INDEX_op_div_i32:
case INDEX_op_divu_i32:
case INDEX_op_nand_i32:
@@ -3627,55 +3546,63 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_divu_i64:
case INDEX_op_mulsh_i64:
case INDEX_op_muluh_i64:
return &r_r_r;
return C_O1_I2(r, r, r);
case INDEX_op_sub_i32:
return &r_rI_ri;
return C_O1_I2(r, rI, ri);
case INDEX_op_add_i64:
return &r_r_rT;
return C_O1_I2(r, r, rT);
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return &r_r_rU;
return C_O1_I2(r, r, rU);
case INDEX_op_sub_i64:
return &r_rI_rT;
return C_O1_I2(r, rI, rT);
case INDEX_op_clz_i32:
case INDEX_op_ctz_i32:
case INDEX_op_clz_i64:
case INDEX_op_ctz_i64:
return &r_r_rZW;
return C_O1_I2(r, r, rZW);
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return &r_ri;
return C_O0_I2(r, ri);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
return &movc;
return C_O1_I4(r, r, ri, rZ, rZ);
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return &dep;
return C_O1_I2(r, 0, rZ);
case INDEX_op_brcond2_i32:
return &br2;
return C_O0_I4(r, r, ri, ri);
case INDEX_op_setcond2_i32:
return &setc2;
return C_O1_I4(r, r, r, ri, ri);
case INDEX_op_add2_i64:
case INDEX_op_add2_i32:
return &add2;
return C_O2_I4(r, r, r, r, rI, rZM);
case INDEX_op_sub2_i64:
case INDEX_op_sub2_i32:
return &sub2;
return C_O2_I4(r, r, rI, rZM, r, r);
case INDEX_op_qemu_ld_i32:
return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
? &r_L : &r_L_L);
? C_O1_I1(r, L)
: C_O1_I2(r, L, L));
case INDEX_op_qemu_st_i32:
return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
? &S_S : &S_S_S);
? C_O0_I2(S, S)
: C_O0_I3(S, S, S));
case INDEX_op_qemu_ld_i64:
return (TCG_TARGET_REG_BITS == 64 ? &r_L
: TARGET_LONG_BITS == 32 ? &L_L_L : &L_L_L_L);
return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
: TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
: C_O2_I2(L, L, L, L));
case INDEX_op_qemu_st_i64:
return (TCG_TARGET_REG_BITS == 64 ? &S_S
: TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S);
return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
: TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
: C_O0_I4(S, S, S, S));
case INDEX_op_add_vec:
case INDEX_op_sub_vec:
@@ -3705,22 +3632,28 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_ppc_mulou_vec:
case INDEX_op_ppc_pkum_vec:
case INDEX_op_dup2_vec:
return &v_v_v;
return C_O1_I2(v, v, v);
case INDEX_op_not_vec:
case INDEX_op_neg_vec:
return &v_v;
return C_O1_I1(v, v);
case INDEX_op_dup_vec:
return have_isa_3_00 ? &v_vr : &v_v;
return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);
case INDEX_op_ld_vec:
case INDEX_op_st_vec:
case INDEX_op_dupm_vec:
return &v_r;
return C_O1_I1(v, r);
case INDEX_op_st_vec:
return C_O0_I2(v, r);
case INDEX_op_bitsel_vec:
case INDEX_op_ppc_msum_vec:
return &v_v_v_v;
return C_O1_I3(v, v, v, v);
default:
return NULL;
g_assert_not_reached();
}
}

tcg/riscv/tcg-target-con-set.h
@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: MIT */
/*
* Define RISC-V target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
C_O0_I2(LZ, L)
C_O0_I2(rZ, r)
C_O0_I2(rZ, rZ)
C_O0_I3(LZ, L, L)
C_O0_I3(LZ, LZ, L)
C_O0_I4(LZ, LZ, L, L)
C_O0_I4(rZ, rZ, rZ, rZ)
C_O1_I1(r, L)
C_O1_I1(r, r)
C_O1_I2(r, L, L)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rI)
C_O1_I2(r, rZ, rN)
C_O1_I2(r, rZ, rZ)
C_O1_I4(r, rZ, rZ, rZ, rZ)
C_O2_I1(r, r, L)
C_O2_I2(r, r, L, L)
C_O2_I4(r, r, rZ, rZ, rM, rM)

tcg/riscv/tcg-target-con-str.h
@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: MIT */
/*
* Define RISC-V target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('I', TCG_CT_CONST_S12)
CONST('N', TCG_CT_CONST_N12)
CONST('M', TCG_CT_CONST_M12)
CONST('Z', TCG_CT_CONST_ZERO)

tcg/riscv/tcg-target.c.inc
@@ -122,6 +122,19 @@ static const int tcg_target_call_oarg_regs[] = {
#define TCG_CT_CONST_N12 0x400
#define TCG_CT_CONST_M12 0x800
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
/*
* For softmmu, we need to avoid conflicts with the first 5
* argument registers to call the helper. Some of these are
* also used for the tlb lookup.
*/
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
if (TCG_TARGET_REG_BITS == 32) {
@@ -131,45 +144,6 @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
}
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'r':
ct->regs = 0xffffffff;
break;
case 'L':
/* qemu_ld/qemu_st constraint */
ct->regs = 0xffffffff;
/* qemu_ld/qemu_st uses TCG_REG_TMP0 */
#if defined(CONFIG_SOFTMMU)
tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[0]);
tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[1]);
tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[2]);
tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[3]);
tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[4]);
#endif
break;
case 'I':
ct->ct |= TCG_CT_CONST_S12;
break;
case 'N':
ct->ct |= TCG_CT_CONST_N12;
break;
case 'M':
ct->ct |= TCG_CT_CONST_M12;
break;
case 'Z':
/* we can use a zero immediate as a zero register argument. */
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
return NULL;
}
return ct_str;
}
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
@@ -1569,50 +1543,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r
= { .args_ct_str = { "r" } };
static const TCGTargetOpDef r_r
= { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef rZ_r
= { .args_ct_str = { "rZ", "r" } };
static const TCGTargetOpDef rZ_rZ
= { .args_ct_str = { "rZ", "rZ" } };
static const TCGTargetOpDef rZ_rZ_rZ_rZ
= { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
static const TCGTargetOpDef r_r_ri
= { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_rI
= { .args_ct_str = { "r", "r", "rI" } };
static const TCGTargetOpDef r_rZ_rN
= { .args_ct_str = { "r", "rZ", "rN" } };
static const TCGTargetOpDef r_rZ_rZ
= { .args_ct_str = { "r", "rZ", "rZ" } };
static const TCGTargetOpDef r_rZ_rZ_rZ_rZ
= { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
static const TCGTargetOpDef r_L
= { .args_ct_str = { "r", "L" } };
static const TCGTargetOpDef r_r_L
= { .args_ct_str = { "r", "r", "L" } };
static const TCGTargetOpDef r_L_L
= { .args_ct_str = { "r", "L", "L" } };
static const TCGTargetOpDef r_r_L_L
= { .args_ct_str = { "r", "r", "L", "L" } };
static const TCGTargetOpDef LZ_L
= { .args_ct_str = { "LZ", "L" } };
static const TCGTargetOpDef LZ_L_L
= { .args_ct_str = { "LZ", "L", "L" } };
static const TCGTargetOpDef LZ_LZ_L
= { .args_ct_str = { "LZ", "LZ", "L" } };
static const TCGTargetOpDef LZ_LZ_L_L
= { .args_ct_str = { "LZ", "LZ", "L", "L" } };
static const TCGTargetOpDef r_r_rZ_rZ_rM_rM
= { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
@ -1644,7 +1579,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
case INDEX_op_ext_i32_i64:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
@ -1653,7 +1588,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
return &rZ_r;
return C_O0_I2(rZ, r);
case INDEX_op_add_i32:
case INDEX_op_and_i32:
@ -1663,11 +1598,11 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_and_i64:
case INDEX_op_or_i64:
case INDEX_op_xor_i64:
return &r_r_rI;
return C_O1_I2(r, r, rI);
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
return &r_rZ_rN;
return C_O1_I2(r, rZ, rN);
case INDEX_op_mul_i32:
case INDEX_op_mulsh_i32:
@ -1685,7 +1620,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_rem_i64:
case INDEX_op_remu_i64:
case INDEX_op_setcond_i64:
return &r_rZ_rZ;
return C_O1_I2(r, rZ, rZ);
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
@ -1693,39 +1628,41 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_shl_i64:
case INDEX_op_shr_i64:
case INDEX_op_sar_i64:
return &r_r_ri;
return C_O1_I2(r, r, ri);
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return &rZ_rZ;
return C_O0_I2(rZ, rZ);
case INDEX_op_add2_i32:
case INDEX_op_add2_i64:
case INDEX_op_sub2_i32:
case INDEX_op_sub2_i64:
return &r_r_rZ_rZ_rM_rM;
return C_O2_I4(r, r, rZ, rZ, rM, rM);
case INDEX_op_brcond2_i32:
return &rZ_rZ_rZ_rZ;
return C_O0_I4(rZ, rZ, rZ, rZ);
case INDEX_op_setcond2_i32:
return &r_rZ_rZ_rZ_rZ;
return C_O1_I4(r, rZ, rZ, rZ, rZ);
case INDEX_op_qemu_ld_i32:
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O1_I1(r, L) : C_O1_I2(r, L, L));
case INDEX_op_qemu_st_i32:
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L;
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O0_I2(LZ, L) : C_O0_I3(LZ, L, L));
case INDEX_op_qemu_ld_i64:
return TCG_TARGET_REG_BITS == 64 ? &r_L
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
: &r_r_L_L;
return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, L)
: C_O2_I2(r, r, L, L));
case INDEX_op_qemu_st_i64:
return TCG_TARGET_REG_BITS == 64 ? &LZ_L
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L
: &LZ_LZ_L_L;
return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(LZ, L)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(LZ, LZ, L)
: C_O0_I4(LZ, LZ, L, L));
default:
return NULL;
g_assert_not_reached();
}
}


@ -0,0 +1,29 @@
/* SPDX-License-Identifier: MIT */
/*
* Define S390 target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
C_O0_I2(L, L)
C_O0_I2(r, r)
C_O0_I2(r, ri)
C_O1_I1(r, L)
C_O1_I1(r, r)
C_O1_I2(r, 0, ri)
C_O1_I2(r, 0, rI)
C_O1_I2(r, 0, rJ)
C_O1_I2(r, r, ri)
C_O1_I2(r, rZ, r)
C_O1_I4(r, r, ri, r, 0)
C_O1_I4(r, r, ri, rI, 0)
C_O2_I2(b, a, 0, r)
C_O2_I3(b, a, 0, 1, r)
C_O2_I4(r, r, 0, 1, rA, r)
C_O2_I4(r, r, 0, 1, ri, r)
C_O2_I4(r, r, 0, 1, r, r)


@ -0,0 +1,28 @@
/* SPDX-License-Identifier: MIT */
/*
* Define S390 target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
/*
* A (single) even/odd pair for division.
* TODO: Add something to the register allocator to allow
* this kind of regno+1 pairing to be done more generally.
*/
REGS('a', 1u << TCG_REG_R2)
REGS('b', 1u << TCG_REG_R3)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('A', TCG_CT_CONST_S33)
CONST('I', TCG_CT_CONST_S16)
CONST('J', TCG_CT_CONST_S32)
CONST('Z', TCG_CT_CONST_ZERO)
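Read together with tcg-target-con-set.h above, these single-register sets give the division entries their meaning; a hedged reading of one of them:

/*
 * C_O2_I3(b, a, 0, 1, r) for div2 expands to
 *   { .args_ct_str = { "b", "a", "0", "1", "r" } }
 * i.e. output 0 fixed to R3, output 1 to R2, the first two inputs
 * aliased to those outputs, and the divisor in any general register,
 * matching the even/odd pair the hardware divide instructions require.
 */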


@ -42,6 +42,19 @@
#define TCG_CT_CONST_S33 0x400
#define TCG_CT_CONST_ZERO 0x800
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
/*
* For softmmu, we need to avoid conflicts with the first 3
* argument registers to perform the tlb lookup, and to call
* the helper function.
*/
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_R2, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
/* Several places within the instruction set 0 means "no register"
rather than TCG_REG_R0. */
#define TCG_REG_NONE 0
@ -403,46 +416,6 @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
return false;
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'r': /* all registers */
ct->regs = 0xffff;
break;
case 'L': /* qemu_ld/st constraint */
ct->regs = 0xffff;
tcg_regset_reset_reg(ct->regs, TCG_REG_R2);
tcg_regset_reset_reg(ct->regs, TCG_REG_R3);
tcg_regset_reset_reg(ct->regs, TCG_REG_R4);
break;
case 'a': /* force R2 for division */
ct->regs = 0;
tcg_regset_set_reg(ct->regs, TCG_REG_R2);
break;
case 'b': /* force R3 for division */
ct->regs = 0;
tcg_regset_set_reg(ct->regs, TCG_REG_R3);
break;
case 'A':
ct->ct |= TCG_CT_CONST_S33;
break;
case 'I':
ct->ct |= TCG_CT_CONST_S16;
break;
case 'J':
ct->ct |= TCG_CT_CONST_S32;
break;
case 'Z':
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
return NULL;
}
return ct_str;
}
/* Test if a constant matches the constraint. */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
@ -2301,27 +2274,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
static const TCGTargetOpDef r_ri = { .args_ct_str = { "r", "ri" } };
static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_0_ri = { .args_ct_str = { "r", "0", "ri" } };
static const TCGTargetOpDef r_0_rI = { .args_ct_str = { "r", "0", "rI" } };
static const TCGTargetOpDef r_0_rJ = { .args_ct_str = { "r", "0", "rJ" } };
static const TCGTargetOpDef a2_r
= { .args_ct_str = { "r", "r", "0", "1", "r", "r" } };
static const TCGTargetOpDef a2_ri
= { .args_ct_str = { "r", "r", "0", "1", "ri", "r" } };
static const TCGTargetOpDef a2_rA
= { .args_ct_str = { "r", "r", "0", "1", "rA", "r" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8u_i64:
@ -2335,6 +2292,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_ld32u_i64:
case INDEX_op_ld32s_i64:
case INDEX_op_ld_i64:
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st8_i64:
case INDEX_op_st16_i32:
@ -2342,11 +2301,22 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_st_i32:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
return &r_r;
return C_O0_I2(r, r);
case INDEX_op_add_i32:
case INDEX_op_add_i64:
return &r_r_ri;
case INDEX_op_shl_i64:
case INDEX_op_shr_i64:
case INDEX_op_sar_i64:
case INDEX_op_rotl_i32:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i32:
case INDEX_op_rotr_i64:
case INDEX_op_clz_i64:
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
return C_O1_I2(r, r, ri);
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_and_i32:
@ -2355,35 +2325,33 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_or_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
return (s390_facilities & FACILITY_DISTINCT_OPS
? C_O1_I2(r, r, ri)
: C_O1_I2(r, 0, ri));
case INDEX_op_mul_i32:
/* If we have the general-instruction-extensions, then we have
MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_ri : &r_0_rI);
return (s390_facilities & FACILITY_GEN_INST_EXT
? C_O1_I2(r, 0, ri)
: C_O1_I2(r, 0, rI));
case INDEX_op_mul_i64:
return (s390_facilities & FACILITY_GEN_INST_EXT ? &r_0_rJ : &r_0_rI);
return (s390_facilities & FACILITY_GEN_INST_EXT
? C_O1_I2(r, 0, rJ)
: C_O1_I2(r, 0, rI));
case INDEX_op_shl_i32:
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
return (s390_facilities & FACILITY_DISTINCT_OPS ? &r_r_ri : &r_0_ri);
case INDEX_op_shl_i64:
case INDEX_op_shr_i64:
case INDEX_op_sar_i64:
return &r_r_ri;
case INDEX_op_rotl_i32:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i32:
case INDEX_op_rotr_i64:
return &r_r_ri;
return (s390_facilities & FACILITY_DISTINCT_OPS
? C_O1_I2(r, r, ri)
: C_O1_I2(r, 0, ri));
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return &r_ri;
return C_O0_I2(r, ri);
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
@ -2406,63 +2374,49 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_extu_i32_i64:
case INDEX_op_extract_i32:
case INDEX_op_extract_i64:
return &r_r;
case INDEX_op_clz_i64:
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
return &r_r_ri;
return C_O1_I1(r, r);
case INDEX_op_qemu_ld_i32:
case INDEX_op_qemu_ld_i64:
return &r_L;
return C_O1_I1(r, L);
case INDEX_op_qemu_st_i64:
case INDEX_op_qemu_st_i32:
return &L_L;
return C_O0_I2(L, L);
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
{
static const TCGTargetOpDef dep
= { .args_ct_str = { "r", "rZ", "r" } };
return &dep;
}
return C_O1_I2(r, rZ, r);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
{
static const TCGTargetOpDef movc
= { .args_ct_str = { "r", "r", "ri", "r", "0" } };
static const TCGTargetOpDef movc_l
= { .args_ct_str = { "r", "r", "ri", "rI", "0" } };
return (s390_facilities & FACILITY_LOAD_ON_COND2 ? &movc_l : &movc);
}
return (s390_facilities & FACILITY_LOAD_ON_COND2
? C_O1_I4(r, r, ri, rI, 0)
: C_O1_I4(r, r, ri, r, 0));
case INDEX_op_div2_i32:
case INDEX_op_div2_i64:
case INDEX_op_divu2_i32:
case INDEX_op_divu2_i64:
{
static const TCGTargetOpDef div2
= { .args_ct_str = { "b", "a", "0", "1", "r" } };
return &div2;
}
return C_O2_I3(b, a, 0, 1, r);
case INDEX_op_mulu2_i64:
{
static const TCGTargetOpDef mul2
= { .args_ct_str = { "b", "a", "0", "r" } };
return &mul2;
}
return C_O2_I2(b, a, 0, r);
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return (s390_facilities & FACILITY_EXT_IMM ? &a2_ri : &a2_r);
return (s390_facilities & FACILITY_EXT_IMM
? C_O2_I4(r, r, 0, 1, ri, r)
: C_O2_I4(r, r, 0, 1, r, r));
case INDEX_op_add2_i64:
case INDEX_op_sub2_i64:
return (s390_facilities & FACILITY_EXT_IMM ? &a2_rA : &a2_r);
return (s390_facilities & FACILITY_EXT_IMM
? C_O2_I4(r, r, 0, 1, rA, r)
: C_O2_I4(r, r, 0, 1, r, r));
default:
break;
g_assert_not_reached();
}
return NULL;
}
static void query_s390_facilities(void)


@ -0,0 +1,32 @@
/* SPDX-License-Identifier: MIT */
/*
* Define Sparc target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
C_O0_I2(rZ, r)
C_O0_I2(RZ, r)
C_O0_I2(rZ, rJ)
C_O0_I2(RZ, RJ)
C_O0_I2(sZ, A)
C_O0_I2(SZ, A)
C_O1_I1(r, A)
C_O1_I1(R, A)
C_O1_I1(r, r)
C_O1_I1(r, R)
C_O1_I1(R, r)
C_O1_I1(R, R)
C_O1_I2(R, R, R)
C_O1_I2(r, rZ, rJ)
C_O1_I2(R, RZ, RJ)
C_O1_I4(r, rZ, rJ, rI, 0)
C_O1_I4(R, RZ, RJ, RI, 0)
C_O2_I2(r, r, rZ, rJ)
C_O2_I4(R, R, RZ, RZ, RJ, RI)
C_O2_I4(r, r, rZ, rZ, rJ, rJ)


@ -0,0 +1,23 @@
/* SPDX-License-Identifier: MIT */
/*
* Define Sparc target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', ALL_GENERAL_REGS)
REGS('R', ALL_GENERAL_REGS64)
REGS('s', ALL_QLDST_REGS)
REGS('S', ALL_QLDST_REGS64)
REGS('A', TARGET_LONG_BITS == 64 ? ALL_QLDST_REGS64 : ALL_QLDST_REGS)
/*
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('I', TCG_CT_CONST_S11)
CONST('J', TCG_CT_CONST_S13)
CONST('Z', TCG_CT_CONST_ZERO)


@ -67,18 +67,38 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
# define SPARC64 0
#endif
/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
registers. These are saved manually by the kernel in full 64-bit
slots. The %i and %l registers are saved by the register window
mechanism, which only allocates space for 32 bits. Given that this
window spill/fill can happen on any signal, we must consider the
high bits of the %i and %l registers garbage at all times. */
#if SPARC64
# define ALL_64 0xffffffffu
#define TCG_CT_CONST_S11 0x100
#define TCG_CT_CONST_S13 0x200
#define TCG_CT_CONST_ZERO 0x400
/*
* For softmmu, we need to avoid conflicts with the first 3
* argument registers to perform the tlb lookup, and to call
* the helper function.
*/
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
# define ALL_64 0xffffu
#define SOFTMMU_RESERVE_REGS 0
#endif
/*
* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
* registers. These are saved manually by the kernel in full 64-bit
* slots. The %i and %l registers are saved by the register window
* mechanism, which only allocates space for 32 bits. Given that this
* window spill/fill can happen on any signal, we must consider the
* high bits of the %i and %l registers garbage at all times.
*/
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
#if SPARC64
# define ALL_GENERAL_REGS64 ALL_GENERAL_REGS
#else
# define ALL_GENERAL_REGS64 MAKE_64BIT_MASK(0, 16)
#endif
#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
#define ALL_QLDST_REGS64 (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)
/* Define some temporary registers. T2 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7
@ -320,45 +340,6 @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
return true;
}
/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'r':
ct->regs = 0xffffffff;
break;
case 'R':
ct->regs = ALL_64;
break;
case 'A': /* qemu_ld/st address constraint */
ct->regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff;
reserve_helpers:
tcg_regset_reset_reg(ct->regs, TCG_REG_O0);
tcg_regset_reset_reg(ct->regs, TCG_REG_O1);
tcg_regset_reset_reg(ct->regs, TCG_REG_O2);
break;
case 's': /* qemu_st data 32-bit constraint */
ct->regs = 0xffffffff;
goto reserve_helpers;
case 'S': /* qemu_st data 64-bit constraint */
ct->regs = ALL_64;
goto reserve_helpers;
case 'I':
ct->ct |= TCG_CT_CONST_S11;
break;
case 'J':
ct->ct |= TCG_CT_CONST_S13;
break;
case 'Z':
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
return NULL;
}
return ct_str;
}
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
@ -1592,40 +1573,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
}
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } };
static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } };
static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } };
static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } };
static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } };
static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } };
static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } };
static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } };
static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } };
static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } };
static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } };
static const TCGTargetOpDef r_rZ_rJ
= { .args_ct_str = { "r", "rZ", "rJ" } };
static const TCGTargetOpDef R_RZ_RJ
= { .args_ct_str = { "R", "RZ", "RJ" } };
static const TCGTargetOpDef r_r_rZ_rJ
= { .args_ct_str = { "r", "r", "rZ", "rJ" } };
static const TCGTargetOpDef movc_32
= { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } };
static const TCGTargetOpDef movc_64
= { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } };
static const TCGTargetOpDef add2_32
= { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } };
static const TCGTargetOpDef add2_64
= { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } };
switch (op) {
case INDEX_op_goto_ptr:
return &r;
return C_O0_I1(r);
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
@ -1634,12 +1586,12 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_ld_i32:
case INDEX_op_neg_i32:
case INDEX_op_not_i32:
return &r_r;
return C_O1_I1(r, r);
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
return &rZ_r;
return C_O0_I2(rZ, r);
case INDEX_op_add_i32:
case INDEX_op_mul_i32:
@ -1655,18 +1607,18 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_shr_i32:
case INDEX_op_sar_i32:
case INDEX_op_setcond_i32:
return &r_rZ_rJ;
return C_O1_I2(r, rZ, rJ);
case INDEX_op_brcond_i32:
return &rZ_rJ;
return C_O0_I2(rZ, rJ);
case INDEX_op_movcond_i32:
return &movc_32;
return C_O1_I4(r, rZ, rJ, rI, 0);
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return &add2_32;
return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
case INDEX_op_mulu2_i32:
case INDEX_op_muls2_i32:
return &r_r_rZ_rJ;
return C_O2_I2(r, r, rZ, rJ);
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
@ -1677,13 +1629,13 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_ld_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
return &R_r;
return C_O1_I1(R, r);
case INDEX_op_st8_i64:
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
return &RZ_r;
return C_O0_I2(RZ, r);
case INDEX_op_add_i64:
case INDEX_op_mul_i64:
@ -1699,39 +1651,39 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_shr_i64:
case INDEX_op_sar_i64:
case INDEX_op_setcond_i64:
return &R_RZ_RJ;
return C_O1_I2(R, RZ, RJ);
case INDEX_op_neg_i64:
case INDEX_op_not_i64:
case INDEX_op_ext32s_i64:
case INDEX_op_ext32u_i64:
return &R_R;
return C_O1_I1(R, R);
case INDEX_op_extrl_i64_i32:
case INDEX_op_extrh_i64_i32:
return &r_R;
return C_O1_I1(r, R);
case INDEX_op_brcond_i64:
return &RZ_RJ;
return C_O0_I2(RZ, RJ);
case INDEX_op_movcond_i64:
return &movc_64;
return C_O1_I4(R, RZ, RJ, RI, 0);
case INDEX_op_add2_i64:
case INDEX_op_sub2_i64:
return &add2_64;
return C_O2_I4(R, R, RZ, RZ, RJ, RI);
case INDEX_op_muluh_i64:
return &R_R_R;
return C_O1_I2(R, R, R);
case INDEX_op_qemu_ld_i32:
return &r_A;
return C_O1_I1(r, A);
case INDEX_op_qemu_ld_i64:
return &R_A;
return C_O1_I1(R, A);
case INDEX_op_qemu_st_i32:
return &sZ_A;
return C_O0_I2(sZ, A);
case INDEX_op_qemu_st_i64:
return &SZ_A;
return C_O0_I2(SZ, A);
default:
return NULL;
g_assert_not_reached();
}
}
@ -1746,8 +1698,8 @@ static void tcg_target_init(TCGContext *s)
}
#endif
tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
tcg_target_available_regs[TCG_TYPE_I64] = ALL_64;
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64;
tcg_target_call_clobber_regs = 0;
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);


@ -66,10 +66,6 @@ typedef enum {
TCG_REG_I7,
} TCGReg;
#define TCG_CT_CONST_S11 0x100
#define TCG_CT_CONST_S13 0x200
#define TCG_CT_CONST_ZERO 0x400
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_O6

tcg/tcg.c

@ -69,7 +69,6 @@
/* Forward declarations for functions declared in tcg-target.c.inc and
used here. */
static void tcg_target_init(TCGContext *s);
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend);
@ -103,8 +102,6 @@ static void tcg_register_jit_int(const void *buf, size_t size,
__attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
intptr_t arg2);
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
@ -349,6 +346,109 @@ static void set_jmp_reset_offset(TCGContext *s, int which)
s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}
#define C_PFX1(P, A) P##A
#define C_PFX2(P, A, B) P##A##_##B
#define C_PFX3(P, A, B, C) P##A##_##B##_##C
#define C_PFX4(P, A, B, C, D) P##A##_##B##_##C##_##D
#define C_PFX5(P, A, B, C, D, E) P##A##_##B##_##C##_##D##_##E
#define C_PFX6(P, A, B, C, D, E, F) P##A##_##B##_##C##_##D##_##E##_##F
/* Define an enumeration for the various combinations. */
#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1),
#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2),
#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3),
#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4),
#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1),
#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2),
#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3),
#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4),
#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2),
#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1),
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2),
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3),
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4),
typedef enum {
#include "tcg-target-con-set.h"
} TCGConstraintSetIndex;
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
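Under this first expansion, each line of a backend's tcg-target-con-set.h contributes one enumerator; for example, the RISC-V line shown earlier:

/* C_O1_I2(r, r, rI) expands here to: */
c_o1_i2_r_r_rI,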
#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
/* Put all of the constraint sets into an array, indexed by the enum. */
#define C_O0_I1(I1) { .args_ct_str = { #I1 } },
#define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } },
#define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } },
#define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } },
#define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } },
#define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } },
#define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } },
#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },
#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
static const TCGTargetOpDef constraint_sets[] = {
#include "tcg-target-con-set.h"
};
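The second expansion turns the same line into the constraint-string record at the matching enum index:

/* C_O1_I2(r, r, rI) expands here to: */
{ .args_ct_str = { "r", "r", "rI" } },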
#undef C_O0_I1
#undef C_O0_I2
#undef C_O0_I3
#undef C_O0_I4
#undef C_O1_I1
#undef C_O1_I2
#undef C_O1_I3
#undef C_O1_I4
#undef C_N1_I2
#undef C_O2_I1
#undef C_O2_I2
#undef C_O2_I3
#undef C_O2_I4
/* Expand the enumerator to be returned from tcg_target_op_def(). */
#define C_O0_I1(I1) C_PFX1(c_o0_i1_, I1)
#define C_O0_I2(I1, I2) C_PFX2(c_o0_i2_, I1, I2)
#define C_O0_I3(I1, I2, I3) C_PFX3(c_o0_i3_, I1, I2, I3)
#define C_O0_I4(I1, I2, I3, I4) C_PFX4(c_o0_i4_, I1, I2, I3, I4)
#define C_O1_I1(O1, I1) C_PFX2(c_o1_i1_, O1, I1)
#define C_O1_I2(O1, I1, I2) C_PFX3(c_o1_i2_, O1, I1, I2)
#define C_O1_I3(O1, I1, I2, I3) C_PFX4(c_o1_i3_, O1, I1, I2, I3)
#define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4)
#define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2)
#define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1)
#define C_O2_I2(O1, O2, I1, I2) C_PFX4(c_o2_i2_, O1, O2, I1, I2)
#define C_O2_I3(O1, O2, I1, I2, I3) C_PFX5(c_o2_i3_, O1, O2, I1, I2, I3)
#define C_O2_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_o2_i4_, O1, O2, I1, I2, I3, I4)
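With this third expansion in scope, a backend's tcg_target_op_def() simply names the set, as in the RISC-V hunk earlier in this diff:

case INDEX_op_add_i32:
    return C_O1_I2(r, r, rI);   /* evaluates to c_o1_i2_r_r_rI */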
#include "tcg-target.c.inc"
/* compare a pointer @ptr and a tb_tc @s */
@ -2415,7 +2515,6 @@ static void process_op_defs(TCGContext *s)
for (op = 0; op < NB_OPS; op++) {
TCGOpDef *def = &tcg_op_defs[op];
const TCGTargetOpDef *tdefs;
TCGType type;
int i, nb_args;
if (def->flags & TCG_OPF_NOT_PRESENT) {
@ -2427,11 +2526,15 @@ static void process_op_defs(TCGContext *s)
continue;
}
tdefs = tcg_target_op_def(op);
/* Missing TCGTargetOpDef entry. */
tcg_debug_assert(tdefs != NULL);
/*
* Macro magic should make it impossible, but double-check that
* the array index is in range. Since the signness of an enum
* is implementation defined, force the result to unsigned.
*/
unsigned con_set = tcg_target_op_def(op);
tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
tdefs = &constraint_sets[con_set];
type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
for (i = 0; i < nb_args; i++) {
const char *ct_str = tdefs->args_ct_str[i];
/* Incomplete TCGTargetOpDef entry. */
@ -2463,11 +2566,22 @@ static void process_op_defs(TCGContext *s)
def->args_ct[i].ct |= TCG_CT_CONST;
ct_str++;
break;
/* Include all of the target-specific constraints. */
#undef CONST
#define CONST(CASE, MASK) \
case CASE: def->args_ct[i].ct |= MASK; ct_str++; break;
#define REGS(CASE, MASK) \
case CASE: def->args_ct[i].regs |= MASK; ct_str++; break;
#include "tcg-target-con-str.h"
#undef REGS
#undef CONST
default:
ct_str = target_parse_constraint(&def->args_ct[i],
ct_str, type);
/* Typo in TCGTargetOpDef constraint. */
tcg_debug_assert(ct_str != NULL);
g_assert_not_reached();
}
}
}
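For a concrete picture of that include, the RISC-V line CONST('I', TCG_CT_CONST_S12) from tcg-target-con-str.h expands inside this switch to:

case 'I': def->args_ct[i].ct |= TCG_CT_CONST_S12; ct_str++; break;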


@ -0,0 +1,25 @@
/* SPDX-License-Identifier: MIT */
/*
* TCI target-specific constraint sets.
* Copyright (c) 2021 Linaro
*/
/*
* C_On_Im(...) defines a constraint set with <n> outputs and <m> inputs.
* Each operand should be a sequence of constraint letters as defined by
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I2(r, r)
C_O0_I2(r, ri)
C_O0_I3(r, r, r)
C_O0_I4(r, r, ri, ri)
C_O0_I4(r, r, r, r)
C_O1_I1(r, r)
C_O1_I2(r, 0, r)
C_O1_I2(r, ri, ri)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, r)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, r, r, r, r)


@ -0,0 +1,11 @@
/* SPDX-License-Identifier: MIT */
/*
* Define TCI target-specific operand constraints.
* Copyright (c) 2021 Linaro
*/
/*
* Define constraint letters for register sets:
* REGS(letter, register_mask)
*/
REGS('r', MAKE_64BIT_MASK(0, TCG_TARGET_NB_REGS))


@ -37,236 +37,143 @@
/* Bitfield n...m (in 32 bit value). */
#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m)
/* Macros used in tcg_target_op_defs. */
#define R "r"
#define RI "ri"
#if TCG_TARGET_REG_BITS == 32
# define R64 "r", "r"
#else
# define R64 "r"
#endif
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
# define L "L", "L"
# define S "S", "S"
#else
# define L "L"
# define S "S"
#endif
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
switch (op) {
case INDEX_op_ld8u_i32:
case INDEX_op_ld8s_i32:
case INDEX_op_ld16u_i32:
case INDEX_op_ld16s_i32:
case INDEX_op_ld_i32:
case INDEX_op_ld8u_i64:
case INDEX_op_ld8s_i64:
case INDEX_op_ld16u_i64:
case INDEX_op_ld16s_i64:
case INDEX_op_ld32u_i64:
case INDEX_op_ld32s_i64:
case INDEX_op_ld_i64:
case INDEX_op_not_i32:
case INDEX_op_not_i64:
case INDEX_op_neg_i32:
case INDEX_op_neg_i64:
case INDEX_op_ext8s_i32:
case INDEX_op_ext8s_i64:
case INDEX_op_ext16s_i32:
case INDEX_op_ext16s_i64:
case INDEX_op_ext8u_i32:
case INDEX_op_ext8u_i64:
case INDEX_op_ext16u_i32:
case INDEX_op_ext16u_i64:
case INDEX_op_ext32s_i64:
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
case INDEX_op_extu_i32_i64:
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
case INDEX_op_bswap32_i32:
case INDEX_op_bswap32_i64:
case INDEX_op_bswap64_i64:
return C_O1_I1(r, r);
/* TODO: documentation. */
static const TCGTargetOpDef tcg_target_op_defs[] = {
{ INDEX_op_exit_tb, { NULL } },
{ INDEX_op_goto_tb, { NULL } },
{ INDEX_op_br, { NULL } },
case INDEX_op_st8_i32:
case INDEX_op_st16_i32:
case INDEX_op_st_i32:
case INDEX_op_st8_i64:
case INDEX_op_st16_i64:
case INDEX_op_st32_i64:
case INDEX_op_st_i64:
return C_O0_I2(r, r);
{ INDEX_op_ld8u_i32, { R, R } },
{ INDEX_op_ld8s_i32, { R, R } },
{ INDEX_op_ld16u_i32, { R, R } },
{ INDEX_op_ld16s_i32, { R, R } },
{ INDEX_op_ld_i32, { R, R } },
{ INDEX_op_st8_i32, { R, R } },
{ INDEX_op_st16_i32, { R, R } },
{ INDEX_op_st_i32, { R, R } },
case INDEX_op_div_i32:
case INDEX_op_div_i64:
case INDEX_op_divu_i32:
case INDEX_op_divu_i64:
case INDEX_op_rem_i32:
case INDEX_op_rem_i64:
case INDEX_op_remu_i32:
case INDEX_op_remu_i64:
return C_O1_I2(r, r, r);
{ INDEX_op_add_i32, { R, RI, RI } },
{ INDEX_op_sub_i32, { R, RI, RI } },
{ INDEX_op_mul_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i32
{ INDEX_op_div_i32, { R, R, R } },
{ INDEX_op_divu_i32, { R, R, R } },
{ INDEX_op_rem_i32, { R, R, R } },
{ INDEX_op_remu_i32, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i32
{ INDEX_op_div2_i32, { R, R, "0", "1", R } },
{ INDEX_op_divu2_i32, { R, R, "0", "1", R } },
#endif
/* TODO: Does R, RI, RI result in faster code than R, R, RI?
If both operands are constants, we can optimize. */
{ INDEX_op_and_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i32
{ INDEX_op_andc_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i32
{ INDEX_op_eqv_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i32
{ INDEX_op_nand_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i32
{ INDEX_op_nor_i32, { R, RI, RI } },
#endif
{ INDEX_op_or_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i32
{ INDEX_op_orc_i32, { R, RI, RI } },
#endif
{ INDEX_op_xor_i32, { R, RI, RI } },
{ INDEX_op_shl_i32, { R, RI, RI } },
{ INDEX_op_shr_i32, { R, RI, RI } },
{ INDEX_op_sar_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i32
{ INDEX_op_rotl_i32, { R, RI, RI } },
{ INDEX_op_rotr_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i32
{ INDEX_op_deposit_i32, { R, "0", R } },
#endif
case INDEX_op_add_i32:
case INDEX_op_add_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
case INDEX_op_and_i32:
case INDEX_op_and_i64:
case INDEX_op_andc_i32:
case INDEX_op_andc_i64:
case INDEX_op_eqv_i32:
case INDEX_op_eqv_i64:
case INDEX_op_nand_i32:
case INDEX_op_nand_i64:
case INDEX_op_nor_i32:
case INDEX_op_nor_i64:
case INDEX_op_or_i32:
case INDEX_op_or_i64:
case INDEX_op_orc_i32:
case INDEX_op_orc_i64:
case INDEX_op_xor_i32:
case INDEX_op_xor_i64:
case INDEX_op_shl_i32:
case INDEX_op_shl_i64:
case INDEX_op_shr_i32:
case INDEX_op_shr_i64:
case INDEX_op_sar_i32:
case INDEX_op_sar_i64:
case INDEX_op_rotl_i32:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i32:
case INDEX_op_rotr_i64:
/* TODO: Does R, RI, RI result in faster code than R, R, RI? */
return C_O1_I2(r, ri, ri);
{ INDEX_op_brcond_i32, { R, RI } },
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return C_O1_I2(r, 0, r);
{ INDEX_op_setcond_i32, { R, R, RI } },
#if TCG_TARGET_REG_BITS == 64
{ INDEX_op_setcond_i64, { R, R, RI } },
#endif /* TCG_TARGET_REG_BITS == 64 */
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return C_O0_I2(r, ri);
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
return C_O1_I2(r, r, ri);
#if TCG_TARGET_REG_BITS == 32
/* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
{ INDEX_op_add2_i32, { R, R, R, R, R, R } },
{ INDEX_op_sub2_i32, { R, R, R, R, R, R } },
{ INDEX_op_brcond2_i32, { R, R, RI, RI } },
{ INDEX_op_mulu2_i32, { R, R, R, R } },
{ INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
return C_O2_I4(r, r, r, r, r, r);
case INDEX_op_brcond2_i32:
return C_O0_I4(r, r, ri, ri);
case INDEX_op_mulu2_i32:
return C_O2_I2(r, r, r, r);
case INDEX_op_setcond2_i32:
return C_O1_I4(r, r, r, ri, ri);
#endif
#if TCG_TARGET_HAS_not_i32
{ INDEX_op_not_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i32
{ INDEX_op_neg_i32, { R, R } },
#endif
case INDEX_op_qemu_ld_i32:
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O1_I1(r, r)
: C_O1_I2(r, r, r));
case INDEX_op_qemu_ld_i64:
return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, r)
: C_O2_I2(r, r, r, r));
case INDEX_op_qemu_st_i32:
return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
? C_O0_I2(r, r)
: C_O0_I3(r, r, r));
case INDEX_op_qemu_st_i64:
return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
: TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(r, r, r)
: C_O0_I4(r, r, r, r));
#if TCG_TARGET_REG_BITS == 64
{ INDEX_op_ld8u_i64, { R, R } },
{ INDEX_op_ld8s_i64, { R, R } },
{ INDEX_op_ld16u_i64, { R, R } },
{ INDEX_op_ld16s_i64, { R, R } },
{ INDEX_op_ld32u_i64, { R, R } },
{ INDEX_op_ld32s_i64, { R, R } },
{ INDEX_op_ld_i64, { R, R } },
{ INDEX_op_st8_i64, { R, R } },
{ INDEX_op_st16_i64, { R, R } },
{ INDEX_op_st32_i64, { R, R } },
{ INDEX_op_st_i64, { R, R } },
{ INDEX_op_add_i64, { R, RI, RI } },
{ INDEX_op_sub_i64, { R, RI, RI } },
{ INDEX_op_mul_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i64
{ INDEX_op_div_i64, { R, R, R } },
{ INDEX_op_divu_i64, { R, R, R } },
{ INDEX_op_rem_i64, { R, R, R } },
{ INDEX_op_remu_i64, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i64
{ INDEX_op_div2_i64, { R, R, "0", "1", R } },
{ INDEX_op_divu2_i64, { R, R, "0", "1", R } },
#endif
{ INDEX_op_and_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i64
{ INDEX_op_andc_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i64
{ INDEX_op_eqv_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i64
{ INDEX_op_nand_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i64
{ INDEX_op_nor_i64, { R, RI, RI } },
#endif
{ INDEX_op_or_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i64
{ INDEX_op_orc_i64, { R, RI, RI } },
#endif
{ INDEX_op_xor_i64, { R, RI, RI } },
{ INDEX_op_shl_i64, { R, RI, RI } },
{ INDEX_op_shr_i64, { R, RI, RI } },
{ INDEX_op_sar_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i64
{ INDEX_op_rotl_i64, { R, RI, RI } },
{ INDEX_op_rotr_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i64
{ INDEX_op_deposit_i64, { R, "0", R } },
#endif
{ INDEX_op_brcond_i64, { R, RI } },
#if TCG_TARGET_HAS_ext8s_i64
{ INDEX_op_ext8s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i64
{ INDEX_op_ext16s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32s_i64
{ INDEX_op_ext32s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i64
{ INDEX_op_ext8u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i64
{ INDEX_op_ext16u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32u_i64
{ INDEX_op_ext32u_i64, { R, R } },
#endif
{ INDEX_op_ext_i32_i64, { R, R } },
{ INDEX_op_extu_i32_i64, { R, R } },
#if TCG_TARGET_HAS_bswap16_i64
{ INDEX_op_bswap16_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i64
{ INDEX_op_bswap32_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap64_i64
{ INDEX_op_bswap64_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_not_i64
{ INDEX_op_not_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i64
{ INDEX_op_neg_i64, { R, R } },
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
{ INDEX_op_qemu_ld_i32, { R, L } },
{ INDEX_op_qemu_ld_i64, { R64, L } },
{ INDEX_op_qemu_st_i32, { R, S } },
{ INDEX_op_qemu_st_i64, { R64, S } },
#if TCG_TARGET_HAS_ext8s_i32
{ INDEX_op_ext8s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i32
{ INDEX_op_ext16s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i32
{ INDEX_op_ext8u_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i32
{ INDEX_op_ext16u_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap16_i32
{ INDEX_op_bswap16_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i32
{ INDEX_op_bswap32_i32, { R, R } },
#endif
{ INDEX_op_mb, { } },
{ -1 },
};
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
int i, n = ARRAY_SIZE(tcg_target_op_defs);
for (i = 0; i < n; ++i) {
if (tcg_target_op_defs[i].op == op) {
return &tcg_target_op_defs[i];
}
default:
g_assert_not_reached();
}
return NULL;
}
static const int tcg_target_reg_alloc_order[] = {
@ -384,22 +291,6 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
return true;
}
/* Parse target specific constraints. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
const char *ct_str, TCGType type)
{
switch (*ct_str++) {
case 'r':
case 'L': /* qemu_ld constraint */
case 'S': /* qemu_st constraint */
ct->regs = BIT(TCG_TARGET_NB_REGS) - 1;
break;
default:
return NULL;
}
return ct_str;
}
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
/* Show current bytecode. Used by tcg interpreter. */
void tci_disas(uint8_t opc)