mirror of https://gitee.com/openkylin/qemu.git

commit 74f30f153f

Queued TCG patches
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJWuStqAAoJEK0ScMxN0Ceb5jAH/A0b9oMEB9pkXclPBqolbekv
6gcMl3Zp+v8vBJh/IBdd2c9+xZygJ5KtSH8SMbjkfFLxsuBPC5UVTT2g+8oQjLhm
aqb7hD4CTzgtSlbnG5UGXn9iiWaE7arX+cQRLTJMZj3vfJAA1iyj85gQPhIK7OvT
5eHGeMFTVm7zj2OENnUiBW14shr4fVFBH5IzWV1KJnM6V85U56zTAjxatHQmEesi
O0WBnVPZKpD2ksZfX7484YKNiE4iNNzus01zwL5223ml+jhRAW7Y7RSbd2h/bK9w
LMpNxePdzlVz1VQ15BNH5uZCA+GeUpwfWrgJgJQk9BLYY0rScQFn4g4MBZix5aI=
=RMc5
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20160209' into staging

Queued TCG patches

# gpg: Signature made Mon 08 Feb 2016 23:57:30 GMT using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-tcg-20160209:
  tcg: Introduce temp_load
  tcg: Change temp_save argument to TCGTemp
  tcg: Change temp_sync argument to TCGTemp
  tcg: Change temp_dead argument to TCGTemp
  tcg: Change reg_to_temp to TCGTemp pointer
  tcg: Remove tcg_get_arg_str_i32/64
  tcg: More use of TCGReg where appropriate
  tcg: Work around clang bug wrt enum ranges
  tcg: Tidy temporary allocation
  tcg: Change ts->mem_reg to ts->mem_base
  tcg: Change tcg_global_mem_new_* to take a TCGv_ptr
  tcg: Remove lingering references to gen_opc_buf
  tcg: Respect highwater in tcg_out_tb_finalize

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
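The headline change in this pull ("tcg: Change tcg_global_mem_new_* to take a
TCGv_ptr") is visible in every target hunk below: globals that live in the CPU
state are now registered against the cpu_env TCGv_ptr rather than against the
raw host register index TCG_AREG0. A minimal sketch of the before/after shape
of a target's translate_init, using a hypothetical CPUFooState/cpu_pc purely
for illustration:

    /* Before: the base of the global is the host register TCG_AREG0. */
    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUFooState, pc), "pc");

    /* After: the base is the env pointer temp itself, so the TCG core can
       track it through ts->mem_base like any other temporary. */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                    offsetof(CPUFooState, pc), "pc");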
@@ -152,13 +152,13 @@ void alpha_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

     for (i = 0; i < 31; i++) {
-        cpu_std_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
+        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                                offsetof(CPUAlphaState, ir[i]),
                                                greg_names[i]);
     }
@@ -167,7 +167,7 @@ void alpha_translate_init(void)
@@ -176,7 +176,7 @@ void alpha_translate_init(void)
    ... (the same TCG_AREG0 -> cpu_env change is applied to cpu_fir[i] over
    fir[i], cpu_pal_ir[r] over shadow[i], and the GlobalVar entries *v->var)
@@ -86,16 +86,16 @@ void a64_translate_init(void)
 {
     int i;

-    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
+    cpu_pc = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUARMState, pc),
                                     "pc");
    ... (likewise cpu_X[i] over xregs[i] and cpu_exclusive_high)
 }
@@ -86,23 +86,23 @@ void arm_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

     for (i = 0; i < 16; i++) {
-        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
+        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUARMState, regs[i]),
                                           regnames[i]);
     }
-    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
+    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    ... (likewise cpu_NF, cpu_VF, cpu_ZF, cpu_exclusive_addr, cpu_exclusive_val
    and, under CONFIG_USER_ONLY, cpu_exclusive_test and cpu_exclusive_info)

@@ -11209,8 +11209,7 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
     return false;
 }

-/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
-   basic block 'tb'. */
+/* generate intermediate code for basic block 'tb'. */
 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
@@ -3364,41 +3364,41 @@ void cris_initialize_tcg(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cc_x = tcg_global_mem_new(TCG_AREG0,
+    cc_x = tcg_global_mem_new(cpu_env,
                               offsetof(CPUCRISState, cc_x), "cc_x");
    ... (likewise cc_src, cc_dest, cc_result, cc_op, cc_size, cc_mask, env_pc,
    env_btarget, env_btaken, cpu_R[i] and cpu_PR[i])
@@ -1247,45 +1247,45 @@ static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
 void cris_initialize_crisv10_tcg(void)
 {
     int i;

     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cc_x = tcg_global_mem_new(TCG_AREG0,
+    cc_x = tcg_global_mem_new(cpu_env,
                               offsetof(CPUCRISState, cc_x), "cc_x");
    ... (the whole initialization block is rewritten with cpu_env as the base:
    likewise cc_src, cc_dest, cc_result, cc_op, cc_size, cc_mask, env_pc,
    env_btarget, env_btaken, cpu_R[i] with regnames_v10[i] and cpu_PR[i] with
    pregnames_v10[i])
 }
@@ -7860,17 +7860,17 @@ void tcg_x86_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
+    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                        offsetof(CPUX86State, cc_op), "cc_op");
    ... (likewise cpu_cc_dst, cpu_cc_src, cpu_cc_src2 and cpu_regs[i])

@@ -7878,8 +7878,7 @@ void tcg_x86_init(void)
     helper_lock_init();
 }

-/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
-   basic block 'tb'. */
+/* generate intermediate code for basic block 'tb'. */
 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
 {
     X86CPU *cpu = x86_env_get_cpu(env);
@@ -1193,48 +1193,48 @@ void lm32_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

     for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
-        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
+        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPULM32State, regs[i]),
                                       regnames[i]);
     }
    ... (likewise cpu_bp[i], cpu_wp[i], cpu_pc, cpu_ie, cpu_icc, cpu_dcc,
    cpu_cc, cpu_cfg, cpu_eba, cpu_dc and cpu_deba)
@@ -76,48 +76,52 @@ void m68k_tcg_init(void)
     char *p;
     int i;

-#define DEFO32(name, offset) QREG_##name = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
-#define DEFO64(name, offset) QREG_##name = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUM68KState, offset), #name);
+    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+#define DEFO32(name, offset) \
+    QREG_##name = tcg_global_mem_new_i32(cpu_env, \
+        offsetof(CPUM68KState, offset), #name);
+#define DEFO64(name, offset) \
+    QREG_##name = tcg_global_mem_new_i64(cpu_env, \
+        offsetof(CPUM68KState, offset), #name);
 #define DEFF64(name, offset) DEFO64(name, offset)
 #include "qregs.def"
 #undef DEFO32
 #undef DEFO64
 #undef DEFF64

-    cpu_halted = tcg_global_mem_new_i32(TCG_AREG0,
+    cpu_halted = tcg_global_mem_new_i32(cpu_env,
                                         -offsetof(M68kCPU, env) +
                                         offsetof(CPUState, halted), "HALTED");
    ... (likewise cpu_exception_index; the old cpu_env = tcg_global_reg_new_ptr()
    call that used to follow these is dropped, since cpu_env is now created
    first; cpu_dregs[i], cpu_aregs[i], cpu_fregs[i], cpu_macc[i], NULL_QREG and
    store_dummy switch from TCG_AREG0 to cpu_env as well)
@@ -1870,34 +1870,34 @@ void mb_tcg_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

-    env_debug = tcg_global_mem_new(TCG_AREG0,
+    env_debug = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUMBState, debug),
                                    "debug0");
    ... (likewise env_iflags, env_imm, env_btarget, env_btaken, env_res_addr,
    env_res_val, cpu_R[i] and cpu_SR[i])
@@ -19829,48 +19829,49 @@ void mips_tcg_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

     TCGV_UNUSED(cpu_gpr[0]);
     for (i = 1; i < 32; i++)
-        cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
+        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUMIPSState, active_tc.gpr[i]),
                                         regnames[i]);
    ... (likewise msa_wr_d[i * 2] and msa_wr_d[i * 2 + 1], on which the scalar
    FPU registers fpu_f64[i] are mapped, cpu_PC, cpu_HI[i], cpu_LO[i],
    cpu_dspctrl, bcond, btarget, hflags, fpu_fcr0 and fpu_fcr31)
@@ -106,16 +106,16 @@ void moxie_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
+    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUMoxieState, pc), "$pc");
    ... (likewise cpu_gregs[i], cc_a and cc_b)

     done_init = 1;
@@ -78,39 +78,39 @@ void openrisc_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cpu_sr = tcg_global_mem_new(TCG_AREG0,
+    cpu_sr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, sr), "sr");
    ... (likewise env_flags, cpu_pc, cpu_npc, cpu_ppc, jmp_pc, env_btaken,
    fpcsr, machi, maclo, fpmaddhi, fpmaddlo and cpu_R[i])
@@ -93,7 +93,7 @@ void ppc_translate_init(void)
     for (i = 0; i < 8; i++) {
         snprintf(p, cpu_reg_names_size, "crf%d", i);
-        cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
+        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, crf[i]), p);
@@ -101,28 +101,28 @@ void ppc_translate_init(void)
@@ -130,55 +130,55 @@ void ppc_translate_init(void)
    ... (likewise cpu_gpr[i], cpu_gprh[i], cpu_fpr[i], cpu_avrh[i] and
    cpu_avrl[i] in both HOST_WORDS_BIGENDIAN variants, cpu_vsr[i], cpu_nip,
    cpu_msr, cpu_ctr, cpu_lr, cpu_cfar under TARGET_PPC64, cpu_xer, cpu_so,
    cpu_ov, cpu_ca, cpu_reserve over reserve_addr, cpu_fpscr and
    cpu_access_type)

     done_init = 1;
@@ -168,35 +168,35 @@ void s390x_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
+    psw_addr = tcg_global_mem_new_i64(cpu_env,
                                       offsetof(CPUS390XState, psw.addr),
                                       "psw_addr");
    ... (likewise psw_mask, gbea, cc_op, cc_src, cc_dst, cc_vr, regs[i] and
    fregs[i] over vregs[i][0].d)
@@ -102,53 +102,53 @@ void sh4_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

     for (i = 0; i < 24; i++)
-        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
+        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                               offsetof(CPUSH4State, gregs[i]),
                                               gregnames[i]);
    ... (likewise cpu_pc, cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t, cpu_ssr,
    cpu_spc, cpu_gbr, cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl, cpu_pr,
    cpu_fpscr, cpu_fpul, cpu_flags, cpu_delayed_pc, cpu_ldst and cpu_fregs[i])
@@ -5353,75 +5353,79 @@ void gen_intermediate_code_init(CPUSPARCState *env)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cpu_regwptr = tcg_global_mem_new_ptr(TCG_AREG0,
+    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                          offsetof(CPUSPARCState, regwptr),
                                          "regwptr");
    ... (likewise, under TARGET_SPARC64, cpu_xcc, cpu_asi, cpu_fprs, cpu_gsr,
    cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr, cpu_hintp, cpu_htba,
    cpu_hver, cpu_ssr and cpu_softint plus cpu_ver over version, otherwise
    cpu_wim; and in all builds cpu_cond, cpu_cc_src, cpu_cc_src2, cpu_cc_dst,
    cpu_cc_op, cpu_psr, cpu_fsr, cpu_pc, cpu_npc, cpu_y, cpu_tbr outside
    CONFIG_USER_ONLY, cpu_gregs[i] and cpu_fpr[i]; several of these calls are
    also re-wrapped so the offsetof() argument moves to its own line)
@@ -2442,9 +2442,9 @@ void tilegx_tcg_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUTLGState, pc), "pc");
+    cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc");
     for (i = 0; i < TILEGX_R_COUNT; i++) {
-        cpu_regs[i] = tcg_global_mem_new_i64(TCG_AREG0,
+        cpu_regs[i] = tcg_global_mem_new_i64(cpu_env,
                                              offsetof(CPUTLGState, regs[i]),
                                              reg_names[i]);
     }
@@ -8350,13 +8350,13 @@ void cpu_state_reset(CPUTriCoreState *env)
 static void tricore_tcg_init_csfr(void)
 {
-    cpu_PCXI = tcg_global_mem_new(TCG_AREG0,
+    cpu_PCXI = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUTriCoreState, PCXI), "PCXI");
    ... (likewise cpu_PSW, cpu_PC and cpu_ICR)
 }

@@ -8370,30 +8370,30 @@ void tricore_tcg_init(void)
    ... (likewise cpu_gpr_a[i], cpu_gpr_d[i] and the PSW flag cache globals
    cpu_PSW_C, cpu_PSW_V, cpu_PSW_SV, cpu_PSW_AV and cpu_PSW_SAV)
@@ -71,7 +71,7 @@ void uc32_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

     for (i = 0; i < 32; i++) {
-        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
+        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUUniCore32State, regs[i]), regnames[i]);
     }
 }

@@ -1860,8 +1860,7 @@ static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
     }
 }

-/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
-   basic block 'tb'. */
+/* generate intermediate code for basic block 'tb'. */
 void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
 {
     UniCore32CPU *cpu = uc32_env_get_cpu(env);
@@ -218,24 +218,24 @@ void xtensa_translate_init(void)
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
-    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
+    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUXtensaState, pc), "pc");
@@ -243,7 +243,7 @@ void xtensa_translate_init(void)
    ... (likewise cpu_R[i], cpu_FR[i] over fregs[i].f32[FP_F32_LOW], cpu_SR[i]
    for named sregs and cpu_UR[i] for named uregs)
@@ -1572,7 +1572,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
     be->labels = l;
 }

-static void tcg_out_tb_finalize(TCGContext *s)
+static bool tcg_out_tb_finalize(TCGContext *s)
 {
     static const void * const helpers[8] = {
         helper_ret_stb_mmu,
@@ -1620,7 +1620,16 @@ static void tcg_out_tb_finalize(TCGContext *s)
         }

         reloc_pcrel21b_slot2(l->label_ptr, dest);
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one operation beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           generating code without having to check during generation.  */
+        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
+            return false;
+        }
     }
+    return true;
 }

 static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
@@ -56,7 +56,7 @@ static inline void tcg_out_tb_init(TCGContext *s)
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);

-static void tcg_out_tb_finalize(TCGContext *s)
+static bool tcg_out_tb_finalize(TCGContext *s)
 {
     TCGLabelQemuLdst *lb;

@@ -67,7 +67,16 @@ static void tcg_out_tb_finalize(TCGContext *s)
         } else {
             tcg_out_qemu_st_slow_path(s, lb);
         }
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one operation beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           generating code without having to check during generation.  */
+        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
+            return false;
+        }
     }
+    return true;
 }

 /*
@@ -38,6 +38,7 @@ static inline void tcg_out_tb_init(TCGContext *s)
  * Generate TB finalization at the end of block
  */

-static inline void tcg_out_tb_finalize(TCGContext *s)
+static inline bool tcg_out_tb_finalize(TCGContext *s)
 {
+    return true;
 }
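With all three backend variants above returning a bool, a caller can notice
that out-of-line load/store fragments ran past code_gen_highwater and abandon
the translation block instead of overflowing the buffer. A hedged sketch of
the caller side (the actual tcg_gen_code() caller is not part of this excerpt;
the error value and retry policy here are illustrative only):

    /* Sketch: finalize the TB; on (pending) overflow report failure so the
       caller can flush code_gen_buffer and regenerate the TB. */
    if (!tcg_out_tb_finalize(s)) {
        return -1;
    }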
 tcg/tcg.c | 568
@@ -111,7 +111,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
 static void tcg_out_tb_init(TCGContext *s);
-static void tcg_out_tb_finalize(TCGContext *s);
+static bool tcg_out_tb_finalize(TCGContext *s);
@@ -389,11 +389,7 @@ void tcg_prologue_init(TCGContext *s)
     /* Compute a high-water mark, at which we voluntarily flush the buffer
        and start over.  The size here is arbitrary, significantly larger
        than we expect the code generation for any one opcode to require.  */
-    /* ??? We currently have no good estimate for, or checks in,
-       tcg_out_tb_finalize.  If there are quite a lot of guest memory ops,
-       the number of out-of-line fragments could be quite high.  In the
-       short-term, increase the highwater buffer.  */
-    s->code_gen_highwater = s->code_gen_buffer + (total_size - 64*1024);
+    s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);

     tcg_register_jit(s->code_gen_buffer, total_size);
@@ -407,13 +403,6 @@ void tcg_prologue_init(TCGContext *s)
 #endif
 }

-void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
-{
-    s->frame_start = start;
-    s->frame_end = start + size;
-    s->frame_reg = reg;
-}
-
 void tcg_func_start(TCGContext *s)
 {
     tcg_pool_reset(s);
@@ -437,128 +426,125 @@ void tcg_func_start(TCGContext *s)
     s->be = tcg_malloc(sizeof(TCGBackendData));
 }

+static inline int temp_idx(TCGContext *s, TCGTemp *ts)
+{
+    ptrdiff_t n = ts - s->temps;
+    tcg_debug_assert(n >= 0 && n < s->nb_temps);
+    return n;
+}
+
+static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
+{
+    int n = s->nb_temps++;
+    tcg_debug_assert(n < TCG_MAX_TEMPS);
+    return memset(&s->temps[n], 0, sizeof(TCGTemp));
+}
+
+static inline TCGTemp *tcg_global_alloc(TCGContext *s)
+{
+    tcg_debug_assert(s->nb_globals == s->nb_temps);
+    s->nb_globals++;
+    return tcg_temp_alloc(s);
+}
    ... (these helpers replace the old bounds-check-only tcg_temp_alloc(s, n).
    In the rest of this hunk, tcg_global_reg_new_internal() now takes the
    TCGContext plus a TCGReg and is built on tcg_global_alloc()/temp_idx();
    tcg_set_frame() moves here, takes a TCGReg and records s->frame_temp via
    tcg_global_reg_new_internal(s, TCG_TYPE_PTR, reg, "_frame") instead of
    storing s->frame_reg; tcg_global_reg_new_i32/i64() take a TCGReg and check
    reserved_regs themselves; tcg_global_mem_new_internal() loses its static
    qualifier, takes a TCGv_ptr base, records ts->mem_base =
    &s->temps[GET_TCGV_PTR(base)] in place of ts->mem_reg, and on 32-bit hosts
    splits an I64 global into two consecutive I32 temps named
    "<name>_0"/"<name>_1", with the 4-byte half offsets swapped on
    HOST_WORDS_BIGENDIAN hosts and checked with tcg_debug_assert(ts2 == ts + 1);
    the old tcg_global_mem_new_i32/i64() definitions that followed it are
    dropped from this file)
@@ -572,38 +558,30 @@ static inline int tcg_temp_new_internal(TCGType type, int temp_local)
         ts = &s->temps[idx];
         ts->temp_allocated = 1;
-        assert(ts->base_type == type);
-        assert(ts->temp_local == temp_local);
+        tcg_debug_assert(ts->base_type == type);
+        tcg_debug_assert(ts->temp_local == temp_local);
     } else {
    ... (in the else branch, the open-coded growth of s->temps[], including the
    "#if TCG_TARGET_REG_BITS == 32" special case for TCG_TYPE_I64, is replaced
    by tcg_temp_alloc(); on 32-bit hosts an I64 temporary now takes two
    consecutive TCGTemp slots, checked with tcg_debug_assert(ts2 == ts + 1),
    and the index is recovered with idx = temp_idx(s, ts))
     }

 #if defined(CONFIG_DEBUG_TCG)
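Taken together, the two hunks above replace index-based temporary bookkeeping
with TCGTemp pointers. A hedged sketch of how the new helpers compose on a
32-bit host (illustrative only; it assumes a TCGContext *s as in the code
above):

    /* An I64 temporary now occupies two consecutive, zero-initialized
       TCGTemp slots; temp_idx() maps the pointer back to an index. */
    TCGTemp *lo = tcg_temp_alloc(s);          /* bumps s->nb_temps */
    TCGTemp *hi = tcg_temp_alloc(s);
    tcg_debug_assert(hi == lo + 1);
    int idx = temp_idx(s, lo);                /* == lo - s->temps */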
@@ -922,37 +900,30 @@ static void tcg_reg_alloc_start(TCGContext *s)
         ts->mem_allocated = 0;
         ts->fixed_reg = 0;
     }
-    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
-        s->reg_to_temp[i] = -1;
-    }
+
+    memset(s->reg_to_temp, 0, sizeof(s->reg_to_temp));
 }

-static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size,
-                                 int idx)
+static char *tcg_get_arg_str_ptr(TCGContext *s, char *buf, int buf_size,
+                                 TCGTemp *ts)
 {
-    TCGTemp *ts;
+    int idx = temp_idx(s, ts);

-    assert(idx >= 0 && idx < s->nb_temps);
-    ts = &s->temps[idx];
     if (idx < s->nb_globals) {
         pstrcpy(buf, buf_size, ts->name);
+    } else if (ts->temp_local) {
+        snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
     } else {
-        if (ts->temp_local)
-            snprintf(buf, buf_size, "loc%d", idx - s->nb_globals);
-        else
-            snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
+        snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals);
     }
     return buf;
 }

-char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg)
+static char *tcg_get_arg_str_idx(TCGContext *s, char *buf,
+                                 int buf_size, int idx)
 {
-    return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg));
-}
-
-char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg)
-{
-    return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg));
+    assert(idx >= 0 && idx < s->nb_temps);
+    return tcg_get_arg_str_ptr(s, buf, buf_size, &s->temps[idx]);
 }

 /* Find helper name.  */
@ -1580,8 +1551,7 @@ static void tcg_liveness_analysis(TCGContext *s)
/* dummy liveness analysis */
static void tcg_liveness_analysis(TCGContext *s)
{
int nb_ops;
nb_ops = s->gen_opc_ptr - s->gen_opc_buf;
int nb_ops = s->gen_next_op_idx;

s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t));

@ -1605,7 +1575,8 @@ static void dump_regs(TCGContext *s)
printf("%s", tcg_target_reg_names[ts->reg]);
break;
case TEMP_VAL_MEM:
printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
printf("%d(%s)", (int)ts->mem_offset,
tcg_target_reg_names[ts->mem_base->reg]);
break;
case TEMP_VAL_CONST:
printf("$0x%" TCG_PRIlx, ts->val);

@ -1621,43 +1592,41 @@ static void dump_regs(TCGContext *s)
}

for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
if (s->reg_to_temp[i] >= 0) {
if (s->reg_to_temp[i] != NULL) {
printf("%s: %s\n",
tcg_target_reg_names[i],
tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
tcg_get_arg_str_ptr(s, buf, sizeof(buf), s->reg_to_temp[i]));
}
}
}

static void check_regs(TCGContext *s)
{
int reg, k;
TCGReg reg;
int k;
TCGTemp *ts;
char buf[64];

for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
k = s->reg_to_temp[reg];
if (k >= 0) {
ts = &s->temps[k];
if (ts->val_type != TEMP_VAL_REG ||
ts->reg != reg) {
for (reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
ts = s->reg_to_temp[reg];
if (ts != NULL) {
if (ts->val_type != TEMP_VAL_REG || ts->reg != reg) {
printf("Inconsistency for register %s:\n",
tcg_target_reg_names[reg]);
goto fail;
}
}
}
for(k = 0; k < s->nb_temps; k++) {
for (k = 0; k < s->nb_temps; k++) {
ts = &s->temps[k];
if (ts->val_type == TEMP_VAL_REG &&
!ts->fixed_reg &&
s->reg_to_temp[ts->reg] != k) {
printf("Inconsistency for temp %s:\n",
tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
if (ts->val_type == TEMP_VAL_REG && !ts->fixed_reg
&& s->reg_to_temp[ts->reg] != ts) {
printf("Inconsistency for temp %s:\n",
tcg_get_arg_str_ptr(s, buf, sizeof(buf), ts));
fail:
printf("reg state:\n");
dump_regs(s);
tcg_abort();
printf("reg state:\n");
dump_regs(s);
tcg_abort();
}
}
}

@ -1678,46 +1647,43 @@ static void temp_allocate_frame(TCGContext *s, int temp)
tcg_abort();
}
ts->mem_offset = s->current_frame_offset;
ts->mem_reg = s->frame_reg;
ts->mem_base = s->frame_temp;
ts->mem_allocated = 1;
s->current_frame_offset += sizeof(tcg_target_long);
}

/* sync register 'reg' by saving it to the corresponding temporary */
static inline void tcg_reg_sync(TCGContext *s, int reg)
static inline void tcg_reg_sync(TCGContext *s, TCGReg reg)
{
TCGTemp *ts;
int temp;
TCGTemp *ts = s->reg_to_temp[reg];

temp = s->reg_to_temp[reg];
ts = &s->temps[temp];
assert(ts->val_type == TEMP_VAL_REG);
if (!ts->mem_coherent && !ts->fixed_reg) {
if (!ts->mem_allocated) {
temp_allocate_frame(s, temp);
temp_allocate_frame(s, temp_idx(s, ts));
}
tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
tcg_out_st(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
}
ts->mem_coherent = 1;
}

/* free register 'reg' by spilling the corresponding temporary if necessary */
static void tcg_reg_free(TCGContext *s, int reg)
static void tcg_reg_free(TCGContext *s, TCGReg reg)
{
int temp;
TCGTemp *ts = s->reg_to_temp[reg];

temp = s->reg_to_temp[reg];
if (temp != -1) {
if (ts != NULL) {
tcg_reg_sync(s, reg);
s->temps[temp].val_type = TEMP_VAL_MEM;
s->reg_to_temp[reg] = -1;
ts->val_type = TEMP_VAL_MEM;
s->reg_to_temp[reg] = NULL;
}
}

/* Allocate a register belonging to reg1 & ~reg2 */
static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
{
int i, reg;
int i;
TCGReg reg;
TCGRegSet reg_ct;

tcg_regset_andnot(reg_ct, reg1, reg2);

@ -1725,7 +1691,7 @@ static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
/* first try free registers */
for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) {
reg = tcg_target_reg_alloc_order[i];
if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1)
if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == NULL)
return reg;
}

@ -1741,64 +1707,82 @@ static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
tcg_abort();
}

/* mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, int temp)
/* Make sure the temporary is in a register. If needed, allocate the register
from DESIRED while avoiding ALLOCATED. */
static void temp_load(TCGContext *s, TCGTemp *ts, TCGRegSet desired_regs,
TCGRegSet allocated_regs)
{
TCGTemp *ts;
TCGReg reg;

ts = &s->temps[temp];
if (!ts->fixed_reg) {
if (ts->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ts->reg] = -1;
}
if (temp < s->nb_globals || ts->temp_local) {
ts->val_type = TEMP_VAL_MEM;
} else {
ts->val_type = TEMP_VAL_DEAD;
}
switch (ts->val_type) {
case TEMP_VAL_REG:
return;
case TEMP_VAL_CONST:
reg = tcg_reg_alloc(s, desired_regs, allocated_regs);
tcg_out_movi(s, ts->type, reg, ts->val);
ts->mem_coherent = 0;
break;
case TEMP_VAL_MEM:
reg = tcg_reg_alloc(s, desired_regs, allocated_regs);
tcg_out_ld(s, ts->type, reg, ts->mem_base->reg, ts->mem_offset);
ts->mem_coherent = 1;
break;
case TEMP_VAL_DEAD:
default:
tcg_abort();
}
ts->reg = reg;
ts->val_type = TEMP_VAL_REG;
s->reg_to_temp[reg] = ts;
}

/* mark a temporary as dead. */
static inline void temp_dead(TCGContext *s, TCGTemp *ts)
{
if (ts->fixed_reg) {
return;
}
if (ts->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ts->reg] = NULL;
}
ts->val_type = (temp_idx(s, ts) < s->nb_globals || ts->temp_local
? TEMP_VAL_MEM : TEMP_VAL_DEAD);
}

/* sync a temporary to memory. 'allocated_regs' is used in case a
temporary registers needs to be allocated to store a constant. */
static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
static void temp_sync(TCGContext *s, TCGTemp *ts, TCGRegSet allocated_regs)
{
TCGTemp *ts;

ts = &s->temps[temp];
if (!ts->fixed_reg) {
switch(ts->val_type) {
case TEMP_VAL_CONST:
ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
allocated_regs);
ts->val_type = TEMP_VAL_REG;
s->reg_to_temp[ts->reg] = temp;
ts->mem_coherent = 0;
tcg_out_movi(s, ts->type, ts->reg, ts->val);
/* fallthrough*/
case TEMP_VAL_REG:
tcg_reg_sync(s, ts->reg);
break;
case TEMP_VAL_DEAD:
case TEMP_VAL_MEM:
break;
default:
tcg_abort();
}
if (ts->fixed_reg) {
return;
}
switch (ts->val_type) {
case TEMP_VAL_CONST:
temp_load(s, ts, tcg_target_available_regs[ts->type], allocated_regs);
/* fallthrough */
case TEMP_VAL_REG:
tcg_reg_sync(s, ts->reg);
break;
case TEMP_VAL_DEAD:
case TEMP_VAL_MEM:
break;
default:
tcg_abort();
}
}

/* save a temporary to memory. 'allocated_regs' is used in case a
temporary registers needs to be allocated to store a constant. */
static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
static inline void temp_save(TCGContext *s, TCGTemp *ts,
TCGRegSet allocated_regs)
{
#ifdef USE_LIVENESS_ANALYSIS
/* The liveness analysis already ensures that globals are back
in memory. Keep an assert for safety. */
assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg);
tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
#else
temp_sync(s, temp, allocated_regs);
temp_dead(s, temp);
temp_sync(s, ts, allocated_regs);
temp_dead(s, ts);
#endif
}
@ -1809,8 +1793,8 @@ static void save_globals(TCGContext *s, TCGRegSet allocated_regs)
{
int i;

for(i = 0; i < s->nb_globals; i++) {
temp_save(s, i, allocated_regs);
for (i = 0; i < s->nb_globals; i++) {
temp_save(s, &s->temps[i], allocated_regs);
}
}

@ -1822,11 +1806,13 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
int i;

for (i = 0; i < s->nb_globals; i++) {
TCGTemp *ts = &s->temps[i];
#ifdef USE_LIVENESS_ANALYSIS
assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg ||
s->temps[i].mem_coherent);
tcg_debug_assert(ts->val_type != TEMP_VAL_REG
|| ts->fixed_reg
|| ts->mem_coherent);
#else
temp_sync(s, i, allocated_regs);
temp_sync(s, ts, allocated_regs);
#endif
}
}

@ -1835,20 +1821,19 @@ static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
TCGTemp *ts;
int i;

for(i = s->nb_globals; i < s->nb_temps; i++) {
ts = &s->temps[i];
for (i = s->nb_globals; i < s->nb_temps; i++) {
TCGTemp *ts = &s->temps[i];
if (ts->temp_local) {
temp_save(s, i, allocated_regs);
temp_save(s, ts, allocated_regs);
} else {
#ifdef USE_LIVENESS_ANALYSIS
/* The liveness analysis already ensures that temps are dead.
Keep an assert for safety. */
assert(ts->val_type == TEMP_VAL_DEAD);
#else
temp_dead(s, i);
temp_dead(s, ts);
#endif
}
}

@ -1874,16 +1859,17 @@ static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
tcg_out_movi(s, ots->type, ots->reg, val);
} else {
/* The movi is not explicitly generated here */
if (ots->val_type == TEMP_VAL_REG)
s->reg_to_temp[ots->reg] = -1;
if (ots->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ots->reg] = NULL;
}
ots->val_type = TEMP_VAL_CONST;
ots->val = val;
}
if (NEED_SYNC_ARG(0)) {
temp_sync(s, args[0], s->reserved_regs);
temp_sync(s, ots, s->reserved_regs);
}
if (IS_DEAD_ARG(0)) {
temp_dead(s, args[0]);
temp_dead(s, ots);
}
}

@ -1909,17 +1895,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
we don't have to reload SOURCE the next time it is used. */
if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
|| ts->val_type == TEMP_VAL_MEM) {
ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[itype],
allocated_regs);
if (ts->val_type == TEMP_VAL_MEM) {
tcg_out_ld(s, itype, ts->reg, ts->mem_reg, ts->mem_offset);
ts->mem_coherent = 1;
} else if (ts->val_type == TEMP_VAL_CONST) {
tcg_out_movi(s, itype, ts->reg, ts->val);
ts->mem_coherent = 0;
}
s->reg_to_temp[ts->reg] = args[1];
ts->val_type = TEMP_VAL_REG;
temp_load(s, ts, tcg_target_available_regs[itype], allocated_regs);
}

if (IS_DEAD_ARG(0) && !ots->fixed_reg) {

@ -1931,20 +1907,20 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
if (!ots->mem_allocated) {
temp_allocate_frame(s, args[0]);
}
tcg_out_st(s, otype, ts->reg, ots->mem_reg, ots->mem_offset);
tcg_out_st(s, otype, ts->reg, ots->mem_base->reg, ots->mem_offset);
if (IS_DEAD_ARG(1)) {
temp_dead(s, args[1]);
temp_dead(s, ts);
}
temp_dead(s, args[0]);
temp_dead(s, ots);
} else if (ts->val_type == TEMP_VAL_CONST) {
/* propagate constant */
if (ots->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ots->reg] = -1;
s->reg_to_temp[ots->reg] = NULL;
}
ots->val_type = TEMP_VAL_CONST;
ots->val = ts->val;
if (IS_DEAD_ARG(1)) {
temp_dead(s, args[1]);
temp_dead(s, ts);
}
} else {
/* The code in the first if block should have moved the

@ -1953,10 +1929,10 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
/* the mov can be suppressed */
if (ots->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ots->reg] = -1;
s->reg_to_temp[ots->reg] = NULL;
}
ots->reg = ts->reg;
temp_dead(s, args[1]);
temp_dead(s, ts);
} else {
if (ots->val_type != TEMP_VAL_REG) {
/* When allocating a new register, make sure to not spill the

@ -1969,7 +1945,7 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
}
ots->val_type = TEMP_VAL_REG;
ots->mem_coherent = 0;
s->reg_to_temp[ots->reg] = args[0];
s->reg_to_temp[ots->reg] = ots;
if (NEED_SYNC_ARG(0)) {
tcg_reg_sync(s, ots->reg);
}
@ -1982,7 +1958,8 @@ static void tcg_reg_alloc_op(TCGContext *s,
uint8_t sync_args)
{
TCGRegSet allocated_regs;
int i, k, nb_iargs, nb_oargs, reg;
int i, k, nb_iargs, nb_oargs;
TCGReg reg;
TCGArg arg;
const TCGArgConstraint *arg_ct;
TCGTemp *ts;

@ -2004,30 +1981,17 @@ static void tcg_reg_alloc_op(TCGContext *s,
arg = args[i];
arg_ct = &def->args_ct[i];
ts = &s->temps[arg];
if (ts->val_type == TEMP_VAL_MEM) {
reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
ts->val_type = TEMP_VAL_REG;
ts->reg = reg;
ts->mem_coherent = 1;
s->reg_to_temp[reg] = arg;
} else if (ts->val_type == TEMP_VAL_CONST) {
if (tcg_target_const_match(ts->val, ts->type, arg_ct)) {
/* constant is OK for instruction */
const_args[i] = 1;
new_args[i] = ts->val;
goto iarg_end;
} else {
/* need to move to a register */
reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
tcg_out_movi(s, ts->type, reg, ts->val);
ts->val_type = TEMP_VAL_REG;
ts->reg = reg;
ts->mem_coherent = 0;
s->reg_to_temp[reg] = arg;
}

if (ts->val_type == TEMP_VAL_CONST
&& tcg_target_const_match(ts->val, ts->type, arg_ct)) {
/* constant is OK for instruction */
const_args[i] = 1;
new_args[i] = ts->val;
goto iarg_end;
}
assert(ts->val_type == TEMP_VAL_REG);

temp_load(s, ts, arg_ct->u.regs, allocated_regs);

if (arg_ct->ct & TCG_CT_IALIAS) {
if (ts->fixed_reg) {
/* if fixed register, we must allocate a new register

@ -2072,7 +2036,7 @@ static void tcg_reg_alloc_op(TCGContext *s,
/* mark dead temporaries and free the associated registers */
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
if (IS_DEAD_ARG(i)) {
temp_dead(s, args[i]);
temp_dead(s, &s->temps[args[i]]);
}
}

@ -2081,9 +2045,9 @@ static void tcg_reg_alloc_op(TCGContext *s,
} else {
if (def->flags & TCG_OPF_CALL_CLOBBER) {
/* XXX: permit generic clobber register list ? */
for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
tcg_reg_free(s, reg);
for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
tcg_reg_free(s, i);
}
}
}

@ -2115,14 +2079,14 @@ static void tcg_reg_alloc_op(TCGContext *s,
/* if a fixed register is used, then a move will be done afterwards */
if (!ts->fixed_reg) {
if (ts->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ts->reg] = -1;
s->reg_to_temp[ts->reg] = NULL;
}
ts->val_type = TEMP_VAL_REG;
ts->reg = reg;
/* temp value is modified, so the value kept in memory is
potentially not the same */
ts->mem_coherent = 0;
s->reg_to_temp[reg] = arg;
s->reg_to_temp[reg] = ts;
}
oarg_end:
new_args[i] = reg;

@ -2143,7 +2107,7 @@ static void tcg_reg_alloc_op(TCGContext *s,
tcg_reg_sync(s, reg);
}
if (IS_DEAD_ARG(i)) {
temp_dead(s, args[i]);
temp_dead(s, ts);
}
}
}
@ -2158,7 +2122,8 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
const TCGArg * const args, uint16_t dead_args,
uint8_t sync_args)
{
int flags, nb_regs, i, reg;
int flags, nb_regs, i;
TCGReg reg;
TCGArg arg;
TCGTemp *ts;
intptr_t stack_offset;

@ -2194,23 +2159,9 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
#endif
if (arg != TCG_CALL_DUMMY_ARG) {
ts = &s->temps[arg];
if (ts->val_type == TEMP_VAL_REG) {
tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
} else if (ts->val_type == TEMP_VAL_MEM) {
reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
s->reserved_regs);
/* XXX: not correct if reading values from the stack */
tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
} else if (ts->val_type == TEMP_VAL_CONST) {
reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
s->reserved_regs);
/* XXX: sign extend may be needed on some targets */
tcg_out_movi(s, ts->type, reg, ts->val);
tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
} else {
tcg_abort();
}
temp_load(s, ts, tcg_target_available_regs[ts->type],
s->reserved_regs);
tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
}
#ifndef TCG_TARGET_STACK_GROWSUP
stack_offset += sizeof(tcg_target_long);

@ -2225,18 +2176,19 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
ts = &s->temps[arg];
reg = tcg_target_call_iarg_regs[i];
tcg_reg_free(s, reg);

if (ts->val_type == TEMP_VAL_REG) {
if (ts->reg != reg) {
tcg_out_mov(s, ts->type, reg, ts->reg);
}
} else if (ts->val_type == TEMP_VAL_MEM) {
tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
} else if (ts->val_type == TEMP_VAL_CONST) {
/* XXX: sign extend ? */
tcg_out_movi(s, ts->type, reg, ts->val);
} else {
tcg_abort();
TCGRegSet arg_set;

tcg_regset_clear(arg_set);
tcg_regset_set_reg(arg_set, reg);
temp_load(s, ts, arg_set, allocated_regs);
}

tcg_regset_set_reg(allocated_regs, reg);
}
}

@ -2244,14 +2196,14 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
/* mark dead temporaries and free the associated registers */
for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
if (IS_DEAD_ARG(i)) {
temp_dead(s, args[i]);
temp_dead(s, &s->temps[args[i]]);
}
}

/* clobber call registers */
for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
tcg_reg_free(s, reg);
for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
tcg_reg_free(s, i);
}
}

@ -2272,7 +2224,7 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
arg = args[i];
ts = &s->temps[arg];
reg = tcg_target_call_oarg_regs[i];
assert(s->reg_to_temp[reg] == -1);
assert(s->reg_to_temp[reg] == NULL);

if (ts->fixed_reg) {
if (ts->reg != reg) {

@ -2280,17 +2232,17 @@ static void tcg_reg_alloc_call(TCGContext *s, int nb_oargs, int nb_iargs,
}
} else {
if (ts->val_type == TEMP_VAL_REG) {
s->reg_to_temp[ts->reg] = -1;
s->reg_to_temp[ts->reg] = NULL;
}
ts->val_type = TEMP_VAL_REG;
ts->reg = reg;
ts->mem_coherent = 0;
s->reg_to_temp[reg] = arg;
s->reg_to_temp[reg] = ts;
if (NEED_SYNC_ARG(i)) {
tcg_reg_sync(s, reg);
}
if (IS_DEAD_ARG(i)) {
temp_dead(s, args[i]);
temp_dead(s, ts);
}
}
}
@ -2420,7 +2372,7 @@ int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
}
break;
case INDEX_op_discard:
temp_dead(s, args[0]);
temp_dead(s, &s->temps[args[0]]);
break;
case INDEX_op_set_label:
tcg_reg_alloc_bb_end(s, s->reserved_regs);

@ -2456,7 +2408,9 @@ int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);

/* Generate TB finalization at the end of block */
tcg_out_tb_finalize(s);
if (!tcg_out_tb_finalize(s)) {
return -1;
}

/* flush instruction cache */
flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);

tcg/tcg.h (49 changes)
@ -448,8 +448,7 @@ typedef enum TCGTempVal {
} TCGTempVal;

typedef struct TCGTemp {
unsigned int reg:8;
unsigned int mem_reg:8;
TCGReg reg:8;
TCGTempVal val_type:8;
TCGType base_type:8;
TCGType type:8;

@ -462,6 +461,7 @@ typedef struct TCGTemp {
unsigned int temp_allocated:1; /* never used for code gen */

tcg_target_long val;
struct TCGTemp *mem_base;
intptr_t mem_offset;
const char *name;
} TCGTemp;

@ -515,7 +515,7 @@ struct TCGContext {
intptr_t current_frame_offset;
intptr_t frame_start;
intptr_t frame_end;
int frame_reg;
TCGTemp *frame_temp;

tcg_insn_unit *code_ptr;

@ -572,9 +572,9 @@ struct TCGContext {
TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

/* tells in which temporary a given register is. It does not take
into account fixed registers */
int reg_to_temp[TCG_TARGET_NB_REGS];
/* Tells which temporary holds a given register.
It does not take into account fixed registers */
TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];

TCGOp gen_op_buf[OPC_BUF_SIZE];
TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];

@ -628,35 +628,52 @@ void tcg_func_start(TCGContext *s);

int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf);

void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size);
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);

int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);

TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);

TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name);
TCGv_i32 tcg_global_mem_new_i32(int reg, intptr_t offset, const char *name);
TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);

static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
const char *name)
{
int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
return MAKE_TCGV_I32(idx);
}

static inline TCGv_i32 tcg_temp_new_i32(void)
{
return tcg_temp_new_internal_i32(0);
}

static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
return tcg_temp_new_internal_i32(1);
}
void tcg_temp_free_i32(TCGv_i32 arg);
char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg);

TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name);
TCGv_i64 tcg_global_mem_new_i64(int reg, intptr_t offset, const char *name);
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);
static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
const char *name)
{
int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
return MAKE_TCGV_I64(idx);
}

static inline TCGv_i64 tcg_temp_new_i64(void)
{
return tcg_temp_new_internal_i64(0);
}

static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
return tcg_temp_new_internal_i64(1);
}
void tcg_temp_free_i64(TCGv_i64 arg);
char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg);

#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of