mirror of https://gitee.com/openkylin/qemu.git
exec: refactor cpu_restore_state
Refactor common code around calls to cpu_restore_state(). tb_find_pc() now has no external users, so make it static.

Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
parent 5b6dd8683d
commit a8a826a3c3
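The substance of the change is the cpu_restore_state() signature: callers no longer look up the TranslationBlock themselves via tb_find_pc() and pass it in, they simply hand over env and the host return address. The before/after sketch below is illustrative only (a generic target helper holding a retaddr, not copied verbatim from any single hunk):

    /* Before: every caller did the TB lookup by hand. */
    TranslationBlock *tb = tb_find_pc(retaddr);
    if (tb) {
        /* the PC is inside the translated code */
        cpu_restore_state(tb, env, retaddr);
    }

    /* After: the lookup happens inside cpu_restore_state(), which now
     * returns true when retaddr really pointed into translated code. */
    if (retaddr) {
        cpu_restore_state(env, retaddr);
    }

The per-target hunks that follow are mostly this mechanical substitution; alpha, sh4, sparc and xtensa also drop the private wrappers they had grown around the old idiom, and CRIS uses the new boolean result to re-evaluate flags only when retranslation actually happened.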
@@ -80,8 +80,8 @@ void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
 void cpu_gen_init(void);
 int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                  int *gen_code_size_ptr);
-int cpu_restore_state(struct TranslationBlock *tb,
-                      CPUArchState *env, uintptr_t searched_pc);
+bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
+
 void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
 void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
 TranslationBlock *tb_gen_code(CPUArchState *env,
@@ -275,8 +275,6 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     }
 }
 
-TranslationBlock *tb_find_pc(uintptr_t pc_ptr);
-
 #include "qemu-lock.h"
 
 extern spinlock_t tb_lock;
@@ -387,7 +387,6 @@ static void patch_instruction(VAPICROMState *s, CPUX86State *env, target_ulong i
     VAPICHandlers *handlers;
     uint8_t opcode[2];
     uint32_t imm32;
-    TranslationBlock *current_tb;
     target_ulong current_pc = 0;
     target_ulong current_cs_base = 0;
     int current_flags = 0;
@@ -399,8 +398,7 @@ static void patch_instruction(VAPICROMState *s, CPUX86State *env, target_ulong i
     }
 
     if (!kvm_enabled()) {
-        current_tb = tb_find_pc(env->mem_io_pc);
-        cpu_restore_state(current_tb, env, env->mem_io_pc);
+        cpu_restore_state(env, env->mem_io_pc);
         cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                              &current_flags);
     }
@@ -494,16 +494,6 @@ void cpu_dump_state (CPUAlphaState *env, FILE *f, fprintf_function cpu_fprintf,
     cpu_fprintf(f, "\n");
 }
 
-void do_restore_state(CPUAlphaState *env, uintptr_t retaddr)
-{
-    if (retaddr) {
-        TranslationBlock *tb = tb_find_pc(retaddr);
-        if (tb) {
-            cpu_restore_state(tb, env, retaddr);
-        }
-    }
-}
-
 /* This should only be called from translate, via gen_excp.
    We expect that ENV->PC has already been updated. */
 void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
@@ -519,7 +509,9 @@ void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
 {
     env->exception_index = excp;
     env->error_code = error;
-    do_restore_state(env, retaddr);
+    if (retaddr) {
+        cpu_restore_state(env, retaddr);
+    }
     cpu_loop_exit(env);
 }
 
@@ -94,7 +94,9 @@ static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
     uint64_t pc;
     uint32_t insn;
 
-    do_restore_state(env, retaddr);
+    if (retaddr) {
+        cpu_restore_state(env, retaddr);
+    }
 
     pc = env->pc;
     insn = cpu_ldl_code(env, pc);
@@ -143,7 +145,9 @@ void tlb_fill(CPUAlphaState *env, target_ulong addr, int is_write,
 
     ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret != 0)) {
-        do_restore_state(env, retaddr);
+        if (retaddr) {
+            cpu_restore_state(env, retaddr);
+        }
         /* Exception index and error code are already set */
         cpu_loop_exit(env);
     }
@@ -74,19 +74,13 @@ uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
 void tlb_fill(CPUARMState *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret)) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (tb) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-            }
+            cpu_restore_state(env, retaddr);
         }
         raise_exception(env, env->exception_index);
     }
@@ -57,7 +57,6 @@
 void tlb_fill(CPUCRISState *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
@@ -66,12 +65,7 @@ void tlb_fill(CPUCRISState *env, target_ulong addr, int is_write, int mmu_idx,
     if (unlikely(ret)) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (tb) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-
+            if (cpu_restore_state(env, retaddr)) {
                 /* Evaluate flags after retranslation. */
                 helper_top_evaluate_flags(env);
             }
@@ -1196,15 +1196,12 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
 
 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
 {
-    TranslationBlock *tb;
-
     if (kvm_enabled()) {
         env->tpr_access_type = access;
 
         cpu_interrupt(env, CPU_INTERRUPT_TPR);
     } else {
-        tb = tb_find_pc(env->mem_io_pc);
-        cpu_restore_state(tb, env, env->mem_io_pc);
+        cpu_restore_state(env, env->mem_io_pc);
 
         apic_handle_tpr_access_report(env->apic_state, env->eip, access);
     }
@@ -135,19 +135,13 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
 void tlb_fill(CPUX86State *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (ret) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (tb) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-            }
+            cpu_restore_state(env, retaddr);
         }
         raise_exception_err(env, env->exception_index, env->error_code);
     }
@@ -76,19 +76,13 @@ uint32_t helper_rcsr_jrx(CPULM32State *env)
 void tlb_fill(CPULM32State *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     ret = cpu_lm32_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret)) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (tb) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-            }
+            cpu_restore_state(env, retaddr);
         }
         cpu_loop_exit(env);
     }
@@ -56,19 +56,13 @@ extern int semihosting_enabled;
 void tlb_fill(CPUM68KState *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     ret = cpu_m68k_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret)) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (tb) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-            }
+            cpu_restore_state(env, retaddr);
         }
         cpu_loop_exit(env);
     }
@@ -44,19 +44,13 @@
 void tlb_fill(CPUMBState *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     ret = cpu_mb_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret)) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (tb) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-            }
+            cpu_restore_state(env, retaddr);
         }
         cpu_loop_exit(env);
     }
@@ -38,7 +38,6 @@ static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                                          int error_code,
                                                          uintptr_t pc)
 {
-    TranslationBlock *tb;
 #if 1
     if (exception < 0x100)
         qemu_log("%s: %d %d\n", __func__, exception, error_code);
@@ -48,12 +47,7 @@ static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
 
     if (pc) {
         /* now we have a real cpu fault */
-        tb = tb_find_pc(pc);
-        if (tb) {
-            /* the PC is inside the translated code. It means that we have
-               a virtual CPU fault */
-            cpu_restore_state(tb, env, pc);
-        }
+        cpu_restore_state(env, pc);
     }
 
     cpu_loop_exit(env);
@@ -39,8 +39,6 @@
 void tlb_fill(CPUOpenRISCState *env, target_ulong addr, int is_write,
               int mmu_idx, uintptr_t retaddr)
 {
-    TranslationBlock *tb;
-    unsigned long pc;
     int ret;
 
     ret = cpu_openrisc_handle_mmu_fault(env, addr, is_write, mmu_idx);
@@ -48,13 +46,7 @@ void tlb_fill(CPUOpenRISCState *env, target_ulong addr, int is_write,
     if (ret) {
         if (retaddr) {
             /* now we have a real cpu fault. */
-            pc = (unsigned long)retaddr;
-            tb = tb_find_pc(pc);
-            if (tb) {
-                /* the PC is inside the translated code. It means that we
-                   have a virtual CPU fault. */
-                cpu_restore_state(tb, env, pc);
-            }
+            cpu_restore_state(env, retaddr);
         }
         /* Raise Exception. */
         cpu_loop_exit(env);
@@ -275,19 +275,13 @@ STVE(stvewx, cpu_stl_data, bswap32, u32)
 void tlb_fill(CPUPPCState *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret != 0)) {
         if (likely(retaddr)) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (likely(tb)) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-            }
+            cpu_restore_state(env, retaddr);
         }
         helper_raise_exception_err(env, env->exception_index, env->error_code);
     }
@@ -47,19 +47,13 @@
 void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
               uintptr_t retaddr)
 {
-    TranslationBlock *tb;
     int ret;
 
     ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret != 0)) {
         if (likely(retaddr)) {
             /* now we have a real cpu fault */
-            tb = tb_find_pc(retaddr);
-            if (likely(tb)) {
-                /* the PC is inside the translated code. It means that we have
-                   a virtual CPU fault */
-                cpu_restore_state(tb, env, retaddr);
-            }
+            cpu_restore_state(env, retaddr);
         }
         cpu_loop_exit(env);
     }
@@ -21,21 +21,6 @@
 #include "cpu.h"
 #include "helper.h"
 
-static inline void cpu_restore_state_from_retaddr(CPUSH4State *env,
-                                                  uintptr_t retaddr)
-{
-    TranslationBlock *tb;
-
-    if (retaddr) {
-        tb = tb_find_pc(retaddr);
-        if (tb) {
-            /* the PC is inside the translated code. It means that we have
-               a virtual CPU fault */
-            cpu_restore_state(tb, env, retaddr);
-        }
-    }
-}
-
 #ifndef CONFIG_USER_ONLY
 #include "softmmu_exec.h"
 
@@ -61,7 +46,9 @@ void tlb_fill(CPUSH4State *env, target_ulong addr, int is_write, int mmu_idx,
     ret = cpu_sh4_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (ret) {
         /* now we have a real cpu fault */
-        cpu_restore_state_from_retaddr(env, retaddr);
+        if (retaddr) {
+            cpu_restore_state(env, retaddr);
+        }
         cpu_loop_exit(env);
     }
 }
@@ -82,7 +69,9 @@ static inline void QEMU_NORETURN raise_exception(CPUSH4State *env, int index,
                                                  uintptr_t retaddr)
 {
     env->exception_index = index;
-    cpu_restore_state_from_retaddr(env, retaddr);
+    if (retaddr) {
+        cpu_restore_state(env, retaddr);
+    }
     cpu_loop_exit(env);
 }
 
@@ -710,7 +710,6 @@ uint64_t cpu_tick_get_count(CPUTimer *timer);
 void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit);
 trap_state* cpu_tsptr(CPUSPARCState* env);
 #endif
-void cpu_restore_state2(CPUSPARCState *env, uintptr_t retaddr);
 
 #define TB_FLAG_FPU_ENABLED (1 << 4)
 #define TB_FLAG_AM_ENABLED (1 << 5)
@@ -75,7 +75,7 @@ static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a,
     x1 = (b & 0xffffffff);
 
     if (x1 == 0) {
-        cpu_restore_state2(env, GETPC());
+        cpu_restore_state(env, GETPC());
         helper_raise_exception(env, TT_DIV_ZERO);
     }
 
@@ -114,7 +114,7 @@ static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a,
     x1 = (b & 0xffffffff);
 
     if (x1 == 0) {
-        cpu_restore_state2(env, GETPC());
+        cpu_restore_state(env, GETPC());
         helper_raise_exception(env, TT_DIV_ZERO);
     }
 
@@ -147,7 +147,7 @@ int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b)
 {
     if (b == 0) {
         /* Raise divide by zero trap. */
-        cpu_restore_state2(env, GETPC());
+        cpu_restore_state(env, GETPC());
         helper_raise_exception(env, TT_DIV_ZERO);
     } else if (b == -1) {
         /* Avoid overflow trap with i386 divide insn. */
@@ -161,7 +161,7 @@ uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
 {
     if (b == 0) {
         /* Raise divide by zero trap. */
-        cpu_restore_state2(env, GETPC());
+        cpu_restore_state(env, GETPC());
         helper_raise_exception(env, TT_DIV_ZERO);
     }
     return a / b;
@@ -193,7 +193,7 @@ target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
     return dst;
 
 tag_overflow:
-    cpu_restore_state2(env, GETPC());
+    cpu_restore_state(env, GETPC());
     helper_raise_exception(env, TT_TOVF);
 }
 
@@ -222,6 +222,6 @@ target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
     return dst;
 
 tag_overflow:
-    cpu_restore_state2(env, GETPC());
+    cpu_restore_state(env, GETPC());
     helper_raise_exception(env, TT_TOVF);
 }
@@ -2393,22 +2393,6 @@ void cpu_unassigned_access(CPUSPARCState *env, hwaddr addr,
 #endif
 #endif
 
-/* XXX: make it generic ? */
-void cpu_restore_state2(CPUSPARCState *env, uintptr_t retaddr)
-{
-    TranslationBlock *tb;
-
-    if (retaddr) {
-        /* now we have a real cpu fault */
-        tb = tb_find_pc(retaddr);
-        if (tb) {
-            /* the PC is inside the translated code. It means that we have
-               a virtual CPU fault */
-            cpu_restore_state(tb, env, retaddr);
-        }
-    }
-}
-
 #if !defined(CONFIG_USER_ONLY)
 static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
                                               target_ulong addr, int is_write,
@@ -2418,7 +2402,9 @@ static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
     printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
            "\n", addr, env->pc);
 #endif
-    cpu_restore_state2(env, retaddr);
+    if (retaddr) {
+        cpu_restore_state(env, retaddr);
+    }
     helper_raise_exception(env, TT_UNALIGNED);
 }
 
@@ -2433,7 +2419,9 @@ void tlb_fill(CPUSPARCState *env, target_ulong addr, int is_write, int mmu_idx,
 
     ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (ret) {
-        cpu_restore_state2(env, retaddr);
+        if (retaddr) {
+            cpu_restore_state(env, retaddr);
+        }
         cpu_loop_exit(env);
     }
 }
@@ -256,20 +256,13 @@ uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
 void tlb_fill(CPUUniCore32State *env, target_ulong addr, int is_write,
               int mmu_idx, uintptr_t retaddr)
 {
-    TranslationBlock *tb;
-    unsigned long pc;
     int ret;
 
     ret = uc32_cpu_handle_mmu_fault(env, addr, is_write, mmu_idx);
     if (unlikely(ret)) {
         if (retaddr) {
             /* now we have a real cpu fault */
-            pc = (unsigned long)retaddr;
-            tb = tb_find_pc(pc);
-            if (tb) {/* the PC is inside the translated code.
-                        It means that we have a virtual CPU fault */
-                cpu_restore_state(tb, env, pc);
-            }
+            cpu_restore_state(env, retaddr);
         }
         cpu_loop_exit(env);
     }
@@ -47,22 +47,12 @@ static void do_unaligned_access(CPUXtensaState *env,
 #define SHIFT 3
 #include "softmmu_template.h"
 
-static void do_restore_state(CPUXtensaState *env, uintptr_t pc)
-{
-    TranslationBlock *tb;
-
-    tb = tb_find_pc(pc);
-    if (tb) {
-        cpu_restore_state(tb, env, pc);
-    }
-}
-
 static void do_unaligned_access(CPUXtensaState *env,
         target_ulong addr, int is_write, int is_user, uintptr_t retaddr)
 {
     if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
             !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
-        do_restore_state(env, retaddr);
+        cpu_restore_state(env, retaddr);
         HELPER(exception_cause_vaddr)(env,
                 env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
     }
@@ -86,7 +76,7 @@ void tlb_fill(CPUXtensaState *env,
                 paddr & TARGET_PAGE_MASK,
                 access, mmu_idx, page_size);
     } else {
-        do_restore_state(env, retaddr);
+        cpu_restore_state(env, retaddr);
         HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
     }
 }
@@ -142,6 +142,7 @@ TCGContext tcg_ctx;
 
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2);
+static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
 
 void cpu_gen_init(void)
 {
@@ -211,8 +212,8 @@ int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr
 
 /* The cpu state corresponding to 'searched_pc' is restored.
  */
-int cpu_restore_state(TranslationBlock *tb,
-                      CPUArchState *env, uintptr_t searched_pc)
+static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
+                                     uintptr_t searched_pc)
 {
     TCGContext *s = &tcg_ctx;
     int j;
@@ -266,6 +267,18 @@ int cpu_restore_state(TranslationBlock *tb,
     return 0;
 }
 
+bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
+{
+    TranslationBlock *tb;
+
+    tb = tb_find_pc(retaddr);
+    if (tb) {
+        cpu_restore_state_from_tb(tb, env, retaddr);
+        return true;
+    }
+    return false;
+}
+
 #ifdef _WIN32
 static inline void map_exec(void *addr, long size)
 {
@@ -1057,7 +1070,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                restore the CPU state */
 
             current_tb_modified = 1;
-            cpu_restore_state(current_tb, env, env->mem_io_pc);
+            cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                  &current_flags);
         }
@@ -1171,7 +1184,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
                restore the CPU state */
 
             current_tb_modified = 1;
-            cpu_restore_state(current_tb, env, pc);
+            cpu_restore_state_from_tb(current_tb, env, pc);
             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                  &current_flags);
         }
@@ -1308,7 +1321,7 @@ bool is_tcg_gen_code(uintptr_t tc_ptr)
 
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
    tb[1].tc_ptr. Return NULL if not found */
-TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
+static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
 {
     int m_min, m_max, m;
     uintptr_t v;
@@ -1435,7 +1448,7 @@ void tb_check_watchpoint(CPUArchState *env)
         cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
                   (void *)env->mem_io_pc);
     }
-    cpu_restore_state(tb, env, env->mem_io_pc);
+    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
     tb_phys_invalidate(tb, -1);
 }
 
@@ -1486,7 +1499,7 @@ void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
                   (void *)retaddr);
     }
     n = env->icount_decr.u16.low + tb->icount;
-    cpu_restore_state(tb, env, retaddr);
+    cpu_restore_state_from_tb(tb, env, retaddr);
     /* Calculate how many instructions had been executed before the fault
        occurred. */
     n = n - env->icount_decr.u16.low;
@@ -81,7 +81,6 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
                                     int is_write, sigset_t *old_set,
                                     void *puc)
 {
-    TranslationBlock *tb;
     int ret;
 
 #if defined(DEBUG_SIGNAL)
@@ -104,12 +103,7 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
         return 1; /* the MMU fault was handled without causing real CPU fault */
     }
     /* now we have a real cpu fault */
-    tb = tb_find_pc(pc);
-    if (tb) {
-        /* the PC is inside the translated code. It means that we have
-           a virtual CPU fault */
-        cpu_restore_state(tb, cpu_single_env, pc);
-    }
+    cpu_restore_state(cpu_single_env, pc);
 
     /* we restore the process signal mask as the sigreturn should
        do it (XXX: use sigsetjmp) */
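For reference, the core of the refactoring in the translation code above: the old exported cpu_restore_state(tb, env, searched_pc) becomes the static helper cpu_restore_state_from_tb(), a new cpu_restore_state(env, retaddr) wrapper performs the tb_find_pc() lookup itself, and tb_find_pc() can therefore become static. A condensed restatement of the added wrapper (simplified, comments mine):

    bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
    {
        /* Look up the TB containing the host return address; NULL when
         * retaddr does not point into generated code. tb_find_pc() is
         * now local to the translation code. */
        TranslationBlock *tb = tb_find_pc(retaddr);

        if (tb) {
            cpu_restore_state_from_tb(tb, env, retaddr);
            return true;   /* guest state was rolled back from the TB */
        }
        return false;      /* nothing to restore; the caller decides */
    }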