mirror of https://gitee.com/openkylin/qemu.git
target/i386: unify masking of interrupts
Interrupt handling depends on various flags in env->hflags or env->hflags2, and the exact details were not replicated consistently between x86_cpu_has_work and x86_cpu_exec_interrupt. Create a new function that extracts the highest-priority non-masked interrupt, and use it in both functions.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 27e18b8952
commit 92d5f1a414
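The shape of the change, in miniature: one priority-ordered extractor decides which unmasked event is deliverable next, and both the "any work pending?" query and the "service one event" dispatcher are defined in terms of it. In the patch these are x86_cpu_pending_interrupt(), x86_cpu_has_work(), and the switch added to x86_cpu_exec_interrupt(). The toy below is a self-contained sketch of that structure only, not QEMU code; the event names, flags, and two-level priority scheme are invented for illustration.

/* Toy model of the patch's structure (not QEMU code). */
#include <stdbool.h>
#include <stdio.h>

#define EV_NMI  (1 << 0)   /* higher priority, masked only while one is in service */
#define EV_INTR (1 << 1)   /* lower priority, masked by the IF flag */

struct toy_cpu {
    int request;           /* pending event bits */
    bool if_flag;          /* maskable interrupts enabled? */
    bool nmi_blocked;      /* NMI masked while one is in service */
};

/* Single source of truth: highest-priority deliverable event, or 0. */
static int toy_pending_event(const struct toy_cpu *cpu, int request)
{
    if ((request & EV_NMI) && !cpu->nmi_blocked) {
        return EV_NMI;
    }
    if ((request & EV_INTR) && cpu->if_flag) {
        return EV_INTR;
    }
    return 0;
}

/* Mirrors the new x86_cpu_has_work(): a one-line wrapper. */
static bool toy_has_work(const struct toy_cpu *cpu)
{
    return toy_pending_event(cpu, cpu->request) != 0;
}

/* Mirrors the reworked x86_cpu_exec_interrupt(): service at most
 * one event per call, chosen by the same extractor. */
static bool toy_exec_one(struct toy_cpu *cpu)
{
    switch (toy_pending_event(cpu, cpu->request)) {
    case EV_NMI:
        cpu->request &= ~EV_NMI;
        cpu->nmi_blocked = true;
        puts("servicing NMI");
        return true;
    case EV_INTR:
        cpu->request &= ~EV_INTR;
        puts("servicing INTR");
        return true;
    default:
        return false;
    }
}

int main(void)
{
    struct toy_cpu cpu = { .request = EV_NMI | EV_INTR, .if_flag = true };

    while (toy_has_work(&cpu)) {
        toy_exec_one(&cpu);   /* NMI is picked first, then INTR */
    }
    return 0;
}

Because the masking rules live in one function, the two callers can no longer drift apart, which is exactly the divergence the commit message describes.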
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5429,20 +5429,51 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
     cpu->env.eip = tb->pc - tb->cs_base;
 }
 
-static bool x86_cpu_has_work(CPUState *cs)
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
-                                      CPU_INTERRUPT_POLL)) &&
-            (env->eflags & IF_MASK)) ||
-           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
-                                     CPU_INTERRUPT_INIT |
-                                     CPU_INTERRUPT_SIPI |
-                                     CPU_INTERRUPT_MCE)) ||
-           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK));
+#if !defined(CONFIG_USER_ONLY)
+    if (interrupt_request & CPU_INTERRUPT_POLL) {
+        return CPU_INTERRUPT_POLL;
+    }
+#endif
+    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+        return CPU_INTERRUPT_SIPI;
+    }
+
+    if (env->hflags2 & HF2_GIF_MASK) {
+        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+            !(env->hflags & HF_SMM_MASK)) {
+            return CPU_INTERRUPT_SMI;
+        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                   !(env->hflags2 & HF2_NMI_MASK)) {
+            return CPU_INTERRUPT_NMI;
+        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+            return CPU_INTERRUPT_MCE;
+        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                   (((env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->hflags2 & HF2_HIF_MASK)) ||
+                    (!(env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->eflags & IF_MASK &&
+                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+            return CPU_INTERRUPT_HARD;
+#if !defined(CONFIG_USER_ONLY)
+        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+                   (env->eflags & IF_MASK) &&
+                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+            return CPU_INTERRUPT_VIRQ;
+#endif
+        }
+    }
+
+    return 0;
+}
+
+static bool x86_cpu_has_work(CPUState *cs)
+{
+    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
+}
 
 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1485,6 +1485,7 @@ extern struct VMStateDescription vmstate_x86_cpu;
  */
 void x86_cpu_do_interrupt(CPUState *cpu);
 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
 
 int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                              int cpuid, void *opaque);
--- a/target/i386/seg_helper.c
+++ b/target/i386/seg_helper.c
@@ -1319,45 +1319,42 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
-    bool ret = false;
+    int intno;
 
+    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+    if (!interrupt_request) {
+        return false;
+    }
+
+    /* Don't process multiple interrupt requests in a single call.
+     * This is required to make icount-driven execution deterministic.
+     */
+    switch (interrupt_request) {
 #if !defined(CONFIG_USER_ONLY)
-    if (interrupt_request & CPU_INTERRUPT_POLL) {
+    case CPU_INTERRUPT_POLL:
         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(cpu->apic_state);
-        /* Don't process multiple interrupt requests in a single call.
-           This is required to make icount-driven execution deterministic. */
-        return true;
-    }
+        break;
 #endif
-    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+    case CPU_INTERRUPT_SIPI:
         do_cpu_sipi(cpu);
-        ret = true;
-    } else if (env->hflags2 & HF2_GIF_MASK) {
-        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
-            do_smm_enter(cpu);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
-                   !(env->hflags2 & HF2_NMI_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
-            env->hflags2 |= HF2_NMI_MASK;
-            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
-            ret = true;
-        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
-            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
-            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                   (((env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->hflags2 & HF2_HIF_MASK)) ||
-                    (!(env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->eflags & IF_MASK &&
-                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
-            int intno;
-            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
-            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                       CPU_INTERRUPT_VIRQ);
+        break;
+    case CPU_INTERRUPT_SMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+        do_smm_enter(cpu);
+        break;
+    case CPU_INTERRUPT_NMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+        env->hflags2 |= HF2_NMI_MASK;
+        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+        break;
+    case CPU_INTERRUPT_MCE:
+        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+        break;
+    case CPU_INTERRUPT_HARD:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                   CPU_INTERRUPT_VIRQ);
@@ -1365,14 +1362,9 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         qemu_log_mask(CPU_LOG_TB_IN_ASM,
                       "Servicing hardware INT=0x%02x\n", intno);
         do_interrupt_x86_hardirq(env, intno, 1);
-        /* ensure that no TB jump will be modified as
-           the program flow was changed */
-        ret = true;
+        break;
 #if !defined(CONFIG_USER_ONLY)
-    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
-               (env->eflags & IF_MASK) &&
-               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-        int intno;
+    case CPU_INTERRUPT_VIRQ:
         /* FIXME: this should respect TPR */
         cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
         intno = x86_ldl_phys(cs, env->vm_vmcb
@@ -1381,12 +1373,12 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
                       "Servicing virtual hardware INT=0x%02x\n", intno);
         do_interrupt_x86_hardirq(env, intno, 1);
         cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-        ret = true;
+        break;
 #endif
-        }
     }
 
-    return ret;
+    /* Ensure that no TB jump will be modified as the program flow was changed. */
+    return true;
 }
 
 void helper_lldt(CPUX86State *env, int selector)
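A note on the comment carried into the new switch ("Don't process multiple interrupt requests in a single call. This is required to make icount-driven execution deterministic."): when each call services at most one event, the service order is a pure function of the request bits, so a replayed icount run that observes the same bits takes the same path. The sketch below demonstrates that property with an invented three-event priority scheme; it is illustration only, not QEMU code.

/* Toy demonstration (not QEMU code): one event per "call" yields a
 * reproducible service order for any given set of request bits. */
#include <assert.h>

enum { TOY_POLL = 1, TOY_NMI = 2, TOY_HARD = 4 };

/* Fixed priority order, analogous to x86_cpu_pending_interrupt(). */
static int toy_pick(int request)
{
    if (request & TOY_POLL) {
        return TOY_POLL;
    }
    if (request & TOY_NMI) {
        return TOY_NMI;
    }
    if (request & TOY_HARD) {
        return TOY_HARD;
    }
    return 0;
}

int main(void)
{
    int request = TOY_NMI | TOY_HARD | TOY_POLL;
    int order[3], n = 0;
    int ev;

    /* Each iteration stands for one x86_cpu_exec_interrupt() call:
     * pick the single highest-priority event, then clear it. */
    while ((ev = toy_pick(request)) != 0) {
        request &= ~ev;
        order[n++] = ev;
    }

    /* Same bits in, same order out: POLL, then NMI, then HARD. */
    assert(n == 3 && order[0] == TOY_POLL &&
           order[1] == TOY_NMI && order[2] == TOY_HARD);
    return 0;
}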