mirror of https://gitee.com/openkylin/qemu.git
correct CPL support (should fix flat real mode support)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@343 c046a42c-6fe2-441c-8c8c-71466251a162
parent c33a346edf
commit b6d78bfa0d
@@ -244,13 +244,8 @@ int cpu_exec(CPUState *env1)
                            (unsigned long)env->segs[R_ES].base |
                            (unsigned long)env->segs[R_SS].base) != 0) <<
                     GEN_FLAG_ADDSEG_SHIFT;
-                if (env->cr[0] & CR0_PE_MASK) {
-                    if (!(env->eflags & VM_MASK))
-                        flags |= (env->segs[R_CS].selector & 3) <<
-                            GEN_FLAG_CPL_SHIFT;
-                    else
-                        flags |= (1 << GEN_FLAG_VM_SHIFT);
-                }
+                flags |= env->cpl << GEN_FLAG_CPL_SHIFT;
+                flags |= (env->eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
                 flags |= (env->eflags & (IOPL_MASK | TF_MASK));
                 cs_base = env->segs[R_CS].base;
                 pc = cs_base + env->eip;
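Note on the new flags computation in cpu_exec(): the CPL bits now come straight from env->cpl, and the vm86 bit is copied out of EFLAGS arithmetically instead of through the old if/else on CR0.PE. Since EFLAGS.VM is bit 17, shifting right by (17 - GEN_FLAG_VM_SHIFT) drops that single bit into the GEN_FLAG_VM_SHIFT position of the TB flags word. A minimal standalone sketch of that bit relocation (not QEMU code; GEN_FLAG_VM_SHIFT's real value lives in the QEMU headers, 8 below is only a placeholder):

    #include <stdio.h>
    #include <stdint.h>

    #define VM_MASK           (1 << 17)  /* EFLAGS.VM is bit 17 */
    #define GEN_FLAG_VM_SHIFT 8          /* placeholder bit position in the TB flags */

    int main(void)
    {
        uint32_t eflags = VM_MASK;  /* pretend the CPU is in vm86 mode */
        /* move EFLAGS bit 17 down to bit GEN_FLAG_VM_SHIFT, everything else masked off */
        uint32_t flags = (eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
        printf("flags = %#x\n", flags);  /* 0x100: the VM bit landed on bit 8 */
        return 0;
    }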
@@ -256,6 +256,7 @@ typedef struct CPUX86State {
     SegmentCache tr;
     SegmentCache gdt; /* only base and limit are used */
     SegmentCache idt; /* only base and limit are used */
+    int cpl; /* current cpl */
 
     /* sysenter registers */
     uint32_t sysenter_cs;
@@ -276,6 +277,7 @@ typedef struct CPUX86State {
 
     uint32_t breakpoints[MAX_BREAKPOINTS];
     int nb_breakpoints;
+    int singlestep_enabled;
 
     /* user data */
     void *opaque;
@@ -298,6 +300,12 @@ int cpu_x86_get_pic_interrupt(CPUX86State *s);
 /* needed to load some predefinied segment registers */
 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
 
+/* wrapper, just in case memory mappings must be changed */
+static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
+{
+    s->cpl = cpl;
+}
+
 /* simulate fsave/frstor */
 void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
 void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
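The header hunks above are the heart of the change: CPUX86State grows an explicit cpl field, and cpu_x86_set_cpl() becomes the single place it is written. Every consumer that used to re-derive the privilege level from env->segs[R_CS].selector & 3 (or from PE/VM checks) now just reads env->cpl. A minimal standalone sketch of the cache-and-update idea (not QEMU code; the field and helper names mirror the hunks above, while load_cs() only stands in for the load_seg() change shown further down):

    #include <stdio.h>

    typedef struct CPUX86StateSketch {
        unsigned int cs_selector;
        int cpl;                           /* current privilege level, 0..3 */
    } CPUX86StateSketch;

    /* wrapper, in case changing the CPL later needs side effects (e.g. mappings) */
    static inline void cpu_x86_set_cpl(CPUX86StateSketch *s, int cpl)
    {
        s->cpl = cpl;
    }

    static void load_cs(CPUX86StateSketch *s, unsigned int selector)
    {
        s->cs_selector = selector;
        cpu_x86_set_cpl(s, selector & 3);  /* RPL of the freshly loaded CS */
    }

    int main(void)
    {
        CPUX86StateSketch env = { 0, 0 };
        load_cs(&env, 0x1b);               /* a typical ring-3 code selector */
        printf("cpl=%d\n", env.cpl);       /* prints 3 */
        return 0;
    }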
@@ -189,7 +189,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr, *ssp;
-    int type, dpl, cpl, selector, ss_dpl;
+    int type, dpl, selector, ss_dpl;
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
     uint32_t old_cs, old_ss, old_esp, old_eip;
@@ -216,12 +216,8 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         break;
     }
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    if (env->eflags & VM_MASK)
-        cpl = 3;
-    else
-        cpl = env->segs[R_CS].selector & 3;
     /* check privledge if software int */
-    if (is_int && dpl < cpl)
+    if (is_int && dpl < env->cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     /* check valid bit */
     if (!(e2 & DESC_P_MASK))
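The deleted block above is the pattern repeated throughout the interrupt and far-transfer helpers: each call site re-derived the CPL on the spot, forcing 3 under vm86 and otherwise taking the RPL of the loaded CS. With the cached field, that special case disappears, because env->cpl is kept correct at its sources instead: load_seg() updates it on every CS load (see the hunk further down), and the IRET-to-vm86 path forces it to 3 (see the helper_ret_protected hunk further down). A small standalone sketch of the removed per-call derivation next to the cached read (not QEMU code; names and the vm86 state are illustrative):

    #include <stdio.h>

    #define VM_MASK (1 << 17)

    typedef struct {
        unsigned int eflags;
        unsigned int cs_selector;
        int cpl;                           /* kept up to date by the CS-load paths */
    } CPUSketch;

    /* the per-call derivation that the hunk above deletes */
    static int derived_cpl(const CPUSketch *env)
    {
        if (env->eflags & VM_MASK)
            return 3;                      /* vm86 code always runs at CPL 3 */
        return env->cs_selector & 3;       /* otherwise the RPL of the loaded CS */
    }

    int main(void)
    {
        CPUSketch env = { VM_MASK, 0x0000, 3 };  /* vm86: cached cpl already forced to 3 */
        printf("derived=%d cached=%d\n", derived_cpl(&env), env.cpl);
        return 0;
    }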
@@ -236,11 +232,11 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    if (dpl > cpl)
+    if (dpl > env->cpl)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     if (!(e2 & DESC_P_MASK))
         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+    if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
         /* to inner priviledge */
         get_ss_esp_from_tss(&ss, &esp, dpl);
         if ((ss & 0xfffc) == 0)
@@ -259,7 +255,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         if (!(ss_e2 & DESC_P_MASK))
             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
         new_stack = 1;
-    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+    } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
         /* to same priviledge */
         new_stack = 0;
     } else {
@@ -406,7 +402,7 @@ void do_interrupt_user(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr;
-    int dpl, cpl;
+    int dpl;
     uint32_t e2;
 
     dt = &env->idt;
@@ -414,9 +410,8 @@ void do_interrupt_user(int intno, int is_int, int error_code,
     e2 = ldl(ptr + 4);
 
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    cpl = 3;
     /* check privledge if software int */
-    if (is_int && dpl < cpl)
+    if (is_int && dpl < env->cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
 
     /* Since we emulate only user space, we cannot do more than
@@ -728,6 +723,9 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip)
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
 #endif
     }
+    if (seg_reg == R_CS) {
+        cpu_x86_set_cpl(env, selector & 3);
+    }
     sc->selector = selector;
 }
 
@@ -744,7 +742,7 @@ void helper_ljmp_protected_T0_T1(void)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -828,7 +826,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1081,7 +1079,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     if (!(e2 & DESC_S_MASK) ||
         !(e2 & DESC_CS_MASK))
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     rpl = new_cs & 3;
     if (rpl < cpl)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1158,12 +1156,13 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
         /* modify processor state */
         load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
         load_seg_vm(R_CS, new_cs);
+        cpu_x86_set_cpl(env, 3);
         load_seg_vm(R_SS, new_ss);
         load_seg_vm(R_ES, new_es);
         load_seg_vm(R_DS, new_ds);
         load_seg_vm(R_FS, new_fs);
         load_seg_vm(R_GS, new_gs);
-
+
         env->eip = new_eip;
         env->regs[R_ESP] = new_esp;
     }
@@ -4113,13 +4113,7 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     dc->addseg = (flags >> GEN_FLAG_ADDSEG_SHIFT) & 1;
     dc->f_st = (flags >> GEN_FLAG_ST_SHIFT) & 7;
     dc->vm86 = (flags >> GEN_FLAG_VM_SHIFT) & 1;
-    /* CPL is implicit if real mode or vm86 mode */
-    if (!dc->pe)
-        dc->cpl = 0;
-    else if (dc->vm86)
-        dc->cpl = 3;
-    else
-        dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
+    dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
     dc->iopl = (flags >> GEN_FLAG_IOPL_SHIFT) & 3;
     dc->tf = (flags >> GEN_FLAG_TF_SHIFT) & 1;
     dc->cc_op = CC_OP_DYNAMIC;
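On the translator side, the special-casing of real mode and vm86 goes away because the CPL bits packed into the TB flags are now trustworthy: cpu_exec() fills them from env->cpl, which is 0 in real mode and 3 in vm86 by construction. The translation setup just unpacks what cpu_exec() packed. A tiny standalone round-trip sketch (not QEMU code; the shift value is only a placeholder for the real GEN_FLAG_CPL_SHIFT):

    #include <stdio.h>
    #include <stdint.h>

    #define GEN_FLAG_CPL_SHIFT 6   /* placeholder; the real value is in the QEMU headers */

    int main(void)
    {
        int cpl = 3;                                     /* what cpu_exec reads from env->cpl */
        uint32_t flags = 0;
        flags |= (uint32_t)cpl << GEN_FLAG_CPL_SHIFT;    /* packing side (cpu_exec hunk) */
        int dc_cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;  /* unpacking side (translate hunk) */
        printf("cpl=%d dc->cpl=%d\n", cpl, dc_cpl);      /* both print 3 */
        return 0;
    }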
@@ -4362,7 +4356,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     int cpl, error_code, is_dirty, is_user, prot, page_size;
     void *map_addr;
 
-    cpl = env->segs[R_CS].selector & 3;
+    cpl = env->cpl;
     is_user = (cpl == 3);
 
 #ifdef DEBUG_MMU