KVM: x86: clear SMM flags before loading state while leaving SMM
RSM emulation is currently broken on VMX when the interrupted guest has
CR4.VMXE=1. Stop dancing around the issue of HF_SMM_MASK being set when
loading SMSTATE into architectural state, e.g. by toggling it for
problematic flows, and simply clear HF_SMM_MASK prior to loading
architectural state (from SMRAM save state area).
Reported-by: Jon Doron <arilou@gmail.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Fixes: 5bea5123cb ("KVM: VMX: check nested state and CR4.VMXE against SMM")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 9ec19493fb
parent c5833c7a43
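Background on the breakage: commit 5bea5123cb made KVM reject CR4.VMXE=1 while the vCPU is in SMM, and RSM emulation restored CR4 from the SMRAM save state area while HF_SMM_MASK was still set. A simplified sketch of that interaction (condensed for illustration, not the verbatim kernel code):

/* Simplified sketch; condensed from the check added by 5bea5123cb. */
static bool is_smm(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_SMM_MASK;
}

static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & X86_CR4_VMXE) {
                /*
                 * VMXE may not be set from SMM, so restoring a saved
                 * CR4.VMXE=1 during RSM fails while HF_SMM_MASK is still
                 * set, and the emulator reports RSM as unhandleable.
                 */
                if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
                        return 1;
        }
        /* ... */
        return 0;
}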
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2571,6 +2571,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ret != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
 
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+		ctxt->ops->set_nmi_mask(ctxt, false);
+
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+			      ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
@@ -2624,12 +2630,6 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-		ctxt->ops->set_nmi_mask(ctxt, false);
-
-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-			      ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
-
 	ctxt->ops->post_leave_smm(ctxt);
 
 	return X86EMUL_CONTINUE;
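Net effect of the two emulate.c hunks: leaving SMM (the NMI unmask plus the hflags clear) now happens before any architectural state is loaded. A condensed sketch of the resulting em_rsm() flow (details elided; the real-mode setup and rsm_load_state_32/64() themselves are unchanged by this patch):

/* Condensed sketch of em_rsm() after this patch; not the full body. */
static int em_rsm_sketch(struct x86_emulate_ctxt *ctxt, const char *smstate)
{
        /* 1. Unmask NMIs unless an NMI was being serviced inside SMM. */
        if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
                ctxt->ops->set_nmi_mask(ctxt, false);

        /* 2. Leave SMM *before* touching architectural state. */
        ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
                ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));

        /* 3. The vendor hook can now re-enter nested guest mode directly. */
        if (ctxt->ops->pre_leave_smm(ctxt, smstate))
                return X86EMUL_UNHANDLEABLE;

        /*
         * 4. Load CR0/CR3/CR4/EFER etc. from the SMRAM save state area;
         *    CR4.VMXE=1 is no longer rejected because is_smm() is false.
         */
        /* rsm_load_state_64(ctxt, smstate) or rsm_load_state_32(...); */

        ctxt->ops->post_leave_smm(ctxt);
        return X86EMUL_CONTINUE;
}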
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6239,21 +6239,17 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct page *page;
 	u64 guest;
 	u64 vmcb;
-	int ret;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
 	if (guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		nested_vmcb = nested_svm_map(svm, vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
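For SVM, with HF_SMM_MASK already cleared by the emulator before ->pre_leave_smm() runs, the local toggle around enter_svm_guest_mode() becomes unnecessary. Note the removed code also declared `int ret` but assigned it only on the map-failure path, so the `guest == 0` and success paths returned an uninitialized value; returning 0/1 explicitly fixes that as well. The resulting function, roughly (a sketch: the leading declarations are reconstructed, they are not visible in the hunk above):

static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *nested_vmcb;
        struct page *page;
        u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
        u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);

        if (guest) {
                /* HF_SMM_MASK is already clear; no toggling required. */
                nested_vmcb = nested_svm_map(svm, vmcb, &page);
                if (!nested_vmcb)
                        return 1;
                enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
        }
        return 0;
}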
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7409,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	}
 
 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
 
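Same story on VMX: nested_vmx_enter_non_root_mode() now runs with the SMM flags already cleared by em_rsm(), so the HF_SMM_MASK toggle disappears. The guarded block reads, roughly (sketch of the post-patch lines only, context as in the hunk above):

        if (vmx->nested.smm.guest_mode) {
                /* HF_SMM_MASK was cleared by em_rsm() before this hook. */
                ret = nested_vmx_enter_non_root_mode(vcpu, false);
                if (ret)
                        return ret;
        }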