KVM: x86: Sync SPTEs when injecting page/EPT fault into L1

When injecting a page fault or EPT violation/misconfiguration, KVM is
not syncing any shadow PTEs associated with the faulting address,
including those in previous MMUs that are associated with L1's current
EPTP (in a nested EPT scenario), nor is it flushing any hardware TLB
entries.  Fix this by doing all of the above via kvm_mmu_invalidate_gva().

Page faults that are either !PRESENT or RSVD are exempt from the flushing,
as the CPU is not allowed to cache such translations.
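
For reference, the exemption reduces to a predicate on the hardware #PF error
code: only a fault against a present, non-reserved translation can correspond
to a stale TLB/SPTE entry.  The snippet below is a minimal, standalone sketch
of that predicate; fault_needs_invalidation and the PF_ERR_* macros are
illustrative stand-ins for the kernel's PFERR_PRESENT_MASK/PFERR_RSVD_MASK
checks, using the architectural error-code bit positions (bit 0 = P,
bit 3 = RSVD).

  /* Standalone sketch of the "does this injected #PF need invalidation?" check. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PF_ERR_PRESENT (1u << 0)  /* translation was present */
  #define PF_ERR_RSVD    (1u << 3)  /* reserved bit set in paging structures */

  static bool fault_needs_invalidation(uint32_t error_code)
  {
          /*
           * !PRESENT and RSVD faults cannot have been cached by the CPU,
           * so there is nothing to sync or flush for them.
           */
          return (error_code & PF_ERR_PRESENT) && !(error_code & PF_ERR_RSVD);
  }

  int main(void)
  {
          printf("!PRESENT fault:     %d\n", fault_needs_invalidation(0x0)); /* 0 */
          printf("PRESENT fault:      %d\n", fault_needs_invalidation(0x1)); /* 1 */
          printf("PRESENT+RSVD fault: %d\n", fault_needs_invalidation(0x9)); /* 0 */
          return 0;
  }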

Signed-off-by: Junaid Shahid <junaids@google.com>
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-8-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit ee1fa209f5
parent 0cd665bd20
Author:    Junaid Shahid <junaids@google.com>  (2020-03-20 14:28:03 -07:00)
Committer: Paolo Bonzini <pbonzini@redhat.com>
3 changed files with 17 additions and 8 deletions

@@ -4560,7 +4560,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -4869,7 +4869,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
-			kvm_inject_page_fault(vcpu, &e);
+			kvm_inject_emulated_page_fault(vcpu, &e);
 			return 1;
 		}
 	}
@@ -4943,7 +4943,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 					instr_info, false, len, &gva))
 			return 1;
 		if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
-			kvm_inject_page_fault(vcpu, &e);
+			kvm_inject_emulated_page_fault(vcpu, &e);
 			return 1;
 		}
 	}
@@ -5108,7 +5108,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
 	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
 					sizeof(gpa_t), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
 	return nested_vmx_succeed(vcpu);
@@ -5152,7 +5152,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -5220,7 +5220,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
 	if (operand.vpid >> 16)

@@ -5393,7 +5393,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}

@@ -619,8 +619,17 @@ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
 					       vcpu->arch.walk_mmu;
-	fault_mmu->inject_page_fault(vcpu, fault);
+	/*
+	 * Invalidate the TLB entry for the faulting address, if it exists,
+	 * else the access will fault indefinitely (and to emulate hardware).
+	 */
+	if ((fault->error_code & PFERR_PRESENT_MASK) &&
+	    !(fault->error_code & PFERR_RSVD_MASK))
+		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
+				       fault_mmu->root_hpa);
+
+	fault_mmu->inject_page_fault(vcpu, fault);
 	return fault->nested_page_fault;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);