kvm: x86: mmu: Fast Page Fault path retries

This change adds retries into the Fast Page Fault path. Without the
retries the code still works, but whenever a retry does turn out to be
needed, the result is a second page fault for the same memory access,
which costs much more than simply retrying within the original fault.

This would be especially useful with the upcoming fast access tracking
change, as that would make it more likely for retries to be needed
(e.g. due to read and write faults happening on different CPUs at
the same time).

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Junaid Shahid authored on 2016-12-06 16:46:12 -08:00, committed by Radim Krčmář
commit 97dceba29a (parent ea4114bcd3)
1 changed file with 73 additions and 51 deletions
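At its core, the patch turns the one-shot lockless fix into a bounded
compare-and-swap retry loop: re-read the SPTE, try the fix again, and give
up after a handful of attempts. Below is a minimal, self-contained sketch
of that pattern in plain C11 — with hypothetical names (fake_spte, try_fix,
handle_fault) and atomic_compare_exchange_strong standing in for the
kernel's cmpxchg64 — not the actual KVM code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WRITABLE_BIT (1ull << 1)

/* Stands in for a shadow PTE that other CPUs may update concurrently. */
static _Atomic uint64_t fake_spte = 0x1000;

/* Try to set the writable bit; fails if another CPU changed the entry. */
static bool try_fix(uint64_t expected)
{
	return atomic_compare_exchange_strong(&fake_spte, &expected,
					      expected | WRITABLE_BIT);
}

static bool handle_fault(void)
{
	unsigned int retry_count = 0;
	uint64_t snapshot = atomic_load(&fake_spte);
	bool fault_handled = false;

	do {
		/* Spurious fault: someone else already made it writable. */
		if (snapshot & WRITABLE_BIT) {
			fault_handled = true;
			break;
		}

		fault_handled = try_fix(snapshot);
		if (fault_handled)
			break;

		/* Give up after a few attempts instead of spinning forever. */
		if (++retry_count > 4)
			break;

		/* Re-read the entry and retry within the same fault. */
		snapshot = atomic_load(&fake_spte);
	} while (true);

	return fault_handled;
}

int main(void)
{
	printf("fault handled: %d\n", handle_fault());
	return 0;
}

Capping the retries at 4, as the patch does, keeps a persistently racing
entry from spinning in the lockless path; once the cap is hit,
fast_page_fault() simply reports failure and the slow path handles the fault.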

@@ -2891,6 +2891,10 @@ static bool page_fault_can_be_fast(u32 error_code)
 	return true;
 }
 
+/*
+ * Returns true if the SPTE was fixed successfully. Otherwise,
+ * someone else modified the SPTE from its original value.
+ */
 static bool
 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			u64 *sptep, u64 spte)
@@ -2917,7 +2921,9 @@ fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 *
 	 * Compare with set_spte where instead shadow_dirty_mask is set.
 	 */
-	if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
-		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+	if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) != spte)
+		return false;
+
+	kvm_vcpu_mark_page_dirty(vcpu, gfn);
 
 	return true;
@@ -2933,8 +2939,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
-	bool ret = false;
+	bool fault_handled = false;
 	u64 spte = 0ull;
+	uint retry_count = 0;
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return false;
@@ -2947,18 +2954,19 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 		if (!is_shadow_present_pte(spte) || iterator.level < level)
 			break;
 
+	do {
 		/*
 		 * If the mapping has been changed, let the vcpu fault on the
 		 * same address again.
 		 */
 		if (!is_shadow_present_pte(spte)) {
-		ret = true;
-		goto exit;
+			fault_handled = true;
+			break;
 		}
 
 		sp = page_header(__pa(iterator.sptep));
 		if (!is_last_spte(spte, sp->role.level))
-		goto exit;
+			break;
 
 		/*
 		 * Check if it is a spurious fault caused by TLB lazily flushed.
@@ -2967,42 +2975,56 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 		 * they are always ACC_ALL.
 		 */
 		if (is_writable_pte(spte)) {
-		ret = true;
-		goto exit;
+			fault_handled = true;
+			break;
 		}
 
 		/*
-	 * Currently, to simplify the code, only the spte write-protected
-	 * by dirty-log can be fast fixed.
+		 * Currently, to simplify the code, only the spte
+		 * write-protected by dirty-log can be fast fixed.
 		 */
 		if (!spte_can_locklessly_be_made_writable(spte))
-		goto exit;
+			break;
 
 		/*
-	 * Do not fix write-permission on the large spte since we only dirty
-	 * the first page into the dirty-bitmap in fast_pf_fix_direct_spte()
-	 * that means other pages are missed if its slot is dirty-logged.
+		 * Do not fix write-permission on the large spte since we only
+		 * dirty the first page into the dirty-bitmap in
+		 * fast_pf_fix_direct_spte() that means other pages are missed
+		 * if its slot is dirty-logged.
 		 *
-	 * Instead, we let the slow page fault path create a normal spte to
-	 * fix the access.
+		 * Instead, we let the slow page fault path create a normal spte
+		 * to fix the access.
 		 *
 		 * See the comments in kvm_arch_commit_memory_region().
 		 */
 		if (sp->role.level > PT_PAGE_TABLE_LEVEL)
-		goto exit;
+			break;
 
 		/*
-	 * Currently, fast page fault only works for direct mapping since
-	 * the gfn is not stable for indirect shadow page.
-	 * See Documentation/virtual/kvm/locking.txt to get more detail.
+		 * Currently, fast page fault only works for direct mapping
+		 * since the gfn is not stable for indirect shadow page. See
+		 * Documentation/virtual/kvm/locking.txt to get more detail.
 		 */
-	ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte);
-exit:
+		fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
+							iterator.sptep, spte);
+		if (fault_handled)
+			break;
+
+		if (++retry_count > 4) {
+			printk_once(KERN_WARNING
+				"kvm: Fast #PF retrying more than 4 times.\n");
+			break;
+		}
+
+		spte = mmu_spte_get_lockless(iterator.sptep);
+
+	} while (true);
+
 	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
-			      spte, ret);
+			      spte, fault_handled);
 	walk_shadow_page_lockless_end(vcpu);
 
-	return ret;
+	return fault_handled;
 }
 
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,