KVM: x86: MMU: Eliminate an extra memory slot search in mapping_level()

Calling kvm_vcpu_gfn_to_memslot() twice in mapping_level() should be
avoided, since the cost of finding a slot by binary search may not be
negligible, especially for virtual machines with many memory slots.

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Takuya Yoshikawa 2015-10-16 17:08:03 +09:00 committed by Paolo Bonzini
parent d8aacf5df8
commit 5225fdf8c8
1 changed file with 11 additions and 6 deletions
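
The slot lookup being hoisted here is a binary search over the sorted memslot array, so repeating it for every candidate page-size level repeats the same O(log n) walk. Below is a minimal, self-contained sketch of the idea rather than the kernel code: the toy_memslot structure, the toy_gfn_to_memslot() and toy_has_wrprotected_page() helpers, and the hard-coded slot table are invented for this illustration.

/*
 * Toy model (not kernel code): resolve the gfn to a slot once, then
 * let the per-level loop reuse it, mirroring the patch's split into
 * a slot-taking worker and a lookup-performing wrapper.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_memslot {
        unsigned long base_gfn;
        unsigned long npages;
        int write_count[4];     /* nonzero: some page at this level is write-protected */
};

/* Sorted by base_gfn so the lookup can binary-search it. */
static struct toy_memslot slots[] = {
        { .base_gfn = 0x000, .npages = 0x100 },
        { .base_gfn = 0x200, .npages = 0x100, .write_count = { 0, 0, 1, 0 } },
        { .base_gfn = 0x400, .npages = 0x100 },
};

/* Binary search over the slot array -- the cost the patch avoids repeating. */
static struct toy_memslot *toy_gfn_to_memslot(unsigned long gfn)
{
        size_t lo = 0, hi = sizeof(slots) / sizeof(slots[0]);

        while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;

                if (gfn < slots[mid].base_gfn)
                        hi = mid;
                else if (gfn >= slots[mid].base_gfn + slots[mid].npages)
                        lo = mid + 1;
                else
                        return &slots[mid];
        }
        return NULL;            /* no slot backs this gfn */
}

/* Worker that trusts the caller's slot, like __has_wrprotected_page(). */
static int toy_has_wrprotected_page(int level, struct toy_memslot *slot)
{
        return slot ? slot->write_count[level] : 1;
}

int main(void)
{
        unsigned long gfn = 0x210;
        struct toy_memslot *slot = toy_gfn_to_memslot(gfn);     /* one search */
        int level;

        /* The per-level loop reuses the resolved slot instead of searching again. */
        for (level = 1; level <= 3; ++level)
                if (toy_has_wrprotected_page(level, slot))
                        break;

        printf("largest usable level: %d\n", level - 1);
        return 0;
}

The shape mirrors the patch: mapping_level() already holds the slot, so it calls the worker directly and performs the search once instead of once per level, while the thin has_wrprotected_page() wrapper keeps the old interface for callers that only have a vcpu and a gfn.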

arch/x86/kvm/mmu.c

@@ -818,14 +818,11 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
         kvm->arch.indirect_shadow_pages--;
 }
 
-static int has_wrprotected_page(struct kvm_vcpu *vcpu,
-                                gfn_t gfn,
-                                int level)
+static int __has_wrprotected_page(gfn_t gfn, int level,
+                                  struct kvm_memory_slot *slot)
 {
-        struct kvm_memory_slot *slot;
         struct kvm_lpage_info *linfo;
 
-        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
         if (slot) {
                 linfo = lpage_info_slot(gfn, slot, level);
                 return linfo->write_count;
@@ -834,6 +831,14 @@ static int has_wrprotected_page(struct kvm_vcpu *vcpu,
         return 1;
 }
 
+static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
+{
+        struct kvm_memory_slot *slot;
+
+        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+        return __has_wrprotected_page(gfn, level, slot);
+}
+
 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
 {
         unsigned long page_size;
@@ -896,7 +901,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
         max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
 
         for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-                if (has_wrprotected_page(vcpu, large_gfn, level))
+                if (__has_wrprotected_page(large_gfn, level, slot))
                         break;
 
         return level - 1;