KVM: Make kvm_mmu_change_mmu_pages() take mmu_lock by itself

There is no reason to make callers take mmu_lock, since we do not need to
protect kvm_mmu_change_mmu_pages() and kvm_mmu_slot_remove_write_access()
together with mmu_lock in kvm_arch_commit_memory_region(): the former
calls kvm_mmu_commit_zap_page() and flushes TLBs by itself.

Note: we do not need to protect kvm->arch.n_requested_mmu_pages with
mmu_lock, as can be seen from the fact that it is read locklessly.

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Author:    Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Date:      2013-01-08 19:46:07 +09:00
Committer: Gleb Natapov
Commit:    b34cb590fb (parent: e12091ce7b)

 2 files changed, 8 insertions(+), 5 deletions(-)
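For reference, the net locking pattern in kvm_arch_commit_memory_region()
after this change can be condensed as follows (a sketch distilled from the
hunks below; surrounding code elided):

	/* kvm_mmu_change_mmu_pages() now takes mmu_lock by itself. */
	if (nr_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);

	/* mmu_lock is held only around the write-access removal. */
	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
		spin_unlock(&kvm->mmu_lock);
	}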

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c

@@ -2143,6 +2143,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	 * change the value
 	 */
 
+	spin_lock(&kvm->mmu_lock);
+
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
 		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
 			!list_empty(&kvm->arch.active_mmu_pages)) {
@@ -2157,6 +2159,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	}
 
 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
+
+	spin_unlock(&kvm->mmu_lock);
 }
 
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
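Condensed, the resulting shape of the function is (a sketch; the loop body,
which zaps pages via kvm_mmu_commit_zap_page() and flushes TLBs as noted in
the commit message, is elided):

	void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
	{
		spin_lock(&kvm->mmu_lock);

		if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
			/* zap pages until n_used_mmu_pages meets the goal */
		}

		kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

		spin_unlock(&kvm->mmu_lock);
	}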

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c

@@ -3270,12 +3270,10 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 		return -EINVAL;
 
 	mutex_lock(&kvm->slots_lock);
-	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-	spin_unlock(&kvm->mmu_lock);
 	mutex_unlock(&kvm->slots_lock);
 	return 0;
 }
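With mmu_lock taken by the callee, the ioctl path above holds only
slots_lock, which serializes writers of kvm->arch.n_requested_mmu_pages
while readers remain lockless, per the commit message. Condensed:

	mutex_lock(&kvm->slots_lock);
	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);	/* takes mmu_lock itself */
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;	/* read locklessly elsewhere */
	mutex_unlock(&kvm->slots_lock);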
@@ -6894,7 +6892,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	if (!kvm->arch.n_requested_mmu_pages)
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
-	spin_lock(&kvm->mmu_lock);
 	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	/*
@@ -6902,9 +6899,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	 * Existing largepage mappings are destroyed here and new ones will
 	 * not be created until the end of the logging.
 	 */
-	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-	spin_unlock(&kvm->mmu_lock);
+		spin_unlock(&kvm->mmu_lock);
+	}
 	/*
 	 * If memory slot is created, or moved, we need to clear all
 	 * mmio sptes.