KVM: Explicitly free allocated-but-unused dirty bitmap

Explicitly free an allocated-but-unused dirty bitmap instead of relying
on kvm_free_memslot() if an error occurs in __kvm_set_memory_region().
There is no longer a need to abuse kvm_free_memslot() to free arch
specific resources as arch specific code is now called only after the
common flow is guaranteed to succeed.  Arch code can still fail, but
it's responsible for its own cleanup in that case.
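
In concrete terms, the dirty bitmap is allocated for the incoming slot only when dirty logging is requested and the old slot has no bitmap to reuse, so the error path only has to undo that one allocation. A minimal sketch of the resulting flow (simplified, not verbatim kernel source; the allocation helper and flag names are taken from the surrounding code of this era):

	/* Sketch of the relevant part of __kvm_set_memory_region(). */

	/* A bitmap is allocated for the new slot only when the old slot
	 * cannot supply one. */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		if (kvm_create_dirty_bitmap(&new) < 0)
			goto out;
	}

	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
	if (!slots)
		goto out_bitmap;	/* undo only this call's own allocation */

	/* ... install the new memslots, call arch code, etc. ... */

out_bitmap:
	/* The bitmap belongs to this call iff the old slot had none. */
	if (new.dirty_bitmap && !old.dirty_bitmap)
		kvm_destroy_dirty_bitmap(&new);
out:
	return r;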

Eliminating the error path's abuse of kvm_free_memslot() paves the way
for simplifying kvm_free_memslot(), i.e. dropping its @dont param.
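
For reference, the @dont parameter exists so cleanup could skip resources shared with another slot; at this point the helper looks roughly like the sketch below (approximate, not verbatim). With the error path no longer calling it, no remaining caller needs the @dont distinction, which is what enables the follow-up simplification.

	static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont)
	{
		/* Free the dirty bitmap unless @dont still shares it. */
		if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
			kvm_destroy_dirty_bitmap(free);

		/* Arch teardown likewise has to check @dont for shared state. */
		kvm_arch_free_memslot(kvm, free, dont);

		free->npages = 0;
	}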

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:    Sean Christopherson <sean.j.christopherson@intel.com>
Date:      2020-02-18 13:07:21 -08:00
Committer: Paolo Bonzini
Commit:    bd0e96fdc5 (parent: 414de7abbf)

1 changed file with 4 additions and 3 deletions

virt/kvm/kvm_main.c

@@ -1093,7 +1093,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL_ACCOUNT);
 	if (!slots)
-		goto out_free;
+		goto out_bitmap;
 	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
 
 	if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
@@ -1141,8 +1141,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
 		slots = install_new_memslots(kvm, as_id, slots);
 	kvfree(slots);
-out_free:
-	kvm_free_memslot(kvm, &new, &old);
+out_bitmap:
+	if (new.dirty_bitmap && !old.dirty_bitmap)
+		kvm_destroy_dirty_bitmap(&new);
 out:
 	return r;
 }