KVM: fix KVM_CLEAR_DIRTY_LOG for memory slots of unaligned size
If a memory slot's size is not a multiple of 64 pages (256K with 4K
pages), the KVM_CLEAR_DIRTY_LOG API is unusable: clearing the final,
partial chunk of pages either requires the requested page range to go
beyond memslot->npages, or requires log->num_pages to be unaligned,
and kvm_clear_dirty_log_protect requires log->num_pages to be both in
range and aligned.
To allow this case, permit log->num_pages not to be a multiple of 64
if the requested range ends exactly on the last page of the slot.
Reported-by: Peter Xu <peterx@redhat.com>
Fixes: 98938aa8ed ("KVM: validate userspace input in kvm_clear_dirty_log_protect()", 2019-01-02)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 76d58e0f07
parent 0699c64a4b
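For illustration, here is a minimal userspace sketch of the call this
change makes legal. Only KVM_CLEAR_DIRTY_LOG and struct
kvm_clear_dirty_log come from <linux/kvm.h>; the vm_fd, slot id, and
131075-page slot geometry are hypothetical, and the VM is assumed to
have KVM_CAP_MANUAL_DIRTY_LOG_PROTECT enabled.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/*
 * Sketch only: clear the dirty tail of a slot whose size is not a
 * multiple of 64 pages.  vm_fd is an existing VM file descriptor;
 * the slot id and the 131075-page slot size are hypothetical.
 */
static int clear_dirty_tail(int vm_fd)
{
	uint64_t bitmap = 0x7;			/* bits for the 3 tail pages */
	struct kvm_clear_dirty_log log = {
		.slot = 0,			/* hypothetical slot id */
		.first_page = 131072,		/* multiple of 64, as required */
		.num_pages = 3,			/* unaligned, but ends the slot */
		.dirty_bitmap = &bitmap,
	};

	/* Before this fix, num_pages = 3 was rejected with -EINVAL. */
	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
}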
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
@@ -3830,8 +3830,9 @@ The ioctl clears the dirty status of pages in a memory slot, according to
 the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
 field. Bit 0 of the bitmap corresponds to page "first_page" in the
 memory slot, and num_pages is the size in bits of the input bitmap.
-Both first_page and num_pages must be a multiple of 64. For each bit
-that is set in the input bitmap, the corresponding page is marked "clean"
+first_page must be a multiple of 64; num_pages must also be a multiple of
+64 unless first_page + num_pages is the size of the memory slot. For each
+bit that is set in the input bitmap, the corresponding page is marked "clean"
 in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
 (for example via write-protection, or by clearing the dirty bit in
 a page table entry).
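Restated as code, the documented rule looks like the sketch below; the
helper name and the slot size in the example are mine, not part of the
patch.

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the documented rule; helper name is hypothetical. */
static bool clear_log_args_valid(uint64_t first_page, uint64_t num_pages,
				 uint64_t slot_npages)
{
	if (first_page & 63)
		return false;
	if (first_page > slot_npages || num_pages > slot_npages - first_page)
		return false;
	/* An unaligned count is allowed only if the range ends the slot. */
	return !(num_pages & 63) || first_page + num_pages == slot_npages;
}

For a slot of 131075 pages this accepts (first_page = 131072,
num_pages = 3) but still rejects (0, 3), which stops short of the end.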
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 #endif
 	max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
 	guest_page_size = (1ul << guest_page_shift);
-	/* 1G of guest page sized pages */
-	guest_num_pages = (1ul << (30 - guest_page_shift));
+	/*
+	 * A little more than 1G of guest page sized pages. Cover the
+	 * case where the size is not aligned to 64 pages.
+	 */
+	guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
 	host_page_size = getpagesize();
 	host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
 			 !!((guest_num_pages * guest_page_size) % host_page_size);
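The host_num_pages expression above is an open-coded ceiling division.
A standalone check of the equivalence, using a hypothetical slot size
and a hypothetical 64K host page size:

#include <assert.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in the kernel */

int main(void)
{
	uint64_t size = 131075ull * 4096;	/* hypothetical slot size */
	uint64_t host_page_size = 65536;	/* hypothetical 64K-page host */

	assert(DIV_ROUND_UP(size, host_page_size) ==
	       size / host_page_size + !!(size % host_page_size));
	return 0;
}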
@@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 		kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
 #ifdef USE_CLEAR_DIRTY_LOG
 		kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
-				       DIV_ROUND_UP(host_num_pages, 64) * 64);
+				       host_num_pages);
 #endif
 		vm_dirty_log_verify(bmap);
 		iteration++;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
@@ -1240,7 +1240,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
 
-	if ((log->first_page & 63) || (log->num_pages & 63))
+	if (log->first_page & 63)
 		return -EINVAL;
 
 	slots = __kvm_memslots(kvm, as_id);
@@ -1253,8 +1253,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
 	n = kvm_dirty_bitmap_bytes(memslot);
 
 	if (log->first_page > memslot->npages ||
-	    log->num_pages > memslot->npages - log->first_page)
-		return -EINVAL;
+	    log->num_pages > memslot->npages - log->first_page ||
+	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
+		return -EINVAL;
 
 	*flush = false;
 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
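Finally, a standalone sketch that exercises the new range check with
worked boundary values; the slot size is hypothetical, and the helper
simply mirrors the condition in the hunk above.

#include <assert.h>
#include <stdint.h>

/* Mirrors the kernel condition above; returns nonzero for -EINVAL cases. */
static int clear_range_invalid(uint64_t first_page, uint64_t num_pages,
			       uint64_t npages)
{
	return first_page > npages ||
	       num_pages > npages - first_page ||
	       (num_pages < npages - first_page && (num_pages & 63));
}

int main(void)
{
	uint64_t npages = 131075;	/* slot not aligned to 64 pages */

	assert(!clear_range_invalid(131072, 3, npages));  /* exact tail: OK */
	assert(clear_range_invalid(0, 3, npages));        /* interior, unaligned */
	assert(!clear_range_invalid(0, 131072, npages));  /* aligned prefix: OK */
	return 0;
}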