KVM: selftests: virt_map should take npages, not size

Also correct the comment and prototype for vm_create_default(),
as it takes a number of pages, not a size.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author:     Andrew Jones <drjones@redhat.com>
AuthorDate: 2020-03-13 16:56:43 +01:00
Committer:  Paolo Bonzini <pbonzini@redhat.com>
Commit:     beca54702d
Parent:     d0aac3320d

6 changed files with 16 additions and 19 deletions
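The practical effect on callers is easiest to see side by side. A minimal sketch, mirroring the call sites updated in the diffs below (guest_num_pages and guest_page_size as used by the tests):

	/* Before: virt_map() took a size in bytes, so callers scaled by the page size. */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
		 guest_num_pages * guest_page_size, 0);

	/* After: virt_map() takes a page count, consistent with vm_phy_pages_alloc(). */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);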

tools/testing/selftests/kvm/demand_paging_test.c

@@ -410,8 +410,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 				    guest_num_pages, 0);
 
 	/* Do mapping for the demand paging memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
-		 guest_num_pages * guest_page_size, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
 
 	ucall_init(vm, NULL);

tools/testing/selftests/kvm/dirty_log_test.c

@@ -334,8 +334,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 				    KVM_MEM_LOG_DIRTY_PAGES);
 
 	/* Do mapping for the dirty track memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
-		 guest_num_pages * guest_page_size, 0);
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
 
 	/* Cache the HVA pointer of the region */
 	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

tools/testing/selftests/kvm/include/kvm_util.h

@@ -117,7 +117,7 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 			  uint32_t data_memslot, uint32_t pgd_memslot);
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      size_t size, uint32_t pgd_memslot);
+	      unsigned int npages, uint32_t pgd_memslot);
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
@@ -226,7 +226,7 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
  *
  * Input Args:
  *   vcpuid - The id of the single VCPU to add to the VM.
- *   extra_mem_pages - The size of extra memories to add (this will
+ *   extra_mem_pages - The number of extra pages to add (this will
  *                     decide how much extra space we will need to
  *                     setup the page tables using memslot 0)
  *   guest_code - The vCPU's entry point
@@ -236,7 +236,7 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
  * Return:
  *   Pointer to opaque structure that describes the created VM.
  */
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
 				 void *guest_code);
 
 /*
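A usage sketch for the renamed vm_create_default() parameter (hypothetical values; EXTRA_MEM_SIZE, PAGE_SIZE_4K, and VCPU_ID are illustrative stand-ins, not part of this commit):

	#define EXTRA_MEM_SIZE	(512UL << 20)	/* hypothetical: 512 MiB of extra guest memory */
	#define PAGE_SIZE_4K	4096UL		/* assuming 4 KiB guest pages */

	/* extra_mem_pages is a number of pages, not a byte size */
	vm = vm_create_default(VCPU_ID, EXTRA_MEM_SIZE / PAGE_SIZE_4K, guest_code);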

tools/testing/selftests/kvm/lib/kvm_util.c

@@ -1015,21 +1015,21 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
  *   vm - Virtual Machine
  *   vaddr - Virtual address to map
  *   paddr - VM Physical Address
- *   size - The size of the range to map
+ *   npages - The number of pages to map
  *   pgd_memslot - Memory region slot for new virtual translation tables
  *
  * Output Args: None
  *
  * Return: None
  *
- * Within the VM given by vm, creates a virtual translation for the
- * page range starting at vaddr to the page range starting at paddr.
+ * Within the VM given by @vm, creates a virtual translation for
+ * @npages starting at @vaddr to the page range starting at @paddr.
  */
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
-	      size_t size, uint32_t pgd_memslot)
+	      unsigned int npages, uint32_t pgd_memslot)
 {
 	size_t page_size = vm->page_size;
-	size_t npages = size / page_size;
+	size_t size = npages * page_size;
 
 	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
 	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

tools/testing/selftests/kvm/x86_64/set_memory_region_test.c

@@ -87,7 +87,7 @@ static void test_move_memory_region(void)
 	gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
 	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
 
-	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2 * 4096, 0);
+	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
 
 	/* Ditto for the host mapping so that both pages can be zeroed. */
 	hva = addr_gpa2hva(vm, MEM_REGION_GPA);

tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c

@@ -21,7 +21,7 @@
 
 /* The memory slot index to track dirty pages */
 #define TEST_MEM_SLOT_INDEX		1
-#define TEST_MEM_SIZE			3
+#define TEST_MEM_PAGES			3
 
 /* L1 guest test virtual memory offset */
 #define GUEST_TEST_MEM			0xc0000000
@@ -91,15 +91,14 @@ int main(int argc, char *argv[])
 	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 				    GUEST_TEST_MEM,
 				    TEST_MEM_SLOT_INDEX,
-				    TEST_MEM_SIZE,
+				    TEST_MEM_PAGES,
 				    KVM_MEM_LOG_DIRTY_PAGES);
 
 	/*
 	 * Add an identity map for GVA range [0xc0000000, 0xc0002000).  This
 	 * affects both L1 and L2.  However...
 	 */
-	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
-		 TEST_MEM_SIZE * 4096, 0);
+	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);
 
 	/*
 	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
@@ -113,11 +112,11 @@ int main(int argc, char *argv[])
 	nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
 	nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
 
-	bmap = bitmap_alloc(TEST_MEM_SIZE);
+	bmap = bitmap_alloc(TEST_MEM_PAGES);
 	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
 
 	while (!done) {
-		memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
+		memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
 		_vcpu_run(vm, VCPU_ID);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 			    "Unexpected exit reason: %u (%s),\n",