KVM: selftests: Implement perf_test_util more conventionally
It's not conventional C to put non-inline functions in header files. Create a source file for the functions instead. Also reduce the number of globals and rename the functions to something less generic.

Reviewed-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Jones <drjones@redhat.com>
Message-Id: <20201218141734.54359-4-drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1133e17ea7
commit b268b6f0bd
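The change follows the standard C split between declaration and definition: the header carries only declarations, and exactly one translation unit provides the definitions. A condensed sketch of the before/after pattern, using names from this patch (function bodies elided):

    /* Before: perf_test_util.h defined everything, so every test
     * including it got a private copy of each global and helper. */
    static struct perf_test_args perf_test_args;
    static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
                                    uint64_t vcpu_memory_bytes) { /* ... */ }

    /* After: perf_test_util.h only declares... */
    extern struct perf_test_args perf_test_args;
    struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
                                       uint64_t vcpu_memory_bytes);

    /* ...and lib/perf_test_util.c, built into LIBKVM, provides the
     * single definition linked into every test. */
    struct perf_test_args perf_test_args;
    struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
                                       uint64_t vcpu_memory_bytes) { /* ... */ }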
tools/testing/selftests/kvm/Makefile:

@@ -33,7 +33,7 @@ ifeq ($(ARCH),s390)
 UNAME_M := s390x
 endif
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
tools/testing/selftests/kvm/demand_paging_test.c:

@@ -36,12 +36,14 @@
 #define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
 #endif
 
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 static char *guest_data_prototype;
 
 static void *vcpu_worker(void *data)
 {
 	int ret;
-	struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+	struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
 	int vcpu_id = vcpu_args->vcpu_id;
 	struct kvm_vm *vm = perf_test_args.vm;
 	struct kvm_run *run;

@@ -263,7 +265,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	int vcpu_id;
 	int r;
 
-	vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
 
 	perf_test_args.wr_fract = 1;
 

@@ -275,7 +277,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
 	TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-	add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+	perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
 
 	if (p->use_uffd) {
 		uffd_handler_threads =

@@ -359,8 +361,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 		perf_test_args.vcpu_args[0].pages * nr_vcpus /
 		((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
 
-	ucall_uninit(vm);
-	kvm_vm_free(vm);
+	perf_test_destroy_vm(vm);
 
 	free(guest_data_prototype);
 	free(vcpu_threads);
tools/testing/selftests/kvm/dirty_log_perf_test.c:

@@ -22,11 +22,14 @@
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
 #define TEST_HOST_LOOP_N 2UL
 
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
 /* Host variables */
 static u64 dirty_log_manual_caps;
 static bool host_quit;
 static uint64_t iteration;
-static uint64_t vcpu_last_completed_iteration[MAX_VCPUS];
+static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
 static void *vcpu_worker(void *data)
 {

@@ -38,7 +41,7 @@ static void *vcpu_worker(void *data)
 	struct timespec ts_diff;
 	struct timespec total = (struct timespec){0};
 	struct timespec avg;
-	struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+	struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
 	int vcpu_id = vcpu_args->vcpu_id;
 
 	vcpu_args_set(vm, vcpu_id, 1, vcpu_id);

@@ -108,7 +111,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	struct kvm_enable_cap cap = {};
 	struct timespec clear_dirty_log_total = (struct timespec){0};
 
-	vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
 
 	perf_test_args.wr_fract = p->wr_fract;
 

@@ -126,7 +129,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
 	TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-	add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+	perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
 
 	sync_global_to_guest(vm, perf_test_args);
 

@@ -152,7 +155,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 	/* Enable dirty logging */
 	clock_gettime(CLOCK_MONOTONIC, &start);
-	vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
+	vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX,
 				KVM_MEM_LOG_DIRTY_PAGES);
 	ts_diff = timespec_diff_now(start);
 	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",

@@ -179,7 +182,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 			iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
 		clock_gettime(CLOCK_MONOTONIC, &start);
-		kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+		kvm_vm_get_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap);
 
 		ts_diff = timespec_diff_now(start);
 		get_dirty_log_total = timespec_add(get_dirty_log_total,

@@ -189,7 +192,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 		if (dirty_log_manual_caps) {
 			clock_gettime(CLOCK_MONOTONIC, &start);
-			kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+			kvm_vm_clear_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap, 0,
 					       host_num_pages);
 
 			ts_diff = timespec_diff_now(start);

@@ -207,7 +210,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 	/* Disable dirty logging */
 	clock_gettime(CLOCK_MONOTONIC, &start);
-	vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
+	vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0);
 	ts_diff = timespec_diff_now(start);
 	pr_info("Disabling dirty logging time: %ld.%.9lds\n",
 		ts_diff.tv_sec, ts_diff.tv_nsec);

@@ -226,8 +229,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 
 	free(bmap);
 	free(vcpu_threads);
-	ucall_uninit(vm);
-	kvm_vm_free(vm);
+	perf_test_destroy_vm(vm);
 }
 
 static void help(char *name)
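Both callers drive the same guest access loop but tune the write fraction differently: demand_paging_test.c pins perf_test_args.wr_fract to 1 (every page written), while dirty_log_perf_test.c takes it from a command-line parameter (p->wr_fract). In guest_code (see the new lib/perf_test_util.c below), page index i is written when i % wr_fract == 0 and only read otherwise. A standalone illustration, not part of the patch, that mirrors that predicate to show the resulting write/read mix:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Count how many of the first `pages` page indices guest_code
     * would write for a given wr_fract (it writes when
     * i % wr_fract == 0, reads otherwise). */
    static uint64_t writes_for(uint64_t pages, int wr_fract)
    {
            uint64_t i, writes = 0;

            for (i = 0; i < pages; i++)
                    if (i % wr_fract == 0)
                            writes++;
            return writes;
    }

    int main(void)
    {
            printf("%" PRIu64 "\n", writes_for(1024, 1)); /* 1024: all writes */
            printf("%" PRIu64 "\n", writes_for(1024, 4)); /* 256: ~25% writes */
            return 0;
    }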
tools/testing/selftests/kvm/include/kvm_util.h:

@@ -16,6 +16,7 @@
 
 #include "sparsebit.h"
 
+#define KVM_MAX_VCPUS 512
 
 /*
  * Callers of kvm_util only have an incomplete/opaque description of the
tools/testing/selftests/kvm/include/perf_test_util.h:

@@ -9,35 +9,15 @@
 #define SELFTEST_KVM_PERF_TEST_UTIL_H
 
 #include "kvm_util.h"
-#include "processor.h"
-
-#define MAX_VCPUS 512
-
-#define TEST_MEM_SLOT_INDEX 1
 
 /* Default guest test virtual memory offset */
 #define DEFAULT_GUEST_TEST_MEM 0xc0000000
 
 #define DEFAULT_PER_VCPU_MEM_SIZE (1 << 30) /* 1G */
 
-/*
- * Guest physical memory offset of the testing memory slot.
- * This will be set to the topmost valid physical address minus
- * the test memory size.
- */
-static uint64_t guest_test_phys_mem;
-
-/*
- * Guest virtual memory offset of the testing memory slot.
- * Must not conflict with identity mapped test code.
- */
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
-
-/* Number of VCPUs for the test */
-static int nr_vcpus = 1;
+#define PERF_TEST_MEM_SLOT_INDEX 1
 
-struct vcpu_args {
+struct perf_test_vcpu_args {
 	uint64_t gva;
 	uint64_t pages;

@@ -51,119 +31,21 @@ struct perf_test_args {
 	uint64_t guest_page_size;
 	int wr_fract;
 
-	struct vcpu_args vcpu_args[MAX_VCPUS];
+	struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
-static struct perf_test_args perf_test_args;
+extern struct perf_test_args perf_test_args;
 
 /*
- * Continuously write to the first 8 bytes of each page in the
- * specified region.
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
  */
-static void guest_code(uint32_t vcpu_id)
-{
-	struct vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
-	uint64_t gva;
-	uint64_t pages;
-	int i;
-
-	/* Make sure vCPU args data structure is not corrupt. */
-	GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
-	gva = vcpu_args->gva;
-	pages = vcpu_args->pages;
-
-	while (true) {
-		for (i = 0; i < pages; i++) {
-			uint64_t addr = gva + (i * perf_test_args.guest_page_size);
-
-			if (i % perf_test_args.wr_fract == 0)
-				*(uint64_t *)addr = 0x0123456789ABCDEF;
-			else
-				READ_ONCE(*(uint64_t *)addr);
-		}
-
-		GUEST_SYNC(1);
-	}
-}
-
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
-				uint64_t vcpu_memory_bytes)
-{
-	struct kvm_vm *vm;
-	uint64_t guest_num_pages;
-
-	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-	perf_test_args.host_page_size = getpagesize();
-	perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
-
-	guest_num_pages = vm_adjust_num_guest_pages(mode,
-				(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
-
-	TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
-		    "Guest memory size is not host page size aligned.");
-	TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
-		    "Guest memory size is not guest page size aligned.");
-
-	vm = vm_create_with_vcpus(mode, vcpus,
-				  (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
-				  0, guest_code, NULL);
-
-	perf_test_args.vm = vm;
-
-	/*
-	 * If there should be more memory in the guest test region than there
-	 * can be pages in the guest, it will definitely cause problems.
-	 */
-	TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
-		    "Requested more guest memory than address space allows.\n"
-		    "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
-		    guest_num_pages, vm_get_max_gfn(vm), vcpus,
-		    vcpu_memory_bytes);
-
-	guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
-			      perf_test_args.guest_page_size;
-	guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
-#ifdef __s390x__
-	/* Align to 1M (segment size) */
-	guest_test_phys_mem &= ~((1 << 20) - 1);
-#endif
-	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
-
-	/* Add an extra memory slot for testing */
-	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-				    guest_test_phys_mem,
-				    TEST_MEM_SLOT_INDEX,
-				    guest_num_pages, 0);
-
-	/* Do mapping for the demand paging memory slot */
-	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
-
-	ucall_init(vm, NULL);
-
-	return vm;
-}
-
-static void add_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
-{
-	vm_paddr_t vcpu_gpa;
-	struct vcpu_args *vcpu_args;
-	int vcpu_id;
-
-	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-		vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
-
-		vcpu_args->vcpu_id = vcpu_id;
-		vcpu_args->gva = guest_test_virt_mem +
-				 (vcpu_id * vcpu_memory_bytes);
-		vcpu_args->pages = vcpu_memory_bytes /
-				   perf_test_args.guest_page_size;
-
-		vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
-		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-			 vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
-	}
-}
+extern uint64_t guest_test_phys_mem;
+
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+				   uint64_t vcpu_memory_bytes);
+void perf_test_destroy_vm(struct kvm_vm *vm);
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes);
 
 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
tools/testing/selftests/kvm/lib/perf_test_util.c (new file):

@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "processor.h"
+
+struct perf_test_args perf_test_args;
+
+uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously write to the first 8 bytes of each page in the
+ * specified region.
+ */
+static void guest_code(uint32_t vcpu_id)
+{
+	struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+	uint64_t gva;
+	uint64_t pages;
+	int i;
+
+	/* Make sure vCPU args data structure is not corrupt. */
+	GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
+
+	gva = vcpu_args->gva;
+	pages = vcpu_args->pages;
+
+	while (true) {
+		for (i = 0; i < pages; i++) {
+			uint64_t addr = gva + (i * perf_test_args.guest_page_size);
+
+			if (i % perf_test_args.wr_fract == 0)
+				*(uint64_t *)addr = 0x0123456789ABCDEF;
+			else
+				READ_ONCE(*(uint64_t *)addr);
+		}
+
+		GUEST_SYNC(1);
+	}
+}
+
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+				   uint64_t vcpu_memory_bytes)
+{
+	struct kvm_vm *vm;
+	uint64_t guest_num_pages;
+
+	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
+	perf_test_args.host_page_size = getpagesize();
+	perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
+
+	guest_num_pages = vm_adjust_num_guest_pages(mode,
+				(vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
+
+	TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
+		    "Guest memory size is not host page size aligned.");
+	TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
+		    "Guest memory size is not guest page size aligned.");
+
+	vm = vm_create_with_vcpus(mode, vcpus,
+				  (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
+				  0, guest_code, NULL);
+
+	perf_test_args.vm = vm;
+
+	/*
+	 * If there should be more memory in the guest test region than there
+	 * can be pages in the guest, it will definitely cause problems.
+	 */
+	TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+		    "Requested more guest memory than address space allows.\n"
+		    "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+		    guest_num_pages, vm_get_max_gfn(vm), vcpus,
+		    vcpu_memory_bytes);
+
+	guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+			      perf_test_args.guest_page_size;
+	guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
+#ifdef __s390x__
+	/* Align to 1M (segment size) */
+	guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+
+	/* Add an extra memory slot for testing */
+	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+				    guest_test_phys_mem,
+				    PERF_TEST_MEM_SLOT_INDEX,
+				    guest_num_pages, 0);
+
+	/* Do mapping for the demand paging memory slot */
+	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+	ucall_init(vm, NULL);
+
+	return vm;
+}
+
+void perf_test_destroy_vm(struct kvm_vm *vm)
+{
+	ucall_uninit(vm);
+	kvm_vm_free(vm);
+}
+
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+{
+	vm_paddr_t vcpu_gpa;
+	struct perf_test_vcpu_args *vcpu_args;
+	int vcpu_id;
+
+	for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+		vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+
+		vcpu_args->vcpu_id = vcpu_id;
+		vcpu_args->gva = guest_test_virt_mem +
+				 (vcpu_id * vcpu_memory_bytes);
+		vcpu_args->pages = vcpu_memory_bytes /
+				   perf_test_args.guest_page_size;
+
+		vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
+		pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+			 vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+	}
+}
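Taken together, a test's setup and teardown now reduce to the call sequence the two updated tests share. A minimal sketch of that flow, modeled on the updated tests (the mode, vCPU count, and memory size are illustrative; the vcpu_worker threads and measurement code are elided):

    #include "kvm_util.h"
    #include "perf_test_util.h"

    static void run(enum vm_guest_mode mode, int nr_vcpus,
                    uint64_t percpu_mem)
    {
            struct kvm_vm *vm;

            vm = perf_test_create_vm(mode, nr_vcpus, percpu_mem);
            perf_test_args.wr_fract = 1;      /* all writes, as demand_paging_test does */
            perf_test_setup_vcpus(vm, nr_vcpus, percpu_mem);
            sync_global_to_guest(vm, perf_test_args); /* push args into guest memory */

            /* ...spawn one vcpu_worker thread per vCPU and measure... */

            perf_test_destroy_vm(vm);         /* ucall_uninit() + kvm_vm_free() */
    }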