mirror of https://gitee.com/openkylin/linux.git
KVM: struct kvm_memory_slot.user_alloc -> bool
There's no need for this to be an int, it holds a boolean. Move to the end
of the struct for alignment.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 0743247fbf
commit f82a8cfe93
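The alignment note in the commit message refers to struct padding: a 1-byte bool placed between wider members forces the compiler to insert a hole before the next naturally aligned member, while a bool at the end only consumes tail padding. Below is a minimal user-space sketch of the effect; the struct names and members (slot_middle, slot_end) are illustrative only, not the real kvm_memory_slot, and the sizes in the comments assume a typical 64-bit Linux ABI.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: a small member in the middle forces a 7-byte hole
 * before the following 8-byte member, and the struct pads out to 24 bytes. */
struct slot_middle {
	bool user_alloc;              /* 1 byte + 7 bytes of padding        */
	unsigned long userspace_addr; /* 8 bytes, must be 8-byte aligned    */
	int id;                       /* 4 bytes + 4 bytes of tail padding  */
};

/* Same members with the bool moved to the end: no interior hole,
 * only tail padding, and the struct shrinks to 16 bytes. */
struct slot_end {
	unsigned long userspace_addr;
	int id;
	bool user_alloc;
};

int main(void)
{
	printf("middle: %zu bytes, end: %zu bytes\n",
	       sizeof(struct slot_middle), sizeof(struct slot_end));
	return 0;
}

In the patched struct the overall size does not appear to change (the bool sits next to int id either way); moving it to the end mainly keeps the hole out of the middle of the struct and leaves it as tail padding.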
@@ -955,7 +955,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 					kvm_mem.guest_phys_addr;
 			kvm_userspace_mem.memory_size = kvm_mem.memory_size;
 			r = kvm_vm_ioctl_set_memory_region(kvm,
-						&kvm_userspace_mem, 0);
+						&kvm_userspace_mem, false);
 			if (r)
 				goto out;
 			break;

@@ -1580,7 +1580,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		struct kvm_memory_slot *memslot,
 		struct kvm_memory_slot old,
 		struct kvm_userspace_memory_region *mem,
-		int user_alloc)
+		bool user_alloc)
 {
 	unsigned long i;
 	unsigned long pfn;

@@ -1611,7 +1611,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 		struct kvm_userspace_memory_region *mem,
 		struct kvm_memory_slot old,
-		int user_alloc)
+		bool user_alloc)
 {
 	return;
 }

@@ -412,7 +412,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,
 				   struct kvm_userspace_memory_region *mem,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
 }

@@ -420,7 +420,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   struct kvm_memory_slot old,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	kvmppc_core_commit_memory_region(kvm, mem, old);
 }

@@ -928,7 +928,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
 				   struct kvm_memory_slot old,
 				   struct kvm_userspace_memory_region *mem,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a

@@ -958,7 +958,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
 				   struct kvm_memory_slot old,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	int rc;
 

@@ -3667,7 +3667,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	kvm_userspace_mem.flags = 0;
 	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
 	if (r)
 		goto out;
 

@@ -3697,7 +3697,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
 	kvm_userspace_mem.guest_phys_addr =
 		kvm->arch.ept_identity_map_addr;
 	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
+	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
 	if (r)
 		goto out;
 

@@ -4251,7 +4251,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 		.flags = 0,
 	};
 
-	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
+	ret = kvm_set_memory_region(kvm, &tss_mem, false);
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;

@@ -6839,7 +6839,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
 				struct kvm_userspace_memory_region *mem,
-				int user_alloc)
+				bool user_alloc)
 {
 	int npages = memslot->npages;
 	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;

@@ -6875,7 +6875,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
-				int user_alloc)
+				bool user_alloc)
 {
 
 	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;

@@ -270,8 +270,8 @@ struct kvm_memory_slot {
 	unsigned long *dirty_bitmap;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;
-	int user_alloc;
 	int id;
+	bool user_alloc;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)

@@ -451,10 +451,10 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
-			  int user_alloc);
+			  bool user_alloc);
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
-			    int user_alloc);
+			    bool user_alloc);
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont);
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);

@@ -462,11 +462,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
 				struct kvm_userspace_memory_region *mem,
-				int user_alloc);
+				bool user_alloc);
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
-				int user_alloc);
+				bool user_alloc);
 bool kvm_largepages_enabled(void);
 void kvm_disable_largepages(void);
 /* flush all memory translations */

@@ -553,7 +553,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
-				   int user_alloc);
+				   bool user_alloc);
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg);

@@ -709,7 +709,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
  */
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
-			    int user_alloc)
+			    bool user_alloc)
 {
 	int r;
 	gfn_t base_gfn;

@@ -889,7 +889,7 @@ EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
-			  int user_alloc)
+			  bool user_alloc)
 {
 	int r;
 

@@ -903,7 +903,7 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 				   struct
 				   kvm_userspace_memory_region *mem,
-				   int user_alloc)
+				   bool user_alloc)
 {
 	if (mem->slot >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;

@@ -2148,7 +2148,7 @@ static long kvm_vm_ioctl(struct file *filp,
 				   sizeof kvm_userspace_mem))
 			goto out;
 
-		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
+		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
 		break;
 	}
 	case KVM_GET_DIRTY_LOG: {
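For orientation, here is a condensed sketch of the calling convention after this patch, mirroring the call sites changed above. It is a fragment, not a standalone program: it assumes a struct kvm *kvm in scope inside kernel code, and the region field values are placeholders; only the bool in the last argument reflects this change.

/* Sketch only, mirroring the patched call sites above. Kernel-internal slots
 * pass false; the userspace ioctl path (KVM_SET_USER_MEMORY_REGION) passes true. */
struct kvm_userspace_memory_region mem = {
	.slot            = 0,          /* placeholder values */
	.flags           = 0,
	.guest_phys_addr = 0,
	.memory_size     = PAGE_SIZE,
};
int r;

r = __kvm_set_memory_region(kvm, &mem, false);       /* internal slot, as in vmx.c above   */
r = kvm_vm_ioctl_set_memory_region(kvm, &mem, true); /* userspace slot, as in kvm_vm_ioctl */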