mirror of https://gitee.com/openkylin/linux.git

commit f7784b8ec9
parent fef9cce0eb

KVM: split kvm_arch_set_memory_region into prepare and commit

Required for SRCU conversion later.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
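This commit splits the per-architecture hook kvm_arch_set_memory_region() into two: kvm_arch_prepare_memory_region(), which runs before the new slot is installed and may fail, and kvm_arch_commit_memory_region(), which runs after installation and returns void. Each architecture is converted in turn.

The first two hunks update the ia64 implementation (the KVM_MAX_MEM_SIZE bounds check places them in arch/ia64/kvm/kvm-ia64.c; the file names were lost from the page, so this is inferred from the code). The old body becomes the prepare hook, which now receives the target memslot directly instead of looking it up via mem->slot: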
@@ -1578,15 +1578,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	return r;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-		struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+		struct kvm_memory_slot *memslot,
 		struct kvm_memory_slot old,
+		struct kvm_userspace_memory_region *mem,
 		int user_alloc)
 {
 	unsigned long i;
 	unsigned long pfn;
-	int npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+	int npages = memslot->npages;
 	unsigned long base_gfn = memslot->base_gfn;
 
 	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
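The ia64 commit hook is an empty stub: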
@@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+		struct kvm_userspace_memory_region *mem,
+		struct kvm_memory_slot old,
+		int user_alloc)
+{
+	return;
+}
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 	kvm_flush_remote_tlbs(kvm);
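On powerpc both hooks are trivial: prepare reports success and commit does nothing. The kvm_arch_dev_ioctl() context suggests arch/powerpc/kvm/powerpc.c: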
@@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-			       struct kvm_userspace_memory_region *mem,
-			       struct kvm_memory_slot old,
-			       int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_memory_slot old,
+				   struct kvm_userspace_memory_region *mem,
+				   int user_alloc)
 {
 	return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				   struct kvm_userspace_memory_region *mem,
+				   struct kvm_memory_slot old,
+				   int user_alloc)
+{
+	return;
+}
+
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 }
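The s390 implementation (the sie control block comment points to arch/s390/kvm/kvm-s390.c) keeps its slot sanity checks in the prepare hook and drops locals that are no longer needed there: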
@@ -690,14 +690,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 }
 
 /* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
-				struct kvm_memory_slot old,
-				int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot,
+				   struct kvm_memory_slot old,
+				   struct kvm_userspace_memory_region *mem,
+				   int user_alloc)
 {
-	int i;
-	struct kvm_vcpu *vcpu;
-
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
 	   page boundary in userland and which has to end at a page boundary.
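The vcpu update loop, which must only run once the new slot is actually in place, moves into the s390 commit hook: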
@@ -720,14 +718,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	if (!user_alloc)
 		return -EINVAL;
 
+	return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				struct kvm_memory_slot old,
+				int user_alloc)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
 	/* request update of sie control block for all available vcpus */
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
 			continue;
 		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
 	}
-
-	return 0;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
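On x86 (kvm_mmu_calculate_mmu_pages() and kvm_mmu_slot_remove_write_access() point to arch/x86/kvm/x86.c), the prepare hook likewise receives the memslot directly and loses the slot lookup: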
@@ -5228,13 +5228,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(kvm);
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-				struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
 				struct kvm_memory_slot old,
+				struct kvm_userspace_memory_region *mem,
 				int user_alloc)
 {
-	int npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
+	int npages = memslot->npages;
 
 	/*To keep backward compatibility with older userspace,
 	 *x86 needs to hanlde !user_alloc case.
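The core of the x86 split: the do_mmap() path for !user_alloc slots stays in prepare, where a failure can still abort the operation, while the do_munmap() of a deleted slot and the MMU page accounting move into commit. The mmu_lock protection around the userspace_addr store is dropped, presumably because prepare now runs before the slot becomes visible: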
@@ -5254,26 +5254,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		if (IS_ERR((void *)userspace_addr))
 			return PTR_ERR((void *)userspace_addr);
 
-		/* set userspace_addr atomically for kvm_hva_to_rmapp */
-		spin_lock(&kvm->mmu_lock);
 		memslot->userspace_addr = userspace_addr;
-		spin_unlock(&kvm->mmu_lock);
-	} else {
-		if (!old.user_alloc && old.rmap) {
-			int ret;
-
-			down_write(&current->mm->mmap_sem);
-			ret = do_munmap(current->mm, old.userspace_addr,
-					old.npages * PAGE_SIZE);
-			up_write(&current->mm->mmap_sem);
-			if (ret < 0)
-				printk(KERN_WARNING
-				       "kvm_vm_ioctl_set_memory_region: "
-				       "failed to munmap memory\n");
-		}
 		}
 	}
 
+
+	return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				struct kvm_memory_slot old,
+				int user_alloc)
+{
+
+	int npages = mem->memory_size >> PAGE_SHIFT;
+
+	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+		int ret;
+
+		down_write(&current->mm->mmap_sem);
+		ret = do_munmap(current->mm, old.userspace_addr,
+				old.npages * PAGE_SIZE);
+		up_write(&current->mm->mmap_sem);
+		if (ret < 0)
+			printk(KERN_WARNING
+			       "kvm_vm_ioctl_set_memory_region: "
+			       "failed to munmap memory\n");
+	}
+
 	spin_lock(&kvm->mmu_lock);
 	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
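With the tail of the function now living in the void commit hook, the trailing return 0 goes away: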
@@ -5282,8 +5291,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	spin_unlock(&kvm->mmu_lock);
-
-	return 0;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
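The declarations in include/linux/kvm_host.h change to match: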
@@ -253,7 +253,12 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
 			    struct kvm_userspace_memory_region *mem,
 			    int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				struct kvm_memory_slot old,
+				struct kvm_userspace_memory_region *mem,
+				int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
 				struct kvm_userspace_memory_region *mem,
 				struct kvm_memory_slot old,
 				int user_alloc);
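Finally, generic code in virt/kvm/kvm_main.c calls the prepare hook before taking mmu_lock and publishing the new slot, so a prepare failure aborts the update before anything is visible: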
@@ -663,6 +663,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (!npages)
 		kvm_arch_flush_shadow(kvm);
 
+	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+	if (r)
+		goto out_free;
+
 	spin_lock(&kvm->mmu_lock);
 	if (mem->slot >= kvm->memslots->nmemslots)
 		kvm->memslots->nmemslots = mem->slot + 1;
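After the slot is published, the commit hook runs unconditionally; the old rollback path disappears because commit cannot fail: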
@@ -670,13 +674,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	*memslot = new;
 	spin_unlock(&kvm->mmu_lock);
 
-	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
-	if (r) {
-		spin_lock(&kvm->mmu_lock);
-		*memslot = old;
-		spin_unlock(&kvm->mmu_lock);
-		goto out_free;
-	}
+	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 
 	kvm_free_physmem_slot(&old, npages ? &new : NULL);
 	/* Slot deletion case: we have to update the current slot */
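Taken together, __kvm_set_memory_region() now follows a prepare, publish, commit sequence. Below is a minimal standalone sketch of that pattern with toy types standing in for the kvm structures; the names are illustrative, not kernel API:

#include <stdio.h>

/* Toy stand-in for struct kvm_memory_slot. */
struct slot {
	unsigned long base_gfn;
	unsigned long npages;
};

/* Prepare: may fail; runs before the new slot is visible to anyone. */
static int arch_prepare(const struct slot *new_slot)
{
	if (new_slot->npages == 0)
		return -1;	/* validation failure aborts the update cleanly */
	return 0;
}

/* Commit: runs after the slot table is updated; has no failure path. */
static void arch_commit(const struct slot *old_slot)
{
	printf("cleaning up old mapping of %lu pages\n", old_slot->npages);
}

static int set_memory_region(struct slot *table, struct slot new_slot)
{
	struct slot old = *table;

	if (arch_prepare(&new_slot))	/* 1. validate; nothing published yet */
		return -1;
	*table = new_slot;		/* 2. publish the new slot */
	arch_commit(&old);		/* 3. side effects only; cannot fail */
	return 0;
}

int main(void)
{
	struct slot table = { .base_gfn = 0, .npages = 16 };
	struct slot bigger = { .base_gfn = 0, .npages = 32 };

	return set_memory_region(&table, bigger) ? 1 : 0;
}

Splitting the hook this way is what later allows the slot array to be swapped under SRCU: everything that can fail happens before publication, and commit only performs cleanup against the old slot.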