KVM: PPC: Allocate vcpu struct in common PPC code

Move allocation of all flavors of PPC vCPUs to common PPC code.  All
variants either allocate 'struct kvm_vcpu' directly, or require that
the embedded 'struct kvm_vcpu' member be located at offset 0, i.e.
guarantee that the allocation can be directly interpreted as a 'struct
kvm_vcpu' object.

Remove the message from the build-time assertion regarding placement of
the struct, as compatibility with the arch usercopy region is no longer
the only thing that depends on 'struct kvm_vcpu' being at offset zero.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Authored by Sean Christopherson on 2019-12-18 13:54:57 -08:00; committed by Paolo Bonzini
parent 3ec8ca2964
commit c50bfbdc38
8 changed files with 51 additions and 77 deletions
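
For orientation, a minimal sketch of the layout rule the message relies on (illustrative only; 'struct kvmppc_vcpu_foo' and to_foo() are hypothetical stand-ins for the real flavors such as kvmppc_vcpu_e500): every flavor embeds 'struct kvm_vcpu' as its first member, so the object common code allocates from kvm_vcpu_cache can be viewed either as the common vcpu or, via container_of(), as the flavor wrapper.

#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>

/* Hypothetical flavor-private vcpu; the real ones are e.g. kvmppc_vcpu_e500. */
struct kvmppc_vcpu_foo {
	struct kvm_vcpu vcpu;		/* must stay at offset 0 */
	unsigned long foo_state;	/* made-up flavor state */
};

static inline struct kvmppc_vcpu_foo *to_foo(struct kvm_vcpu *vcpu)
{
	/* Offset-0 guarantee: converting between the two views is a no-op. */
	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_foo, vcpu) != 0);
	return container_of(vcpu, struct kvmppc_vcpu_foo, vcpu);
}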


@@ -119,7 +119,7 @@ extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
 			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
 			struct kvmppc_pte *pte);
 
-extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
+extern int kvmppc_core_vcpu_create(struct kvm *kvm, struct kvm_vcpu *vcpu,
 				unsigned int id);
 extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
@@ -274,7 +274,8 @@ struct kvmppc_ops {
 	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
 	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
 	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	int (*vcpu_create)(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			   unsigned int id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
 	int (*check_requests)(struct kvm_vcpu *vcpu);
 	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);


@@ -789,9 +789,10 @@ void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
 	kvm_vcpu_kick(vcpu);
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvmppc_core_vcpu_create(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			    unsigned int id)
 {
-	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+	return kvm->arch.kvm_ops->vcpu_create(kvm, vcpu, id);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)


@@ -2271,22 +2271,16 @@ static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
 }
 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
+static int kvmppc_core_vcpu_create_hv(struct kvm *kvm, struct kvm_vcpu *vcpu,
 				      unsigned int id)
 {
-	struct kvm_vcpu *vcpu;
 	int err;
 	int core;
 	struct kvmppc_vcore *vcore;
 
-	err = -ENOMEM;
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		goto out;
-
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
-		goto free_vcpu;
+		return err;
 
 	vcpu->arch.shared = &vcpu->arch.shregs;
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -2383,14 +2377,11 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	debugfs_vcpu_init(vcpu, id);
 
-	return vcpu;
+	return 0;
 
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
@@ -2445,7 +2436,6 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
 	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)


@@ -1744,21 +1744,16 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
+static int kvmppc_core_vcpu_create_pr(struct kvm *kvm, struct kvm_vcpu *vcpu,
 				      unsigned int id)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
-	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
 	unsigned long p;
 
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		goto out;
-
 	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
-		goto free_vcpu;
+		goto out;
 	vcpu->arch.book3s = vcpu_book3s;
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -1808,7 +1803,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 	if (err < 0)
 		goto free_shared_page;
 
-	return vcpu;
+	return 0;
 
 free_shared_page:
 	free_page((unsigned long)vcpu->arch.shared);
@@ -1820,10 +1815,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 free_vcpu3s:
 #endif
 	vfree(vcpu_book3s);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
@@ -1836,7 +1829,6 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 	kfree(vcpu->arch.shadow_vcpu);
 #endif
 	vfree(vcpu_book3s);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)


@@ -2114,9 +2114,10 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	return kvm->arch.kvm_ops->init_vm(kvm);
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvmppc_core_vcpu_create(struct kvm *kvm, struct kvm_vcpu *vcpu,
+			    unsigned int id)
 {
-	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
+	return kvm->arch.kvm_ops->vcpu_create(kvm, vcpu, id);
 }
 
 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)


@@ -433,26 +433,18 @@ static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
+static int kvmppc_core_vcpu_create_e500(struct kvm *kvm, struct kvm_vcpu *vcpu,
 					unsigned int id)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500;
-	struct kvm_vcpu *vcpu;
 	int err;
 
-	BUILD_BUG_ON_MSG(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0,
-			 "struct kvm_vcpu must be at offset 0 for arch usercopy region");
+	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
+	vcpu_e500 = to_e500(vcpu);
 
-	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu_e500) {
-		err = -ENOMEM;
-		goto out;
-	}
-	vcpu = &vcpu_e500->vcpu;
-
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
-		goto free_vcpu;
+		return err;
 
 	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) {
 		err = -ENOMEM;
@@ -469,7 +461,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
 		goto uninit_tlb;
 	}
 
-	return vcpu;
+	return 0;
 
 uninit_tlb:
 	kvmppc_e500_tlb_uninit(vcpu_e500);
@@ -477,10 +469,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
 	kvmppc_e500_id_table_free(vcpu_e500);
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
-out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
@@ -491,7 +480,6 @@ static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
 	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvmppc_e500_id_table_free(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
 static int kvmppc_core_init_vm_e500(struct kvm *kvm)


@@ -301,28 +301,21 @@ static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
-static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
+static int kvmppc_core_vcpu_create_e500mc(struct kvm *kvm, struct kvm_vcpu *vcpu,
 					  unsigned int id)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500;
-	struct kvm_vcpu *vcpu;
 	int err;
 
 	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
 
-	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu_e500) {
-		err = -ENOMEM;
-		goto out;
-	}
-	vcpu = &vcpu_e500->vcpu;
+	vcpu_e500 = to_e500(vcpu);
 
 	/* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
 	vcpu->arch.oldpir = 0xffffffff;
 
 	err = kvm_vcpu_init(vcpu, kvm, id);
 	if (err)
-		goto free_vcpu;
+		return err;
 
 	err = kvmppc_e500_tlb_init(vcpu_e500);
 	if (err)
@@ -334,17 +327,13 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
 		goto uninit_tlb;
 	}
 
-	return vcpu;
+	return 0;
 
 uninit_tlb:
 	kvmppc_e500_tlb_uninit(vcpu_e500);
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
-out:
-	return ERR_PTR(err);
+	return err;
 }
 
 static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
@@ -354,7 +343,6 @@ static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
 	free_page((unsigned long)vcpu->arch.shared);
 	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
 static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)


@@ -723,12 +723,23 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
+	int err;
 
-	vcpu = kvmppc_core_vcpu_create(kvm, id);
-	if (!IS_ERR(vcpu)) {
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	if (!vcpu)
+		return ERR_PTR(-ENOMEM);
+
+	err = kvmppc_core_vcpu_create(kvm, vcpu, id);
+	if (err)
+		goto free_vcpu;
+
 	vcpu->arch.wqp = &vcpu->wq;
 	kvmppc_create_vcpu_debugfs(vcpu, id);
-	}
+
 	return vcpu;
+
+free_vcpu:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
+	return ERR_PTR(err);
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -758,6 +769,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	}
 
 	kvmppc_core_vcpu_free(vcpu);
+
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
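
A hedged usage sketch of the reworked hook interface (hypothetical flavor; to_foo() and foo_alloc_priv() are invented helpers, not part of this patch): after this change a vcpu_create implementation no longer allocates or frees the vcpu, it only initializes flavor state on the vcpu passed in by common code and returns 0 or a negative errno.

/* Hypothetical implementation of the new kvmppc_ops::vcpu_create hook. */
static int kvmppc_core_vcpu_create_foo(struct kvm *kvm, struct kvm_vcpu *vcpu,
				       unsigned int id)
{
	struct kvmppc_vcpu_foo *foo = to_foo(vcpu);	/* valid: vcpu is at offset 0 */
	int err;

	err = kvm_vcpu_init(vcpu, kvm, id);	/* still called per flavor at this point */
	if (err)
		return err;

	err = foo_alloc_priv(foo);		/* invented flavor-specific setup */
	if (err)
		goto uninit_vcpu;

	return 0;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
	return err;				/* common code frees the vcpu on failure */
}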