mirror of https://gitee.com/openkylin/linux.git
KVM: s390: Introduce switching code
This patch adds code that performs a transparent switch to the Extended SCA (ESCA) when the 65th VCPU is added to a VM. Disposal of the ESCA is added as well. The entire ESCA functionality, however, is still not enabled; the enablement will be provided in a separate patch.

This patch also uses a read/write lock to protect the SCA and its subfields against disposal at the BSCA-to-ESCA transition. While only the Basic SCA needs such protection (for the swap), any SCA access is now guarded.

Signed-off-by: Eugene (jno) Dvurechenski <jno@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 7d43bafcff
commit 5e04431523
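The locking scheme described above can be sketched in isolation before reading the diff: every reader takes the new sca_lock for reading around each SCA access, and the one-time BSCA-to-ESCA switch takes it for writing while it copies the old entries into the larger block and republishes the pointer. Below is a minimal user-space analogue using POSIX rwlocks; the struct shapes and all names (small_sca, big_sca, switch_to_big) are illustrative assumptions, not the kernel code, and the kernel version additionally blocks all VCPUs and serializes switchers via kvm->lock.

/*
 * Sketch of the rwlock-protected pointer-swap pattern this patch
 * introduces. Hypothetical types and names; not the kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct small_sca { unsigned long entries[64]; };   /* stands in for bsca_block */
struct big_sca   { unsigned long entries[256]; };  /* stands in for esca_block */

static pthread_rwlock_t sca_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *sca;   /* points at a small_sca or a big_sca */
static int use_big;

/* Reader: take the read lock around any dereference of the shared
 * pointer so the block cannot be freed underneath us mid-switch. */
static unsigned long read_entry(int id)
{
	unsigned long val;

	pthread_rwlock_rdlock(&sca_lock);
	val = use_big ? ((struct big_sca *)sca)->entries[id]
		      : ((struct small_sca *)sca)->entries[id];
	pthread_rwlock_unlock(&sca_lock);
	return val;
}

/* One-time switch: copy and republish under the write lock, then free
 * the old block. Assumes a single switcher (the kernel uses kvm->lock). */
static int switch_to_big(void)
{
	struct small_sca *old = sca;
	struct big_sca *new = calloc(1, sizeof(*new));

	if (!new)
		return -1;

	pthread_rwlock_wrlock(&sca_lock);
	memcpy(new->entries, old->entries, sizeof(old->entries));
	sca = new;
	use_big = 1;
	pthread_rwlock_unlock(&sca_lock);

	free(old);   /* safe: no reader can still hold the old pointer */
	return 0;
}

int main(void)
{
	sca = calloc(1, sizeof(struct small_sca));
	((struct small_sca *)sca)->entries[5] = 42;
	switch_to_big();
	printf("entry 5 after switch: %lu\n", read_entry(5));
	free(sca);
	return 0;
}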
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -622,6 +622,7 @@ struct kvm_s390_crypto_cb {
 struct kvm_arch{
 	void *sca;
 	int use_esca;
+	rwlock_t sca_lock;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
 	struct kvm_device *flic;
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -259,10 +259,14 @@ struct aste {
 
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
-	union ipte_control *ic = kvm_s390_get_ipte_control(vcpu->kvm);
+	if (vcpu->arch.sie_block->eca & 1) {
+		int rc;
 
-	if (vcpu->arch.sie_block->eca & 1)
-		return ic->kh != 0;
+		read_lock(&vcpu->kvm->arch.sca_lock);
+		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
+		read_unlock(&vcpu->kvm->arch.sca_lock);
+		return rc;
+	}
 	return vcpu->kvm->arch.ipte_lock_count != 0;
 }
 
@@ -274,16 +278,20 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
 	vcpu->kvm->arch.ipte_lock_count++;
 	if (vcpu->kvm->arch.ipte_lock_count > 1)
 		goto out;
+retry:
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
-		while (old.k) {
+		if (old.k) {
+			read_unlock(&vcpu->kvm->arch.sca_lock);
 			cond_resched();
-			old = READ_ONCE(*ic);
+			goto retry;
 		}
 		new = old;
 		new.k = 1;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 out:
 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
@@ -296,12 +304,14 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
 	vcpu->kvm->arch.ipte_lock_count--;
 	if (vcpu->kvm->arch.ipte_lock_count)
 		goto out;
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
 		new = old;
 		new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 	wake_up(&vcpu->kvm->arch.ipte_wq);
 out:
 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
@@ -311,23 +321,28 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
 
+retry:
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
-		while (old.kg) {
+		if (old.kg) {
+			read_unlock(&vcpu->kvm->arch.sca_lock);
 			cond_resched();
-			old = READ_ONCE(*ic);
+			goto retry;
 		}
 		new = old;
 		new.k = 1;
 		new.kh++;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
 
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
@@ -336,6 +351,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 		if (!new.kh)
 			new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 	if (!new.kh)
 		wake_up(&vcpu->kvm->arch.ipte_wq);
 }
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -39,6 +39,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
 	int c, scn;
 
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
 		union esca_sigp_ctrl sigp_ctrl =
@@ -54,6 +55,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 		c = sigp_ctrl.c;
 		scn = sigp_ctrl.scn;
 	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 
 	if (src_id)
 		*src_id = scn;
@@ -66,6 +68,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 {
 	int expect, rc;
 
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
 		union esca_sigp_ctrl *sigp_ctrl =
@@ -91,6 +94,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 		expect = old_val.value;
 		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
 	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 
 	if (rc != expect) {
 		/* another external call is pending */
@@ -106,6 +110,7 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 	int rc, expect;
 
 	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
 		union esca_sigp_ctrl *sigp_ctrl =
@@ -123,6 +128,7 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 		expect = old.value;
 		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
 	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 	WARN_ON(rc != expect); /* cannot clear? */
 }
 
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1080,7 +1080,7 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
 static void sca_dispose(struct kvm *kvm)
 {
 	if (kvm->arch.use_esca)
-		BUG(); /* not implemented yet */
+		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
 	else
 		free_page((unsigned long)(kvm->arch.sca));
 	kvm->arch.sca = NULL;
@@ -1110,6 +1110,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	rc = -ENOMEM;
 
 	kvm->arch.use_esca = 0; /* start with basic SCA */
+	rwlock_init(&kvm->arch.sca_lock);
 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
 	if (!kvm->arch.sca)
 		goto out_err;
@@ -1259,6 +1260,7 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 {
+	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
 
@@ -1272,11 +1274,13 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu)
 		if (sca->cpu[vcpu->vcpu_id].sda == (__u64) vcpu->arch.sie_block)
 			sca->cpu[vcpu->vcpu_id].sda = 0;
 	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 
 static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
 			unsigned int id)
 {
+	read_lock(&kvm->arch.sca_lock);
 	if (kvm->arch.use_esca) {
 		struct esca_block *sca = kvm->arch.sca;
 
@@ -1294,11 +1298,78 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu, struct kvm *kvm,
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
 		set_bit_inv(id, (unsigned long *) &sca->mcn);
 	}
+	read_unlock(&kvm->arch.sca_lock);
+}
+
+/* Basic SCA to Extended SCA data copy routines */
+static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
+{
+	d->sda = s->sda;
+	d->sigp_ctrl.c = s->sigp_ctrl.c;
+	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
+}
+
+static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
+{
+	int i;
+
+	d->ipte_control = s->ipte_control;
+	d->mcn[0] = s->mcn;
+	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
+		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
+}
+
+static int sca_switch_to_extended(struct kvm *kvm)
+{
+	struct bsca_block *old_sca = kvm->arch.sca;
+	struct esca_block *new_sca;
+	struct kvm_vcpu *vcpu;
+	unsigned int vcpu_idx;
+	u32 scaol, scaoh;
+
+	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
+	if (!new_sca)
+		return -ENOMEM;
+
+	scaoh = (u32)((u64)(new_sca) >> 32);
+	scaol = (u32)(u64)(new_sca) & ~0x3fU;
+
+	kvm_s390_vcpu_block_all(kvm);
+	write_lock(&kvm->arch.sca_lock);
+
+	sca_copy_b_to_e(new_sca, old_sca);
+
+	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
+		vcpu->arch.sie_block->scaoh = scaoh;
+		vcpu->arch.sie_block->scaol = scaol;
+		vcpu->arch.sie_block->ecb2 |= 0x04U;
+	}
+	kvm->arch.sca = new_sca;
+	kvm->arch.use_esca = 1;
+
+	write_unlock(&kvm->arch.sca_lock);
+	kvm_s390_vcpu_unblock_all(kvm);
+
+	free_page((unsigned long)old_sca);
+
+	VM_EVENT(kvm, 2, "Switched to ESCA (%p -> %p)", old_sca, kvm->arch.sca);
+	return 0;
 }
 
 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
 {
-	return id < KVM_MAX_VCPUS;
+	int rc;
+
+	if (id < KVM_S390_BSCA_CPU_SLOTS)
+		return true;
+	if (!sclp.has_esca)
+		return false;
+
+	mutex_lock(&kvm->lock);
+	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
+	mutex_unlock(&kvm->lock);
+
+	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)