Merge tag 'kvm-s390-next-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390 features, kvm_get_vcpu_by_id and stat

Several features for s390
1. ESCA support (up to 248 vCPUs)
2. KVM detection: we can now detect whether running KVM is supported
   (e.g. does KVM under KVM work?)
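
A condensed look at how item 1 is implemented, taken from the
arch/s390/kvm/kvm-s390.c hunk further down (explanatory comments added
here; not a standalone snippet): every VM starts with the 64-entry basic
SCA and is switched to the 248-entry extended SCA only when a vCPU id
beyond 63 is requested and the machine offers the ESCA facility.

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	/* The basic SCA covers vCPU ids 0..63. */
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;

	/* Higher ids need the extended-SCA facility... */
	if (!sclp.has_esca)
		return false;

	/* ...and a one-time switch of this VM to the extended SCA. */
	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

Item 2 surfaces to userspace as a new "sie" flag in the features line of
/proc/cpuinfo (see the processor.c and setup.c hunks), driven by the
sief2 facility bit reported via SCLP.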

kvm_stat:
1. clean up the exit path
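
The stat cleanup shows up in the virt/kvm/kvm_main.c hunks at the end of
the diff: struct kvm_stats_debugfs_item loses its dentry field, so module
exit no longer walks debugfs_entries[] removing each file individually. A
condensed view (only the touched lines; the rest of kvm_exit() is
unchanged and omitted here):

void kvm_exit(void)
{
	/* A single recursive remove replaces the old kvm_exit_debug()
	 * loop that dropped every stored dentry one by one. */
	debugfs_remove_recursive(kvm_debugfs_dir);
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	/* ... remaining teardown unchanged ... */
}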

kvm_get_vcpu_by_id:
1. Use kvm_get_vcpu_by_id where appropriate
2. Apply a heuristic that optimizes for the common case where a vCPU's id
   equals its index in the vCPU array (ID VCPU == No. VCPU)
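
The heuristic in item 2 lives in the common-code helper shown in the
include/linux/kvm_host.h hunk below: try the vCPU array slot whose index
equals the requested id first, and only fall back to scanning all vCPUs
when that guess misses. Reproduced here with comments added:

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (id < 0 || id >= KVM_MAX_VCPUS)
		return NULL;
	/* Fast path: most userspaces create vCPU N with id N. */
	vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	/* Slow path: ids and array indices do not line up, scan them all. */
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}
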
Paolo Bonzini 2015-12-02 13:50:04 +01:00
commit 4601463485
15 changed files with 407 additions and 159 deletions


@ -308,16 +308,10 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
int r;
struct kvm_vcpu *v, *ret = NULL;
struct kvm_vcpu *ret;
mutex_lock(&kvm->lock);
kvm_for_each_vcpu(r, v, kvm) {
if (v->vcpu_id == id) {
ret = v;
break;
}
}
ret = kvm_get_vcpu_by_id(kvm, id);
mutex_unlock(&kvm->lock);
return ret;
}


@ -104,6 +104,9 @@
#define HWCAP_S390_TE 1024
#define HWCAP_S390_VXRS 2048
/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE 1UL
/*
* These are used to set parameters in the core dumps.
*/
@ -169,6 +172,10 @@ extern unsigned int vdso_enabled;
extern unsigned long elf_hwcap;
#define ELF_HWCAP (elf_hwcap)
/* Internal hardware capabilities, not exposed via elf */
extern unsigned long int_hwcap;
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.


@ -25,7 +25,9 @@
#include <asm/fpu/api.h>
#include <asm/isc.h>
#define KVM_MAX_VCPUS 64
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
#define KVM_USER_MEM_SLOTS 32
/*
@ -40,9 +42,34 @@
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
struct sca_entry {
union bsca_sigp_ctrl {
__u8 value;
struct {
__u8 c : 1;
__u8 r : 1;
__u8 scn : 6;
};
} __packed;
union esca_sigp_ctrl {
__u16 value;
struct {
__u8 c : 1;
__u8 reserved: 7;
__u8 scn;
};
} __packed;
struct esca_entry {
union esca_sigp_ctrl sigp_ctrl;
__u16 reserved1[3];
__u64 sda;
__u64 reserved2[6];
} __packed;
struct bsca_entry {
__u8 reserved0;
__u8 sigp_ctrl;
union bsca_sigp_ctrl sigp_ctrl;
__u16 reserved[3];
__u64 sda;
__u64 reserved2[2];
@ -57,14 +84,22 @@ union ipte_control {
};
};
struct sca_block {
struct bsca_block {
union ipte_control ipte_control;
__u64 reserved[5];
__u64 mcn;
__u64 reserved2;
struct sca_entry cpu[64];
struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
} __attribute__((packed));
struct esca_block {
union ipte_control ipte_control;
__u64 reserved1[7];
__u64 mcn[4];
__u64 reserved2[20];
struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
} __packed;
#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000
@ -585,7 +620,9 @@ struct kvm_s390_crypto_cb {
};
struct kvm_arch{
struct sca_block *sca;
void *sca;
int use_esca;
rwlock_t sca_lock;
debug_info_t *dbf;
struct kvm_s390_float_interrupt float_int;
struct kvm_device *flic;


@ -29,7 +29,10 @@ struct sclp_ipl_info {
struct sclp_core_entry {
u8 core_id;
u8 reserved0[2];
u8 reserved0;
u8 : 4;
u8 sief2 : 1;
u8 : 3;
u8 : 3;
u8 siif : 1;
u8 sigpif : 1;
@ -53,6 +56,9 @@ struct sclp_info {
unsigned char has_sigpif : 1;
unsigned char has_core_type : 1;
unsigned char has_sprp : 1;
unsigned char has_hvs : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;


@ -61,6 +61,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs", "te", "vx"
};
static const char * const int_hwcap_str[] = {
"sie"
};
unsigned long n = (unsigned long) v - 1;
int i;
@ -75,6 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
seq_printf(m, "%s ", hwcap_str[i]);
for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
seq_printf(m, "%s ", int_hwcap_str[i]);
seq_puts(m, "\n");
show_cacheinfo(m);
}


@ -80,6 +80,8 @@ EXPORT_SYMBOL(console_irq);
unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
int __initdata memory_end_set;
unsigned long __initdata memory_end;
unsigned long __initdata max_physmem_end;
@ -793,6 +795,13 @@ static int __init setup_hwcaps(void)
strcpy(elf_platform, "z13");
break;
}
/*
* Virtualization support HWCAP_INT_SIE is bit 0.
*/
if (sclp.has_sief2)
int_hwcap |= HWCAP_INT_SIE;
return 0;
}
arch_initcall(setup_hwcaps);


@ -155,10 +155,8 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tcpu;
int tid;
int i;
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
vcpu->stat.diagnose_9c++;
@ -167,12 +165,9 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
if (tid == vcpu->vcpu_id)
return 0;
kvm_for_each_vcpu(i, tcpu, kvm)
if (tcpu->vcpu_id == tid) {
tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
if (tcpu)
kvm_vcpu_yield_to(tcpu);
break;
}
return 0;
}


@ -259,10 +259,14 @@ struct aste {
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control;
if (vcpu->arch.sie_block->eca & 1) {
int rc;
if (vcpu->arch.sie_block->eca & 1)
return ic->kh != 0;
read_lock(&vcpu->kvm->arch.sca_lock);
rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
read_unlock(&vcpu->kvm->arch.sca_lock);
return rc;
}
return vcpu->kvm->arch.ipte_lock_count != 0;
}
@ -274,16 +278,20 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
vcpu->kvm->arch.ipte_lock_count++;
if (vcpu->kvm->arch.ipte_lock_count > 1)
goto out;
ic = &vcpu->kvm->arch.sca->ipte_control;
retry:
read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
while (old.k) {
if (old.k) {
read_unlock(&vcpu->kvm->arch.sca_lock);
cond_resched();
old = READ_ONCE(*ic);
goto retry;
}
new = old;
new.k = 1;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&vcpu->kvm->arch.sca_lock);
out:
mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
}
@ -296,12 +304,14 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
vcpu->kvm->arch.ipte_lock_count--;
if (vcpu->kvm->arch.ipte_lock_count)
goto out;
ic = &vcpu->kvm->arch.sca->ipte_control;
read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
new = old;
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&vcpu->kvm->arch.sca_lock);
wake_up(&vcpu->kvm->arch.ipte_wq);
out:
mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
@ -311,24 +321,29 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
ic = &vcpu->kvm->arch.sca->ipte_control;
retry:
read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
while (old.kg) {
if (old.kg) {
read_unlock(&vcpu->kvm->arch.sca_lock);
cond_resched();
old = READ_ONCE(*ic);
goto retry;
}
new = old;
new.k = 1;
new.kh++;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
{
union ipte_control old, new, *ic;
ic = &vcpu->kvm->arch.sca->ipte_control;
read_lock(&vcpu->kvm->arch.sca_lock);
ic = kvm_s390_get_ipte_control(vcpu->kvm);
do {
old = READ_ONCE(*ic);
new = old;
@ -336,6 +351,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
if (!new.kh)
new.k = 0;
} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
read_unlock(&vcpu->kvm->arch.sca_lock);
if (!new.kh)
wake_up(&vcpu->kvm->arch.ipte_wq);
}


@ -54,9 +54,6 @@ void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
static int handle_noop(struct kvm_vcpu *vcpu)
{
switch (vcpu->arch.sie_block->icptcode) {
case 0x0:
vcpu->stat.exit_null++;
break;
case 0x10:
vcpu->stat.exit_external_request++;
break;
@ -338,8 +335,10 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
if (kvm_is_ucontrol(vcpu->kvm))
return -EOPNOTSUPP;
switch (vcpu->arch.sie_block->icptcode) {
case 0x00:
case 0x10:
case 0x18:
return handle_noop(vcpu);


@ -34,6 +34,106 @@
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
int c, scn;
if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
return 0;
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl sigp_ctrl =
sca->cpu[vcpu->vcpu_id].sigp_ctrl;
c = sigp_ctrl.c;
scn = sigp_ctrl.scn;
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl sigp_ctrl =
sca->cpu[vcpu->vcpu_id].sigp_ctrl;
c = sigp_ctrl.c;
scn = sigp_ctrl.scn;
}
read_unlock(&vcpu->kvm->arch.sca_lock);
if (src_id)
*src_id = scn;
return c;
}
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
int expect, rc;
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
expect = old_val.value;
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
new_val.scn = src_id;
new_val.c = 1;
old_val.c = 0;
expect = old_val.value;
rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
}
read_unlock(&vcpu->kvm->arch.sca_lock);
if (rc != expect) {
/* another external call is pending */
return -EBUSY;
}
atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
return 0;
}
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc, expect;
atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
union esca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union esca_sigp_ctrl old = *sigp_ctrl;
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
union bsca_sigp_ctrl *sigp_ctrl =
&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
union bsca_sigp_ctrl old = *sigp_ctrl;
expect = old.value;
rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
}
read_unlock(&vcpu->kvm->arch.sca_lock);
WARN_ON(rc != expect); /* cannot clear? */
}
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@ -792,13 +892,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
if (!sclp.has_sigpif)
return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
return (sigp_ctrl & SIGP_CTRL_C) &&
(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
return sca_ext_call_pending(vcpu, NULL);
}
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
@ -909,9 +1007,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
memset(&li->irq, 0, sizeof(li->irq));
spin_unlock(&li->lock);
/* clear pending external calls set by sigp interpretation facility */
atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
sca_clear_ext_call(vcpu);
}
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
@ -1003,21 +1099,6 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
return 0;
}
static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
unsigned char new_val, old_val;
uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
old_val = *sigp_ctrl & ~SIGP_CTRL_C;
if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
/* another external call is pending */
return -EBUSY;
}
atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
return 0;
}
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@ -1034,7 +1115,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
return -EINVAL;
if (sclp.has_sigpif)
return __inject_extcall_sigpif(vcpu, src_id);
return sca_inject_ext_call(vcpu, src_id);
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
return -EBUSY;
@ -2203,7 +2284,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
{
uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
int scn;
unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
unsigned long pending_irqs;
@ -2243,14 +2324,12 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
}
}
if ((sigp_ctrl & SIGP_CTRL_C) &&
(atomic_read(&vcpu->arch.sie_block->cpuflags) &
CPUSTAT_ECALL_PEND)) {
if (sca_ext_call_pending(vcpu, &scn)) {
if (n + sizeof(irq) > len)
return -ENOBUFS;
memset(&irq, 0, sizeof(irq));
irq.type = KVM_S390_INT_EXTERNAL_CALL;
irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
irq.u.extcall.code = scn;
if (copy_to_user(&buf[n], &irq, sizeof(irq)))
return -EFAULT;
n += sizeof(irq);


@ -246,7 +246,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
: KVM_S390_BSCA_CPU_SLOTS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
@ -283,6 +284,8 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
}
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@ -1024,7 +1027,7 @@ static int kvm_s390_apxa_installed(void)
u8 config[128];
int cc;
if (test_facility(2) && test_facility(12)) {
if (test_facility(12)) {
cc = kvm_s390_query_ap_config(config);
if (cc)
@ -1075,6 +1078,15 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
return 0;
}
static void sca_dispose(struct kvm *kvm)
{
if (kvm->arch.use_esca)
free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
else
free_page((unsigned long)(kvm->arch.sca));
kvm->arch.sca = NULL;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int i, rc;
@ -1098,14 +1110,17 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
rc = -ENOMEM;
kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
kvm->arch.use_esca = 0; /* start with basic SCA */
rwlock_init(&kvm->arch.sca_lock);
kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
if (!kvm->arch.sca)
goto out_err;
spin_lock(&kvm_lock);
sca_offset += 16;
if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
sca_offset = 0;
kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
kvm->arch.sca = (struct bsca_block *)
((char *) kvm->arch.sca + sca_offset);
spin_unlock(&kvm_lock);
sprintf(debug_name, "kvm-%u", current->pid);
@ -1177,7 +1192,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kfree(kvm->arch.crypto.crycb);
free_page((unsigned long)kvm->arch.model.fac);
debug_unregister(kvm->arch.dbf);
free_page((unsigned long)(kvm->arch.sca));
sca_dispose(kvm);
KVM_EVENT(3, "creation of vm failed: %d", rc);
return rc;
}
@ -1188,13 +1203,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
kvm_s390_clear_local_irqs(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_is_ucontrol(vcpu->kvm)) {
clear_bit(63 - vcpu->vcpu_id,
(unsigned long *) &vcpu->kvm->arch.sca->mcn);
if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
(__u64) vcpu->arch.sie_block)
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
}
if (!kvm_is_ucontrol(vcpu->kvm))
sca_del_vcpu(vcpu);
smp_mb();
if (kvm_is_ucontrol(vcpu->kvm))
@ -1228,7 +1238,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_free_vcpus(kvm);
free_page((unsigned long)kvm->arch.model.fac);
free_page((unsigned long)(kvm->arch.sca));
sca_dispose(kvm);
debug_unregister(kvm->arch.dbf);
kfree(kvm->arch.crypto.crycb);
if (!kvm_is_ucontrol(kvm))
@ -1249,6 +1259,116 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
return 0;
}
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
sca->cpu[vcpu->vcpu_id].sda = 0;
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
sca->cpu[vcpu->vcpu_id].sda = 0;
}
read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
vcpu->arch.sie_block->ecb2 |= 0x04U;
set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
} else {
struct bsca_block *sca = vcpu->kvm->arch.sca;
sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
}
read_unlock(&vcpu->kvm->arch.sca_lock);
}
/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
d->sda = s->sda;
d->sigp_ctrl.c = s->sigp_ctrl.c;
d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}
static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
int i;
d->ipte_control = s->ipte_control;
d->mcn[0] = s->mcn;
for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
static int sca_switch_to_extended(struct kvm *kvm)
{
struct bsca_block *old_sca = kvm->arch.sca;
struct esca_block *new_sca;
struct kvm_vcpu *vcpu;
unsigned int vcpu_idx;
u32 scaol, scaoh;
new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
if (!new_sca)
return -ENOMEM;
scaoh = (u32)((u64)(new_sca) >> 32);
scaol = (u32)(u64)(new_sca) & ~0x3fU;
kvm_s390_vcpu_block_all(kvm);
write_lock(&kvm->arch.sca_lock);
sca_copy_b_to_e(new_sca, old_sca);
kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
vcpu->arch.sie_block->scaoh = scaoh;
vcpu->arch.sie_block->scaol = scaol;
vcpu->arch.sie_block->ecb2 |= 0x04U;
}
kvm->arch.sca = new_sca;
kvm->arch.use_esca = 1;
write_unlock(&kvm->arch.sca_lock);
kvm_s390_vcpu_unblock_all(kvm);
free_page((unsigned long)old_sca);
VM_EVENT(kvm, 2, "Switched to ESCA (%p -> %p)", old_sca, kvm->arch.sca);
return 0;
}
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
int rc;
if (id < KVM_S390_BSCA_CPU_SLOTS)
return true;
if (!sclp.has_esca)
return false;
mutex_lock(&kvm->lock);
rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
mutex_unlock(&kvm->lock);
return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@ -1369,8 +1489,11 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
preempt_enable();
mutex_unlock(&vcpu->kvm->lock);
if (!kvm_is_ucontrol(vcpu->kvm))
if (!kvm_is_ucontrol(vcpu->kvm)) {
vcpu->arch.gmap = vcpu->kvm->arch.gmap;
sca_add_vcpu(vcpu);
}
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
@ -1465,7 +1588,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
struct sie_page *sie_page;
int rc = -EINVAL;
if (id >= KVM_MAX_VCPUS)
if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
goto out;
rc = -ENOMEM;
@ -1482,20 +1605,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
vcpu->arch.sie_block->icpua = id;
if (!kvm_is_ucontrol(kvm)) {
if (!kvm->arch.sca) {
WARN_ON_ONCE(1);
goto out_free_cpu;
}
if (!kvm->arch.sca->cpu[id].sda)
kvm->arch.sca->cpu[id].sda =
(__u64) vcpu->arch.sie_block;
vcpu->arch.sie_block->scaoh =
(__u32)(((__u64)kvm->arch.sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
vcpu->arch.local_int.wq = &vcpu->wq;
@ -1509,10 +1618,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
*/
vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
GFP_KERNEL);
if (!vcpu->arch.guest_fpregs.fprs) {
rc = -ENOMEM;
if (!vcpu->arch.guest_fpregs.fprs)
goto out_free_sie_block;
}
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
@ -2071,8 +2178,6 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
int rc = -1;
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
@ -2080,40 +2185,35 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
if (guestdbg_enabled(vcpu))
kvm_s390_restore_guest_per_regs(vcpu);
if (exit_reason >= 0) {
rc = 0;
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
if (vcpu->arch.sie_block->icptcode > 0) {
int rc = kvm_handle_sie_intercept(vcpu);
if (rc != -EOPNOTSUPP)
return rc;
vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
return -EREMOTE;
} else if (exit_reason != -EFAULT) {
vcpu->stat.exit_null++;
return 0;
} else if (kvm_is_ucontrol(vcpu->kvm)) {
vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
vcpu->run->s390_ucontrol.trans_exc_code =
current->thread.gmap_addr;
vcpu->run->s390_ucontrol.pgm_code = 0x10;
rc = -EREMOTE;
return -EREMOTE;
} else if (current->thread.gmap_pfault) {
trace_kvm_s390_major_guest_pfault(vcpu);
current->thread.gmap_pfault = 0;
if (kvm_arch_setup_async_pf(vcpu)) {
rc = 0;
} else {
gpa_t gpa = current->thread.gmap_addr;
rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
if (kvm_arch_setup_async_pf(vcpu))
return 0;
return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
}
}
if (rc == -1)
rc = vcpu_post_run_fault_in_sie(vcpu);
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
if (rc == 0) {
if (kvm_is_ucontrol(vcpu->kvm))
/* Don't exit for host interrupts. */
rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
else
rc = kvm_handle_sie_intercept(vcpu);
}
return rc;
return vcpu_post_run_fault_in_sie(vcpu);
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
@ -2233,18 +2333,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
rc = 0;
}
if (rc == -EOPNOTSUPP) {
/* intercept cannot be handled in-kernel, prepare kvm-run */
kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
rc = 0;
}
if (rc == -EREMOTE) {
/* intercept was handled, but userspace support is needed
* kvm_run has been prepared by the handler */
/* userspace support is needed, kvm_run has been prepared */
rc = 0;
}
@ -2767,6 +2857,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
static int __init kvm_s390_init(void)
{
if (!sclp.has_sief2) {
pr_info("SIE not available\n");
return -ENODEV;
}
return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}


@ -340,4 +340,11 @@ void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
return &sca->ipte_control;
}
#endif


@ -43,7 +43,10 @@ struct read_info_sccb {
u8 _pad_92[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
u64 rnmax2; /* 104-111 */
u8 _pad_112[120 - 112]; /* 112-119 */
u8 _pad_112[116 - 112]; /* 112-115 */
u8 fac116; /* 116 */
u8 _pad_117[119 - 117]; /* 117-118 */
u8 fac119; /* 119 */
u16 hcpua; /* 120-121 */
u8 _pad_122[4096 - 122]; /* 122-4095 */
} __packed __aligned(PAGE_SIZE);
@ -108,6 +111,8 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp.facilities = sccb->facilities;
sclp.has_sprp = !!(sccb->fac84 & 0x02);
sclp.has_core_type = !!(sccb->fac84 & 0x01);
sclp.has_esca = !!(sccb->fac116 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
@ -131,6 +136,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
continue;
sclp.has_siif = cpue->siif;
sclp.has_sigpif = cpue->sigpif;
sclp.has_sief2 = cpue->sief2;
break;
}


@ -472,6 +472,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
struct kvm_vcpu *vcpu;
int i;
if (id < 0 || id >= KVM_MAX_VCPUS)
return NULL;
vcpu = kvm_get_vcpu(kvm, id);
if (vcpu && vcpu->vcpu_id == id)
return vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
if (vcpu->vcpu_id == id)
return vcpu;
@ -1011,7 +1016,6 @@ struct kvm_stats_debugfs_item {
const char *name;
int offset;
enum kvm_stat_kind kind;
struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;


@ -2257,7 +2257,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
struct kvm_vcpu *vcpu, *v;
struct kvm_vcpu *vcpu;
if (id >= KVM_MAX_VCPUS)
return -EINVAL;
@ -2281,9 +2281,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
r = -EINVAL;
goto unlock_vcpu_destroy;
}
kvm_for_each_vcpu(r, v, kvm)
if (v->vcpu_id == id) {
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
goto unlock_vcpu_destroy;
}
@ -3449,10 +3447,9 @@ static int kvm_init_debug(void)
goto out;
for (p = debugfs_entries; p->name; ++p) {
p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]);
if (p->dentry == NULL)
stat_fops[p->kind]))
goto out_dir;
}
@ -3464,15 +3461,6 @@ static int kvm_init_debug(void)
return r;
}
static void kvm_exit_debug(void)
{
struct kvm_stats_debugfs_item *p;
for (p = debugfs_entries; p->name; ++p)
debugfs_remove(p->dentry);
debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(void)
{
if (kvm_usage_count)
@ -3630,7 +3618,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
kvm_exit_debug();
debugfs_remove_recursive(kvm_debugfs_dir);
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
kvm_async_pf_deinit();