KVM/arm fixes for 5.8, take #2
- Make sure a vcpu becoming non-resident doesn't race against the
  doorbell delivery
- Only advertise pvtime if accounting is enabled
- Return the correct error code if reset fails with SVE
- Make sure that pseudo-NMI functions are annotated as __always_inline

Merge tag 'kvmarm-fixes-5.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master
commit 6e1d72f1ea
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
 	return read_sysreg_s(SYS_ICC_PMR_EL1);
 }
 
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
 {
 	write_sysreg_s(val, SYS_ICC_PMR_EL1);
 }
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
 		cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
 	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
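Note on the two hunks above: plain "inline" is only a hint, and with CONFIG_OPTIMIZE_INLINING the compiler may still emit an out-of-line copy of the function. __always_inline makes inlining mandatory, so helpers on the pseudo-NMI path are guaranteed to be folded into their callers instead of becoming real calls while interrupt priorities are being manipulated. A minimal user-space sketch of the distinction, with a hand-rolled stand-in for the kernel's macro (the kernel's own definition lives in include/linux/compiler_types.h):

/* Stand-in for the kernel macro, so this sketch is self-contained. */
#define always_inline_ inline __attribute__((__always_inline__))

/* "inline" alone is a hint: the compiler may still emit this out of
 * line and generate a call to it. */
static inline int add_hinted(int a, int b)
{
	return a + b;
}

/* The attribute makes inlining mandatory: the body is folded into
 * every caller and never exists as a separate symbol. */
static always_inline_ int add_forced(int a, int b)
{
	return a + b;
}

For system_uses_irq_prio_masking() this also keeps the IS_ENABLED() and capability checks resolved in the caller's context rather than behind a function call.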
@@ -3,6 +3,7 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
 
 #include <asm/kvm_mmu.h>
 #include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	return base;
 }
 
+static bool kvm_arm_pvtime_supported(void)
+{
+	return !!sched_info_on();
+}
+
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr)
 {
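The new helper keys pvtime support off sched_info_on(), which reports whether the host collects run-delay statistics; that is the raw material stolen time is computed from, so advertising pvtime without it would hand the guest a steal counter that never moves. For reference, the v5.8-era definition in include/linux/sched/stat.h looks roughly like the following (quoted from memory, so treat it as approximate); it also explains the !!, since the function returns an int rather than a bool:

/* Approximate shape of sched_info_on() around v5.8; the authoritative
 * version is in include/linux/sched/stat.h. */
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}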
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 	int ret = 0;
 	int idx;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
 	u64 __user *user = (u64 __user *)attr->addr;
 	u64 ipa;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 {
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PVTIME_IPA:
-		return 0;
+		if (kvm_arm_pvtime_supported())
+			return 0;
 	}
 	return -ENXIO;
 }
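With all three accessors gated, user space gets a consistent answer: when the host cannot account stolen time, the pvtime attribute can be neither probed, read, nor written, and each path fails with -ENXIO. A sketch of how a VMM would probe for support using the standard KVM_HAS_DEVICE_ATTR vcpu ioctl (the vcpu fd plumbing around it is assumed):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 1 if this vcpu supports the pvtime IPA attribute, 0 if the
 * kernel reports no support (ENXIO), -1 on any other error.
 * vcpu_fd is assumed to come from KVM_CREATE_VCPU. */
static int vcpu_has_pvtime(int vcpu_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr  = KVM_ARM_VCPU_PVTIME_IPA,
	};

	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
		return 1;	/* safe to set the stolen-time IPA */
	return errno == ENXIO ? 0 : -1;
}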
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-	int ret = -EINVAL;
+	int ret;
 	bool loaded;
 	u32 pstate;
 
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-		if (kvm_vcpu_enable_ptrauth(vcpu))
+		if (kvm_vcpu_enable_ptrauth(vcpu)) {
+			ret = -EINVAL;
 			goto out;
+		}
 	}
 
 	switch (vcpu->arch.target) {
 	default:
 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+				ret = -EINVAL;
 				goto out;
+			}
 			pstate = VCPU_RESET_PSTATE_SVC;
 		} else {
 			pstate = VCPU_RESET_PSTATE_EL1;
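A plausible reconstruction of the bug fixed by these two reset.c hunks: ret started life as -EINVAL, but the SVE setup earlier in kvm_reset_vcpu() (not shown in this diff) assigns ret the return value of its helper, which is 0 on success. Every later bare "goto out" then returned that stale 0, so a reset that failed after SVE had been enabled reported success. Dropping the pre-seed and assigning -EINVAL at each failure site makes every exit path own its error code. The anti-pattern, distilled into a hypothetical example:

#include <errno.h>

/* Hypothetical helpers standing in for the vcpu setup steps. */
static int enable_feature(void)    { return 0; }	/* succeeds */
static int config_is_invalid(void) { return 1; }	/* later check fails */

static int reset_buggy(void)
{
	int ret = -EINVAL;	/* pre-seeded default error...       */

	ret = enable_feature();	/* ...silently overwritten with 0    */
	if (ret)
		goto out;
	if (config_is_invalid())
		goto out;	/* BUG: returns 0, not -EINVAL       */
out:
	return ret;
}

static int reset_fixed(void)
{
	int ret;

	ret = enable_feature();
	if (ret)
		goto out;
	if (config_is_invalid()) {
		ret = -EINVAL;	/* error set where the failure occurs */
		goto out;
	}
	ret = 0;
out:
	return ret;
}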
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
+	/*
+	 * The v4.1 doorbell can fire concurrently with the vPE being
+	 * made non-resident. Ensure we only update pending_last
+	 * *after* the non-residency sequence has completed.
+	 */
+	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
 
@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
 	u64 val;
 
 	if (info->req_db) {
+		unsigned long flags;
+
 		/*
 		 * vPE is going to block: make the vPE non-resident with
 		 * PendingLast clear and DB set. The GIC guarantees that if
 		 * we read-back PendingLast clear, then a doorbell will be
 		 * delivered when an interrupt comes.
+		 *
+		 * Note the locking to deal with the concurrent update of
+		 * pending_last from the doorbell interrupt handler that can
+		 * run concurrently.
 		 */
+		raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
 		val = its_clear_vpend_valid(vlpi_base,
 					    GICR_VPENDBASER_PendingLast,
 					    GICR_VPENDBASER_4_1_DB);
 		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+		raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
 	} else {
 		/*
 		 * We're not blocking, so just make the vPE non-resident
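The two hunks above are the two halves of the doorbell race called out in the tag message. Both sides write pending_last: the doorbell handler sets it to true, and the deschedule path overwrites it with the value read back from GICR_VPENDBASER. Unserialized, the handler's store can land between the descheduler's read-back and its own store, get overwritten with false, and the blocked vcpu loses the only wakeup it was ever going to get. Note the asymmetry in the lock calls: the handler already runs in hard-irq context, so plain raw_spin_lock() is enough, while the deschedule path runs with interrupts enabled and must mask them with raw_spin_lock_irqsave(), otherwise the doorbell could fire on the same CPU while the lock is held and deadlock on it. A reduced sketch of that pattern, with hypothetical types rather than the kernel's real structures:

#include <linux/spinlock.h>

struct vpe_flag {
	raw_spinlock_t lock;	/* assumed set up with raw_spin_lock_init() */
	bool pending_last;
};

/* Hard-irq context: interrupts are already masked, plain lock is fine. */
static void doorbell_fired(struct vpe_flag *v)
{
	raw_spin_lock(&v->lock);
	v->pending_last = true;
	raw_spin_unlock(&v->lock);
}

/* Task context: the doorbell IRQ could fire on this CPU while we hold
 * the lock, so interrupts must be masked for the critical section. */
static void make_nonresident(struct vpe_flag *v, bool hw_pending)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&v->lock, flags);
	v->pending_last = hw_pending;
	raw_spin_unlock_irqrestore(&v->lock, flags);
}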