KVM: nVMX: handle nested posted interrupts when apicv is disabled for L1

Even when APICv is disabled for L1, it can still be (and, in fact, is)
available for L2. This means we need to always call
vmx_deliver_nested_posted_interrupt() when attempting an interrupt
delivery.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Vitaly Kuznetsov 2020-02-20 18:22:05 +01:00 committed by Paolo Bonzini
parent 93fd9666c2
commit 91a5f413af
4 changed files with 17 additions and 10 deletions

View File

@@ -1146,7 +1146,7 @@ struct kvm_x86_ops {
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
-	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+	int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);

View File

@@ -1046,11 +1046,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 					       apic->regs + APIC_TMR);
 		}

-		if (vcpu->arch.apicv_active)
-			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
-		else {
+		if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
 			kvm_lapic_set_irr(vector, apic);
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		}

View File

@@ -5258,8 +5258,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 		return;
 	}
 }

-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 {
+	if (!vcpu->arch.apicv_active)
+		return -1;
+
 	kvm_lapic_set_irr(vec, vcpu->arch.apic);
 	smp_mb__after_atomic();
@@ -5271,6 +5274,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 		put_cpu();
 	} else
 		kvm_vcpu_wake_up(vcpu);
+
+	return 0;
 }

 static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)

View File

@@ -3822,24 +3822,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
  * 2. If target vcpu isn't running(root mode), kick it to pick up the
  * interrupt from PIR in next vmentry.
  */
-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int r;

 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
 	if (!r)
-		return;
+		return 0;
+
+	if (!vcpu->arch.apicv_active)
+		return -1;

 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
-		return;
+		return 0;

 	/* If a previous notification has sent the IPI, nothing to do. */
 	if (pi_test_and_set_on(&vmx->pi_desc))
-		return;
+		return 0;

 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
 		kvm_vcpu_kick(vcpu);
+
+	return 0;
 }

 /*