kvm: ioapic: Refactor kvm_ioapic_update_eoi()

Refactor the IOAPIC EOI handling code in preparation for a subsequent patch.
There is no functional change.
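
Concretely: the RTC dest-map check moves out of the old __kvm_ioapic_update_eoi()
into rtc_irq_eoi(), which now takes the EOI'd vector, and the per-pin work (ack
notifiers, remote_irr clearing, interrupt-storm throttling) moves into a new
helper, kvm_ioapic_update_eoi_one(). kvm_ioapic_update_eoi() keeps only the
vector-to-pin scan. In outline (a simplified sketch of the new structure, not
the literal code):

	void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
	{
		struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
		int i;

		spin_lock(&ioapic->lock);
		rtc_irq_eoi(ioapic, vcpu, vector);	/* RTC bookkeeping, now vector-aware */
		for (i = 0; i < IOAPIC_NUM_PINS; i++)
			if (ioapic->redirtbl[i].fields.vector == vector)
				/* per-pin EOI handling, split out of the old loop body */
				kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
		spin_unlock(&ioapic->lock);
	}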

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Commit: 1ec2405c7c (parent: e2ed4078a6)
Author: Suravee Suthikulpanit, 2019-11-14 14:15:18 -06:00
Committed by: Paolo Bonzini
1 changed file with 56 additions and 54 deletions

diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -154,10 +154,16 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
 		__rtc_irq_eoi_tracking_restore_one(vcpu);
 }
 
-static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
+static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
+			int vector)
 {
-	if (test_and_clear_bit(vcpu->vcpu_id,
-			       ioapic->rtc_status.dest_map.map)) {
+	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
+
+	/* RTC special handling */
+	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
+	    (vector == dest_map->vectors[vcpu->vcpu_id]) &&
+	    (test_and_clear_bit(vcpu->vcpu_id,
+				ioapic->rtc_status.dest_map.map))) {
 		--ioapic->rtc_status.pending_eoi;
 		rtc_status_pending_eoi_check_valid(ioapic);
 	}
@@ -454,72 +460,68 @@ static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
 }
 
 #define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
-
-static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
-			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
+static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
+				      struct kvm_ioapic *ioapic,
+				      int trigger_mode,
+				      int pin)
 {
-	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
 	struct kvm_lapic *apic = vcpu->arch.apic;
-	int i;
-
-	/* RTC special handling */
-	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
-	    vector == dest_map->vectors[vcpu->vcpu_id])
-		rtc_irq_eoi(ioapic, vcpu);
-
-	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
-		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
-
-		if (ent->fields.vector != vector)
-			continue;
+	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];
 
-		/*
-		 * We are dropping lock while calling ack notifiers because ack
-		 * notifier callbacks for assigned devices call into IOAPIC
-		 * recursively. Since remote_irr is cleared only after call
-		 * to notifiers if the same vector will be delivered while lock
-		 * is dropped it will be put into irr and will be delivered
-		 * after ack notifier returns.
-		 */
-		spin_unlock(&ioapic->lock);
-		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
-		spin_lock(&ioapic->lock);
+	/*
+	 * We are dropping lock while calling ack notifiers because ack
+	 * notifier callbacks for assigned devices call into IOAPIC
+	 * recursively. Since remote_irr is cleared only after call
+	 * to notifiers if the same vector will be delivered while lock
+	 * is dropped it will be put into irr and will be delivered
+	 * after ack notifier returns.
+	 */
+	spin_unlock(&ioapic->lock);
+	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
+	spin_lock(&ioapic->lock);
 
-		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
-		    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
-			continue;
+	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+	    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
+		return;
 
-		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
-		ent->fields.remote_irr = 0;
-		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
-			++ioapic->irq_eoi[i];
-			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
-				/*
-				 * Real hardware does not deliver the interrupt
-				 * immediately during eoi broadcast, and this
-				 * lets a buggy guest make slow progress
-				 * even if it does not correctly handle a
-				 * level-triggered interrupt. Emulate this
-				 * behavior if we detect an interrupt storm.
-				 */
-				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
-				ioapic->irq_eoi[i] = 0;
-				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
-			} else {
-				ioapic_service(ioapic, i, false);
-			}
+	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
+	ent->fields.remote_irr = 0;
+	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
+		++ioapic->irq_eoi[pin];
+		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
+			/*
+			 * Real hardware does not deliver the interrupt
+			 * immediately during eoi broadcast, and this
+			 * lets a buggy guest make slow progress
+			 * even if it does not correctly handle a
+			 * level-triggered interrupt. Emulate this
+			 * behavior if we detect an interrupt storm.
+			 */
+			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
+			ioapic->irq_eoi[pin] = 0;
+			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
 		} else {
-			ioapic->irq_eoi[i] = 0;
+			ioapic_service(ioapic, pin, false);
 		}
+	} else {
+		ioapic->irq_eoi[pin] = 0;
 	}
 }
 
 void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
 {
+	int i;
 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
 
 	spin_lock(&ioapic->lock);
-	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
+	rtc_irq_eoi(ioapic, vcpu, vector);
+	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
+
+		if (ent->fields.vector != vector)
+			continue;
+		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
+	}
 	spin_unlock(&ioapic->lock);
 }
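
With the per-pin handling factored out, a later change that already knows which
redirection-table pin is being EOI'd can, in principle, call the helper directly
under the same lock and skip the vector scan entirely (hypothetical usage, not
part of this patch):

	spin_lock(&ioapic->lock);
	kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, pin);
	spin_unlock(&ioapic->lock);

This is presumably the shape the "subsequent patch" mentioned in the commit
message relies on.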