KVM: arm/arm64: Remove struct vgic_irq pending field
One of the goals behind the VGIC redesign was to get rid of cached or intermediate state in the data structures, but we decided to allow ourselves to precompute the pending value of an IRQ based on the line level and pending latch state. However, this has now become difficult to base proper GICv3 save/restore on, because there is a potential to modify the pending state without knowing if an interrupt is edge or level configured.

See the following post and related message for more background:
https://lists.cs.columbia.edu/pipermail/kvmarm/2017-January/023195.html

This commit gets rid of the precomputed pending field in favor of a function that calculates the value when needed, irq_is_pending(). The soft_pending field is renamed to pending_latch to represent that this latch is the equivalent hardware latch which gets manipulated by the input signal for edge-triggered interrupts and when writing to the SPENDR/CPENDR registers.

After this commit save/restore code should be able to simply restore the pending_latch state, line_level state, and config state in any order and get the desired result.

Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Tested-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
commit 8694e4da66
parent 7a308bb301
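To make the new semantics concrete before the diff, here is a minimal stand-alone C sketch of the derived pending computation. It mirrors the irq_is_pending() helper this commit adds to vgic.h below; enum vgic_irq_config matches the kernel's names, while struct vgic_irq_state is a simplified, hypothetical stand-in for the kernel's struct vgic_irq, not the real definition.

#include <stdbool.h>

/* Simplified stand-in for the kernel's struct vgic_irq (illustrative only). */
enum vgic_irq_config { VGIC_CONFIG_EDGE, VGIC_CONFIG_LEVEL };

struct vgic_irq_state {
	enum vgic_irq_config config;
	bool pending_latch;	/* set by an edge on the input or an ISPENDR-style write */
	bool line_level;	/* sampled input line; meaningful for level IRQs only */
};

/* Mirrors the irq_is_pending() helper introduced by this commit:
 * the pending value is computed on demand instead of being cached. */
static bool irq_is_pending(const struct vgic_irq_state *irq)
{
	if (irq->config == VGIC_CONFIG_EDGE)
		return irq->pending_latch;
	else
		return irq->pending_latch || irq->line_level;
}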
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -101,9 +101,10 @@ struct vgic_irq {
 	 */
 
 	u32 intid;			/* Guest visible INTID */
-	bool pending;
 	bool line_level;		/* Level only */
-	bool soft_pending;		/* Level only */
+	bool pending_latch;		/* The pending latch state used to calculate
+					 * the pending state for both level
+					 * and edge triggered IRQs. */
 	bool active;			/* not used for LPIs */
 	bool enabled;
 	bool hw;			/* Tied to HW IRQ */
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -350,7 +350,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
 		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
 		spin_lock(&irq->irq_lock);
-		irq->pending = pendmask & (1U << bit_nr);
+		irq->pending_latch = pendmask & (1U << bit_nr);
 		vgic_queue_irq_unlock(vcpu->kvm, irq);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -465,7 +465,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 		return -EBUSY;
 
 	spin_lock(&itte->irq->irq_lock);
-	itte->irq->pending = true;
+	itte->irq->pending_latch = true;
 	vgic_queue_irq_unlock(kvm, itte->irq);
 
 	return 0;
@@ -913,7 +913,7 @@ static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
 	if (!itte)
 		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
 
-	itte->irq->pending = false;
+	itte->irq->pending_latch = false;
 
 	return 0;
 }
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -98,7 +98,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
 		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
 
 		spin_lock(&irq->irq_lock);
-		irq->pending = true;
+		irq->pending_latch = true;
 		irq->source |= 1U << source_vcpu->vcpu_id;
 
 		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
@@ -182,7 +182,7 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
 
 		irq->source &= ~((val >> (i * 8)) & 0xff);
 		if (!irq->source)
-			irq->pending = false;
+			irq->pending_latch = false;
 
 		spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
@@ -204,7 +204,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
 		irq->source |= (val >> (i * 8)) & 0xff;
 
 		if (irq->source) {
-			irq->pending = true;
+			irq->pending_latch = true;
 			vgic_queue_irq_unlock(vcpu->kvm, irq);
 		} else {
 			spin_unlock(&irq->irq_lock);
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -646,7 +646,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
 		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
 
 		spin_lock(&irq->irq_lock);
-		irq->pending = true;
+		irq->pending_latch = true;
 
 		vgic_queue_irq_unlock(vcpu->kvm, irq);
 		vgic_put_irq(vcpu->kvm, irq);
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -111,7 +111,7 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		if (irq->pending)
+		if (irq_is_pending(irq))
 			value |= (1U << i);
 
 		vgic_put_irq(vcpu->kvm, irq);
@@ -131,9 +131,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
 		spin_lock(&irq->irq_lock);
-		irq->pending = true;
-		if (irq->config == VGIC_CONFIG_LEVEL)
-			irq->soft_pending = true;
+		irq->pending_latch = true;
 
 		vgic_queue_irq_unlock(vcpu->kvm, irq);
 		vgic_put_irq(vcpu->kvm, irq);
@@ -152,12 +150,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 
 		spin_lock(&irq->irq_lock);
 
-		if (irq->config == VGIC_CONFIG_LEVEL) {
-			irq->soft_pending = false;
-			irq->pending = irq->line_level;
-		} else {
-			irq->pending = false;
-		}
+		irq->pending_latch = false;
 
 		spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
@@ -359,12 +352,10 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 		spin_lock(&irq->irq_lock);
 
-		if (test_bit(i * 2 + 1, &val)) {
+		if (test_bit(i * 2 + 1, &val))
 			irq->config = VGIC_CONFIG_EDGE;
-		} else {
+		else
 			irq->config = VGIC_CONFIG_LEVEL;
-			irq->pending = irq->line_level | irq->soft_pending;
-		}
 
 		spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -104,7 +104,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & GICH_LR_PENDING_BIT)) {
-			irq->pending = true;
+			irq->pending_latch = true;
 
 			if (vgic_irq_is_sgi(intid)) {
 				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
@@ -120,9 +120,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 		 */
 		if (irq->config == VGIC_CONFIG_LEVEL) {
 			if (!(val & GICH_LR_PENDING_BIT))
-				irq->soft_pending = false;
-
-			irq->pending = irq->line_level || irq->soft_pending;
+				irq->pending_latch = false;
 		}
 
 		spin_unlock(&irq->irq_lock);
@@ -145,11 +143,11 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 {
 	u32 val = irq->intid;
 
-	if (irq->pending) {
+	if (irq_is_pending(irq)) {
 		val |= GICH_LR_PENDING_BIT;
 
 		if (irq->config == VGIC_CONFIG_EDGE)
-			irq->pending = false;
+			irq->pending_latch = false;
 
 		if (vgic_irq_is_sgi(irq->intid)) {
 			u32 src = ffs(irq->source);
@@ -158,7 +156,7 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
 			if (irq->source)
-				irq->pending = true;
+				irq->pending_latch = true;
 		}
 	}
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -94,7 +94,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & ICH_LR_PENDING_BIT)) {
-			irq->pending = true;
+			irq->pending_latch = true;
 
 			if (vgic_irq_is_sgi(intid) &&
 			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
@@ -111,9 +111,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		 */
 		if (irq->config == VGIC_CONFIG_LEVEL) {
 			if (!(val & ICH_LR_PENDING_BIT))
-				irq->soft_pending = false;
-
-			irq->pending = irq->line_level || irq->soft_pending;
+				irq->pending_latch = false;
 		}
 
 		spin_unlock(&irq->irq_lock);
@@ -127,11 +125,11 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u64 val = irq->intid;
 
-	if (irq->pending) {
+	if (irq_is_pending(irq)) {
 		val |= ICH_LR_PENDING_BIT;
 
 		if (irq->config == VGIC_CONFIG_EDGE)
-			irq->pending = false;
+			irq->pending_latch = false;
 
 		if (vgic_irq_is_sgi(irq->intid) &&
 		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
@@ -141,7 +139,7 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
 			if (irq->source)
-				irq->pending = true;
+				irq->pending_latch = true;
 		}
 	}
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -160,7 +160,7 @@ static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
 	 * If the distributor is disabled, pending interrupts shouldn't be
 	 * forwarded.
 	 */
-	if (irq->enabled && irq->pending) {
+	if (irq->enabled && irq_is_pending(irq)) {
 		if (unlikely(irq->target_vcpu &&
 			     !irq->target_vcpu->kvm->arch.vgic.enabled))
 			return NULL;
@@ -204,8 +204,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 		goto out;
 	}
 
-	penda = irqa->enabled && irqa->pending;
-	pendb = irqb->enabled && irqb->pending;
+	penda = irqa->enabled && irq_is_pending(irqa);
+	pendb = irqb->enabled && irq_is_pending(irqb);
 
 	if (!penda || !pendb) {
 		ret = (int)pendb - (int)penda;
@@ -371,12 +371,10 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 		return 0;
 	}
 
-	if (irq->config == VGIC_CONFIG_LEVEL) {
+	if (irq->config == VGIC_CONFIG_LEVEL)
 		irq->line_level = level;
-		irq->pending = level || irq->soft_pending;
-	} else {
-		irq->pending = true;
-	}
+	else
+		irq->pending_latch = true;
 
 	vgic_queue_irq_unlock(kvm, irq);
 	vgic_put_irq(kvm, irq);
@@ -689,7 +687,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		spin_lock(&irq->irq_lock);
-		pending = irq->pending && irq->enabled;
+		pending = irq_is_pending(irq) && irq->enabled;
 		spin_unlock(&irq->irq_lock);
 
 		if (pending)
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -30,6 +30,14 @@
 
 #define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
 
+static inline bool irq_is_pending(struct vgic_irq *irq)
+{
+	if (irq->config == VGIC_CONFIG_EDGE)
+		return irq->pending_latch;
+	else
+		return irq->pending_latch || irq->line_level;
+}
+
 struct vgic_vmcr {
 	u32 ctlr;
 	u32 abpr;
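As a closing note on the commit message's save/restore claim: with the old cached field, a CPENDR-style clear had to consult the trigger configuration to recompute pending, so restoring pending state before config could bake in a stale value; after this patch the same operation only touches the latch. A hedged sketch follows, reusing enum vgic_irq_config and struct vgic_irq_state from the sketch above; struct old_vgic_irq and the two helpers are hypothetical condensations of the pre- and post-patch vgic_mmio_write_cpending() logic, not kernel code.

/* Hypothetical stand-in for the pre-patch struct vgic_irq fields. */
struct old_vgic_irq {
	enum vgic_irq_config config;
	bool pending;		/* precomputed cache, removed by this patch */
	bool soft_pending;	/* renamed to pending_latch by this patch */
	bool line_level;
};

/* Pre-patch: clearing pending state needs irq->config to recompute the
 * cached 'pending' field, so the result depends on what 'config' holds
 * at the time of the write -- restore order matters. */
static void old_clear_pending(struct old_vgic_irq *irq)
{
	if (irq->config == VGIC_CONFIG_LEVEL) {
		irq->soft_pending = false;
		irq->pending = irq->line_level;
	} else {
		irq->pending = false;
	}
}

/* Post-patch: only the latch is written; the pending value is derived
 * later by irq_is_pending() from the final field values, so
 * pending_latch, line_level and config can be restored in any order. */
static void new_clear_pending(struct vgic_irq_state *irq)
{
	irq->pending_latch = false;
}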