x86/irq: Protect smp_cleanup_move

smp_cleanup_move fiddles with the interrupt descriptors and the vector
array without any protection. A concurrent irq setup/teardown or
affinity setting can pull the rug out from under that operation.

Add proper locking.
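
To avoid inverting the established lock order (the setup/affinity
paths take desc->lock first and acquire vector_lock inside it), the
cleanup interrupt holds vector_lock across the scan and only trylocks
desc->lock, dropping vector_lock and retrying on contention. A minimal
user-space sketch of that trylock-and-retry pattern, illustrative only
and not part of the patch, with pthread mutexes standing in for the
kernel's raw spinlocks and made-up function names:

/*
 * Sketch only; build with: cc -pthread lockorder.c
 * Models why the cleanup path cannot simply block on desc->lock
 * while holding vector_lock.
 */
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t desc_lock   = PTHREAD_MUTEX_INITIALIZER; /* outer elsewhere */
static pthread_mutex_t vector_lock = PTHREAD_MUTEX_INITIALIZER; /* inner elsewhere */

/* Setup/affinity side: nests vector_lock inside desc_lock. */
static void setup_side(void)
{
	pthread_mutex_lock(&desc_lock);
	pthread_mutex_lock(&vector_lock);
	/* ... allocate/assign a vector ... */
	pthread_mutex_unlock(&vector_lock);
	pthread_mutex_unlock(&desc_lock);
}

/*
 * Cleanup side: already holds vector_lock, so blocking on desc_lock
 * could deadlock against setup_side(). Trylock and back off instead.
 */
static void cleanup_side(void)
{
	pthread_mutex_lock(&vector_lock);
retry:
	if (pthread_mutex_trylock(&desc_lock)) {
		pthread_mutex_unlock(&vector_lock);
		sched_yield();		/* stands in for cpu_relax() */
		pthread_mutex_lock(&vector_lock);
		goto retry;
	}
	/* ... both locks held: clean up the vector entry ... */
	pthread_mutex_unlock(&desc_lock);
	pthread_mutex_unlock(&vector_lock);
}

int main(void)
{
	setup_side();
	cleanup_side();
	return 0;
}

Backing off vector_lock before retrying lets a contending setup_side()
complete its nested acquisition, so both sides make forward progress
instead of spinning against each other.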

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Link: http://lkml.kernel.org/r/20150802203609.222975294@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner 2015-08-02 20:38:23 +00:00
parent ad3f8d5afe
commit df54c4934e
1 changed file with 14 additions and 4 deletions

arch/x86/kernel/apic/vector.c

@@ -539,6 +539,9 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 
 	entering_ack_irq();
 
+	/* Prevent vectors vanishing under us */
+	raw_spin_lock(&vector_lock);
+
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		int irq;
@@ -546,6 +549,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 		struct irq_desc *desc;
 		struct apic_chip_data *data;
 
+retry:
 		irq = __this_cpu_read(vector_irq[vector]);
 
 		if (irq <= VECTOR_UNDEFINED)
@@ -555,12 +559,16 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 		if (!desc)
 			continue;
 
+		if (!raw_spin_trylock(&desc->lock)) {
+			raw_spin_unlock(&vector_lock);
+			cpu_relax();
+			raw_spin_lock(&vector_lock);
+			goto retry;
+		}
+
 		data = apic_chip_data(&desc->irq_data);
 		if (!data)
-			continue;
+			goto unlock;
 
-		raw_spin_lock(&desc->lock);
 		/*
 		 * Check if the irq migration is in progress. If so, we
 		 * haven't received the cleanup request yet for this irq.
@@ -589,6 +597,8 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 		raw_spin_unlock(&desc->lock);
 	}
 
+	raw_spin_unlock(&vector_lock);
+
 	exiting_irq();
 }