genirq: Free irq_desc with rcu
The new VMD device driver needs to iterate over a list of "demultiplexing" interrupts. Protecting that list with a lock is not possible because the list is also required in code paths which hold the irq descriptor lock. The demultiplexing interrupt handler would therefore create a lock inversion if it called a demux handler with the list protection lock held.

Solve this by freeing the irq descriptor via RCU, so the list can be walked with the rcu read lock held.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Keith Busch <keith.busch@intel.com>
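The intended usage looks roughly like the sketch below. The names (struct demux_child, demux_children, demux_handler) are hypothetical and not taken from the VMD driver or from this patch; they only illustrate a demultiplexing handler walking its child interrupts under rcu_read_lock() instead of a list lock that could invert with the irq descriptor lock.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/rculist.h>

/* Hypothetical child entry for a demultiplexing driver; not part of this patch. */
struct demux_child {
	struct list_head	node;	/* linked into demux_children */
	unsigned int		virq;	/* Linux irq number of the child */
};

/* List updates are serialized elsewhere; readers only need rcu_read_lock(). */
static LIST_HEAD(demux_children);

static irqreturn_t demux_handler(int irq, void *data)
{
	struct demux_child *child;

	rcu_read_lock();
	list_for_each_entry_rcu(child, &demux_children, node)
		generic_handle_irq(child->virq);
	rcu_read_unlock();

	return IRQ_HANDLED;
}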
commit 425a5072dc
parent f0cb322073
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -1,6 +1,8 @@
 #ifndef _LINUX_IRQDESC_H
 #define _LINUX_IRQDESC_H
 
+#include <linux/rcupdate.h>
+
 /*
  * Core internal functions to deal with irq descriptors
  */
@@ -40,6 +42,7 @@ struct pt_regs;
  *			IRQF_NO_SUSPEND set
  * @force_resume_depth:	number of irqactions on a irq descriptor with
  *			IRQF_FORCE_RESUME set
+ * @rcu:		rcu head for delayed free
  * @dir:		/proc/irq/ procfs entry
  * @name:		flow handler name for /proc/interrupts output
  */
@@ -81,6 +84,9 @@ struct irq_desc {
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
 #endif
+#ifdef CONFIG_SPARSE_IRQ
+	struct rcu_head		rcu;
+#endif
 	int			parent_irq;
 	struct module		*owner;
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -159,6 +159,7 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 
 	raw_spin_lock_init(&desc->lock);
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	init_rcu_head(&desc->rcu);
 
 	desc_set_defaults(irq, desc, node, owner);
 
@@ -171,6 +172,15 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 	return NULL;
 }
 
+static void delayed_free_desc(struct rcu_head *rhp)
+{
+	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
+
+	free_masks(desc);
+	free_percpu(desc->kstat_irqs);
+	kfree(desc);
+}
+
 static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -187,9 +197,12 @@ static void free_desc(unsigned int irq)
 	delete_irq_desc(irq);
 	mutex_unlock(&sparse_irq_lock);
 
-	free_masks(desc);
-	free_percpu(desc->kstat_irqs);
-	kfree(desc);
+	/*
+	 * We free the descriptor, masks and stat fields via RCU. That
+	 * allows demultiplex interrupts to do rcu based management of
+	 * the child interrupts.
+	 */
+	call_rcu(&desc->rcu, delayed_free_desc);
 }
 
 static int alloc_descs(unsigned int start, unsigned int cnt, int node,
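For the other direction, a rough sketch of a matching teardown path (again hypothetical names, building on the reader sketch after the changelog above): the child is unlinked from the RCU-protected list before its descriptor is released. Because free_desc() now defers the final kfree() through call_rcu(), a demux handler that is still inside its rcu_read_lock() section cannot end up dereferencing freed descriptor memory.

#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>

static DEFINE_MUTEX(demux_lock);	/* serializes updates to demux_children */

static void demux_remove_child(struct demux_child *child)
{
	mutex_lock(&demux_lock);
	list_del_rcu(&child->node);
	mutex_unlock(&demux_lock);

	/*
	 * With this patch the descriptor itself is freed via call_rcu(),
	 * so a concurrent demux_handler() can still safely finish its walk.
	 */
	irq_free_descs(child->virq, 1);

	/* Wait for readers that may still reference the unlinked child. */
	synchronize_rcu();
	kfree(child);
}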