mirror of https://gitee.com/openkylin/linux.git
[POWERPC] Fix performance regression in IRQ radix tree locking
When reworking the powerpc irq code, I figured out that we were using the radix tree in a racy way. As a temporary fix, I put a spinlock in there. However, this can have a significant impact on performance. This patch reworks that to use a smarter technique based on the fact that what we need is in fact a rwlock with extremely rare writers (thus optimized for the read path). Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
This commit is contained in:
parent
4b3afca934
commit
8ec8f2e85c
|
@ -322,7 +322,8 @@ EXPORT_SYMBOL(do_softirq);
|
||||||
|
|
||||||
static LIST_HEAD(irq_hosts);
|
static LIST_HEAD(irq_hosts);
|
||||||
static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
|
static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
|
||||||
|
static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
|
||||||
|
static unsigned int irq_radix_writer;
|
||||||
struct irq_map_entry irq_map[NR_IRQS];
|
struct irq_map_entry irq_map[NR_IRQS];
|
||||||
static unsigned int irq_virq_count = NR_IRQS;
|
static unsigned int irq_virq_count = NR_IRQS;
|
||||||
static struct irq_host *irq_default_host;
|
static struct irq_host *irq_default_host;
|
||||||
|
@ -455,6 +456,58 @@ void irq_set_virq_count(unsigned int count)
|
||||||
irq_virq_count = count;
|
irq_virq_count = count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* radix tree not lockless safe ! we use a brlock-type mechanism
|
||||||
|
* for now, until we can use a lockless radix tree
|
||||||
|
*/
|
||||||
|
/*
 * Acquire exclusive (write-side) access to the irq radix tree.
 *
 * brlock-style scheme: writers serialize on irq_big_lock, announce
 * themselves via irq_radix_writer, then spin until every CPU's
 * per-cpu irq_radix_reader flag is clear, i.e. all in-flight readers
 * have drained.  Interrupt state is saved through *flags and must be
 * passed back to irq_radix_wrunlock().
 */
static void irq_radix_wrlock(unsigned long *flags)
{
	unsigned int cpu, ok;

	/* serialize against other writers and disable interrupts */
	spin_lock_irqsave(&irq_big_lock, *flags);
	irq_radix_writer = 1;
	/* make the writer flag visible before scanning reader flags */
	smp_mb();
	do {
		/* force re-read of the per-cpu flags each pass */
		barrier();
		ok = 1;
		/* wait for all CPUs' readers to leave the critical section */
		for_each_possible_cpu(cpu) {
			if (per_cpu(irq_radix_reader, cpu)) {
				ok = 0;
				break;
			}
		}
		if (!ok)
			cpu_relax();
	} while(!ok);
}
|
||||||
|
|
||||||
|
/*
 * Release write-side access taken by irq_radix_wrlock().
 *
 * The wmb orders the writer's tree updates before clearing
 * irq_radix_writer, so readers that see the flag clear also see a
 * consistent tree.  Restores the interrupt state saved in flags.
 */
static void irq_radix_wrunlock(unsigned long flags)
{
	/* flush tree modifications before letting readers back in */
	smp_wmb();
	irq_radix_writer = 0;
	spin_unlock_irqrestore(&irq_big_lock, flags);
}
|
||||||
|
|
||||||
|
/*
 * Acquire read-side access to the irq radix tree.
 *
 * Fast path: raise this CPU's irq_radix_reader flag and, if no writer
 * is active, return immediately — no shared lock is touched, which is
 * the whole point of this scheme (readers vastly outnumber writers).
 *
 * Slow path: a writer is active, so retract our reader flag (letting
 * the writer finish), then synchronize by briefly taking irq_big_lock;
 * re-raising the flag while holding the lock is safe because any
 * writer holds that lock for its whole critical section.
 * Interrupts are disabled via *flags for the duration of the read.
 */
static void irq_radix_rdlock(unsigned long *flags)
{
	local_irq_save(*flags);
	__get_cpu_var(irq_radix_reader) = 1;
	/* order our reader flag against the writer-flag check below */
	smp_mb();
	if (likely(irq_radix_writer == 0))
		return;
	/* a writer is in progress: back off so it can complete */
	__get_cpu_var(irq_radix_reader) = 0;
	smp_wmb();
	/* wait for the writer to drop irq_big_lock, then re-register */
	spin_lock(&irq_big_lock);
	__get_cpu_var(irq_radix_reader) = 1;
	spin_unlock(&irq_big_lock);
}
|
||||||
|
|
||||||
|
/*
 * Release read-side access taken by irq_radix_rdlock(): clear this
 * CPU's reader flag (a spinning writer may now proceed) and restore
 * the saved interrupt state.
 */
static void irq_radix_rdunlock(unsigned long flags)
{
	__get_cpu_var(irq_radix_reader) = 0;
	local_irq_restore(flags);
}
|
||||||
|
|
||||||
|
|
||||||
unsigned int irq_create_mapping(struct irq_host *host,
|
unsigned int irq_create_mapping(struct irq_host *host,
|
||||||
irq_hw_number_t hwirq)
|
irq_hw_number_t hwirq)
|
||||||
{
|
{
|
||||||
|
@ -604,13 +657,9 @@ void irq_dispose_mapping(unsigned int virq)
|
||||||
/* Check if radix tree allocated yet */
|
/* Check if radix tree allocated yet */
|
||||||
if (host->revmap_data.tree.gfp_mask == 0)
|
if (host->revmap_data.tree.gfp_mask == 0)
|
||||||
break;
|
break;
|
||||||
/* XXX radix tree not safe ! remove lock when it becomes safe
|
irq_radix_wrlock(&flags);
|
||||||
* and use some RCU sync to make sure everything is ok before we
|
|
||||||
* can re-use that map entry
|
|
||||||
*/
|
|
||||||
spin_lock_irqsave(&irq_big_lock, flags);
|
|
||||||
radix_tree_delete(&host->revmap_data.tree, hwirq);
|
radix_tree_delete(&host->revmap_data.tree, hwirq);
|
||||||
spin_unlock_irqrestore(&irq_big_lock, flags);
|
irq_radix_wrunlock(flags);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -677,25 +726,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
|
||||||
if (tree->gfp_mask == 0)
|
if (tree->gfp_mask == 0)
|
||||||
return irq_find_mapping(host, hwirq);
|
return irq_find_mapping(host, hwirq);
|
||||||
|
|
||||||
/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
|
|
||||||
* when that is fixed (when Nick's patch gets in
|
|
||||||
*/
|
|
||||||
spin_lock_irqsave(&irq_big_lock, flags);
|
|
||||||
|
|
||||||
/* Now try to resolve */
|
/* Now try to resolve */
|
||||||
|
irq_radix_rdlock(&flags);
|
||||||
ptr = radix_tree_lookup(tree, hwirq);
|
ptr = radix_tree_lookup(tree, hwirq);
|
||||||
|
irq_radix_rdunlock(flags);
|
||||||
|
|
||||||
/* Found it, return */
|
/* Found it, return */
|
||||||
if (ptr) {
|
if (ptr) {
|
||||||
virq = ptr - irq_map;
|
virq = ptr - irq_map;
|
||||||
goto bail;
|
return virq;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* If not there, try to insert it */
|
/* If not there, try to insert it */
|
||||||
virq = irq_find_mapping(host, hwirq);
|
virq = irq_find_mapping(host, hwirq);
|
||||||
if (virq != NO_IRQ)
|
if (virq != NO_IRQ) {
|
||||||
|
irq_radix_wrlock(&flags);
|
||||||
radix_tree_insert(tree, hwirq, &irq_map[virq]);
|
radix_tree_insert(tree, hwirq, &irq_map[virq]);
|
||||||
bail:
|
irq_radix_wrunlock(flags);
|
||||||
spin_unlock_irqrestore(&irq_big_lock, flags);
|
}
|
||||||
return virq;
|
return virq;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -806,12 +854,12 @@ static int irq_late_init(void)
|
||||||
struct irq_host *h;
|
struct irq_host *h;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&irq_big_lock, flags);
|
irq_radix_wrlock(&flags);
|
||||||
list_for_each_entry(h, &irq_hosts, link) {
|
list_for_each_entry(h, &irq_hosts, link) {
|
||||||
if (h->revmap_type == IRQ_HOST_MAP_TREE)
|
if (h->revmap_type == IRQ_HOST_MAP_TREE)
|
||||||
INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
|
INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&irq_big_lock, flags);
|
irq_radix_wrunlock(flags);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue