mirror of https://gitee.com/openkylin/linux.git
genirq: Provide edge_eoi flow handler
This is a replacement for the cell flow handler which is in the way of cleanups. Must be selected to avoid general bloat. Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
32f4125ebf
commit
0521c8fbb3
|
@ -423,6 +423,7 @@ extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
|
||||||
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
|
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
|
||||||
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
|
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
|
||||||
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
|
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
|
||||||
|
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
|
||||||
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
|
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
|
||||||
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
|
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
|
||||||
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
|
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
|
||||||
|
|
|
@ -51,6 +51,10 @@ config HARDIRQS_SW_RESEND
|
||||||
# Fasteoi handlers that need a preflow callback
config IRQ_PREFLOW_FASTEOI
	bool

# Edge style eoi based handler (cell)
config IRQ_EDGE_EOI_HANDLER
	bool

# Support forced irq threading
config IRQ_FORCED_THREADING
	bool
|
|
|
@ -604,6 +604,51 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
|
||||||
raw_spin_unlock(&desc->lock);
|
raw_spin_unlock(&desc->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * Keep handling as long as new edges were latched (IRQS_PENDING)
	 * while we were busy, and the line was not disabled meanwhile.
	 */
	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

	/*
	 * Unconditionally issue the EOI on every exit path; unlike
	 * handle_edge_irq there is no mask/unmask bookkeeping here.
	 * (Label fixed: the gotos above target out_eoi, not out_unlock.)
	 */
out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* handle_percpu_irq - Per CPU local irq handler
|
* handle_percpu_irq - Per CPU local irq handler
|
||||||
* @irq: the interrupt number
|
* @irq: the interrupt number
|
||||||
|
|
Loading…
Reference in New Issue