softirq: Move related code into one section

To prepare for adding an RT-aware variant of softirq serialization and
processing, move the related code into one section so the necessary
#ifdeffery is reduced to a single place.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20201113141733.974214480@linutronix.de
commit ae9ef58996 (parent 15115830c8)
Author: Thomas Gleixner, 2020-11-13 15:02:18 +01:00
1 changed file with 55 additions and 54 deletions

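The consolidation the changelog describes is a general pattern, sketched
below with a made-up CONFIG_FOO (illustrative only, not part of the patch).
The two layouts are alternatives for the same file: scattered, every piece
of the feature repeats the conditional; grouped, one #ifdef/#else pair
covers the whole section and an alternative (e.g. RT-aware) implementation
can later slot into the #else branch.

/* Layout A, scattered: each piece repeats the conditional. */
#ifdef CONFIG_FOO
static void foo_enter(void) { /* ... */ }
#endif

static void common_helper(void) { /* ... */ }

#ifdef CONFIG_FOO
static void foo_exit(void) { /* ... */ }
#endif

/* Layout B, grouped: one section, one conditional. */
#ifdef CONFIG_FOO
static void foo_enter(void) { /* ... */ }
static void foo_exit(void) { /* ... */ }
#else
/* an alternative (e.g. RT-aware) implementation could live here */
#endif

static void common_helper(void) { /* ... */ }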

@@ -92,6 +92,13 @@ static bool ksoftirqd_running(unsigned long pending)
 		!__kthread_should_park(tsk);
 }
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+DEFINE_PER_CPU(int, hardirqs_enabled);
+DEFINE_PER_CPU(int, hardirq_context);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+#endif
+
 /*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
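
The per-CPU variables added above are the irqflags-tracing state that
lockdep consults from elsewhere in the tree. For orientation only (not part
of this patch): assert_hardirqs_enabled() below is an invented name modeled
on the real lockdep_assert_irqs_enabled(), whose exact body varies across
kernel versions.

#ifdef CONFIG_TRACE_IRQFLAGS
/* sketch: warn once if this CPU believes hardirqs are disabled */
# define assert_hardirqs_enabled() \
	WARN_ON_ONCE(!this_cpu_read(hardirqs_enabled))
#endif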
@@ -102,17 +109,11 @@ static bool ksoftirqd_running(unsigned long pending)
  * softirq and whether we just have bh disabled.
  */
 
-/*
- * This one is for softirq.c-internal use,
- * where hardirqs are disabled legitimately:
- */
 #ifdef CONFIG_TRACE_IRQFLAGS
-
-DEFINE_PER_CPU(int, hardirqs_enabled);
-DEFINE_PER_CPU(int, hardirq_context);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
-
+/*
+ * This is for softirq.c-internal use, where hardirqs are disabled
+ * legitimately:
+ */
 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
 	unsigned long flags;
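
__local_bh_disable_ip() and __local_bh_enable_ip(), visible in the context
lines above and below, are the backends of the standard BH serialization
API. Typical usage of that API (standard kernel pattern, shown for
orientation, not part of this patch):

	local_bh_disable();	/* keep softirq handlers off this CPU */
	/* ... access data shared with softirq/tasklet context ... */
	local_bh_enable();	/* re-enable; may run pending softirqs */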
@@ -203,6 +204,50 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+static inline void invoke_softirq(void)
+{
+	if (ksoftirqd_running(local_softirq_pending()))
+		return;
+
+	if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+		/*
+		 * We can safely execute softirq on the current stack if
+		 * it is the irq stack, because it should be near empty
+		 * at this stage.
+		 */
+		__do_softirq();
+#else
+		/*
+		 * Otherwise, irq_exit() is called on the task stack that can
+		 * be potentially deep already. So call softirq in its own stack
+		 * to prevent from any overrun.
+		 */
+		do_softirq_own_stack();
+#endif
+	} else {
+		wakeup_softirqd();
+	}
+}
+
+asmlinkage __visible void do_softirq(void)
+{
+	__u32 pending;
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	pending = local_softirq_pending();
+
+	if (pending && !ksoftirqd_running(pending))
+		do_softirq_own_stack();
+
+	local_irq_restore(flags);
+}
+
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
  * but break the loop if need_resched() is set or after 2 ms.
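
The moved invoke_softirq() picks one of four outcomes. A standalone
restatement of that dispatch as a sketch (the enum and function names are
invented for illustration; note that in the real code the irq-stack case is
a compile-time choice via CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK, not a runtime
flag):

enum softirq_dispatch {
	DEFER_TO_KSOFTIRQD,	/* ksoftirqd already active, let it run */
	RUN_INLINE,		/* __do_softirq() on the irq stack */
	RUN_OWN_STACK,		/* do_softirq_own_stack() */
	WAKE_KSOFTIRQD,		/* force_irqthreads set */
};

static enum softirq_dispatch softirq_dispatch(bool ksoftirqd_running,
					      bool force_irqthreads,
					      bool exit_on_irq_stack)
{
	if (ksoftirqd_running)
		return DEFER_TO_KSOFTIRQD;
	if (force_irqthreads)
		return WAKE_KSOFTIRQD;
	return exit_on_irq_stack ? RUN_INLINE : RUN_OWN_STACK;
}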
@@ -327,24 +372,6 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	current_restore_flags(old_flags, PF_MEMALLOC);
 }
 
-asmlinkage __visible void do_softirq(void)
-{
-	__u32 pending;
-	unsigned long flags;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	pending = local_softirq_pending();
-
-	if (pending && !ksoftirqd_running(pending))
-		do_softirq_own_stack();
-
-	local_irq_restore(flags);
-}
-
 /**
  * irq_enter_rcu - Enter an interrupt context with RCU watching
  */
@@ -371,32 +398,6 @@ void irq_enter(void)
 	irq_enter_rcu();
 }
 
-static inline void invoke_softirq(void)
-{
-	if (ksoftirqd_running(local_softirq_pending()))
-		return;
-
-	if (!force_irqthreads) {
-#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
-		/*
-		 * We can safely execute softirq on the current stack if
-		 * it is the irq stack, because it should be near empty
-		 * at this stage.
-		 */
-		__do_softirq();
-#else
-		/*
-		 * Otherwise, irq_exit() is called on the task stack that can
-		 * be potentially deep already. So call softirq in its own stack
-		 * to prevent from any overrun.
-		 */
-		do_softirq_own_stack();
-#endif
-	} else {
-		wakeup_softirqd();
-	}
-}
-
 static inline void tick_irq_exit(void)
 {
 #ifdef CONFIG_NO_HZ_COMMON
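
Net effect: after this patch all softirq serialization and processing
entry points sit in one contiguous block of kernel/softirq.c, roughly as
follows (layout sketch of the resulting order, not verbatim source):

/* kernel/softirq.c, consolidated section after the move */
#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);	/* irqflags tracing state */
DEFINE_PER_CPU(int, hardirq_context);
#endif
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
static inline void invoke_softirq(void);	/* irq_exit() helper */
asmlinkage __visible void do_softirq(void);

This single section is where the changelog's planned RT-aware variant of
the same entry points can later be substituted under one conditional.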