x86/tracing: Add irq_enter/exit() in smp_trace_reschedule_interrupt()
Reschedule vector tracepoints may be called while the CPU is idle. This causes
the lockdep warning below.

The tracepoint requires RCU, but for accuracy it also requires irq_enter()
(tracepoints record the irq context); thus the tracepoint interrupt handler
should call irq_enter(), not rcu_irq_enter() (irq_enter() calls
rcu_irq_enter()).

So, add irq_enter/exit() to smp_trace_reschedule_interrupt() via common
pre/post processing helpers, smp_entering_irq() and exiting_irq()
(exiting_irq(), in arch/x86/include/asm/apic.h, just calls irq_exit()),
because these can be shared among the reschedule, call_function, and
call_function_single vectors.

[   50.720557] Testing event reschedule_exit:
[   50.721349]
[   50.721502] ===============================
[   50.721835] [ INFO: suspicious RCU usage. ]
[   50.722169] 3.10.0-rc6-00004-gcf910e8 #190 Not tainted
[   50.722582] -------------------------------
[   50.722915] /c/kernel-tests/src/linux/arch/x86/include/asm/trace/irq_vectors.h:50 suspicious rcu_dereference_check() usage!
[   50.723770]
[   50.723770] other info that might help us debug this:
[   50.723770]
[   50.724385]
[   50.724385] RCU used illegally from idle CPU!
[   50.724385] rcu_scheduler_active = 1, debug_locks = 0
[   50.725232] RCU used illegally from extended quiescent state!
[   50.725690] no locks held by swapper/0/0.
[   50.726010]
[   50.726010] stack backtrace:
[...]

Signed-off-by: Seiji Aguchi <seiji.aguchi@hds.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/51CDCFA3.9080101@hds.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 4787c368a9
parent 5236eb968e
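Before the diff, a minimal user-space sketch of the ordering the patch enforces. This is illustration only, not kernel code: rcu_watching, the printf-based stubs, and the tracepoint() helper are stand-ins invented here; only the call ordering mirrors the patched smp_trace_reschedule_interrupt(), where the tracepoints are bracketed by the shared smp_entering_irq()/exiting_irq() helpers.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for RCU's idle tracking: true while we are in "IRQ context". */
static bool rcu_watching;

static void irq_enter(void)    { rcu_watching = true;  puts("irq_enter()"); }
static void irq_exit(void)     { puts("irq_exit()");   rcu_watching = false; }
static void ack_APIC_irq(void) { puts("ack_APIC_irq()"); }

/* Tracepoints need RCU; flag use outside IRQ context (the reported bug). */
static void tracepoint(const char *name)
{
	if (!rcu_watching)
		printf("WARNING: suspicious RCU usage in %s\n", name);
	else
		printf("trace: %s\n", name);
}

/* Shared pre/post helpers, mirroring smp_entering_irq()/exiting_irq(). */
static void smp_entering_irq(void) { ack_APIC_irq(); irq_enter(); }
static void exiting_irq(void)      { irq_exit(); }

static void __smp_reschedule_interrupt(void) { puts("scheduler_ipi()"); }

/* Patched handler shape: enter IRQ context before the entry tracepoint,
 * leave it only after the exit tracepoint. */
static void smp_trace_reschedule_interrupt(void)
{
	smp_entering_irq();
	tracepoint("reschedule_entry");
	__smp_reschedule_interrupt();
	tracepoint("reschedule_exit");
	exiting_irq();
}

int main(void)
{
	smp_trace_reschedule_interrupt();
	return 0;
}

Running the sketch prints the calls in the bracketed order; dropping the smp_entering_irq()/exiting_irq() pair around the tracepoints reproduces the "suspicious RCU usage" condition that the lockdep splat above complains about.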
arch/x86/kernel/smp.c:

@@ -265,23 +265,30 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 	 */
 }
 
-void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+static inline void smp_entering_irq(void)
 {
 	ack_APIC_irq();
-	trace_reschedule_entry(RESCHEDULE_VECTOR);
-	__smp_reschedule_interrupt();
-	trace_reschedule_exit(RESCHEDULE_VECTOR);
-	/*
-	 * KVM uses this interrupt to force a cpu out of guest mode
-	 */
-}
-
-static inline void call_function_entering_irq(void)
-{
-	ack_APIC_irq();
 	irq_enter();
 }
 
+void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+{
+	/*
+	 * Need to call irq_enter() before calling the trace point.
+	 * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
+	 * scheduler_ipi(). This is OK, since those functions are allowed
+	 * to nest.
+	 */
+	smp_entering_irq();
+	trace_reschedule_entry(RESCHEDULE_VECTOR);
+	__smp_reschedule_interrupt();
+	trace_reschedule_exit(RESCHEDULE_VECTOR);
+	exiting_irq();
+	/*
+	 * KVM uses this interrupt to force a cpu out of guest mode
+	 */
+}
+
 static inline void __smp_call_function_interrupt(void)
 {
 	generic_smp_call_function_interrupt();
@@ -290,14 +297,14 @@ static inline void __smp_call_function_interrupt(void)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	__smp_call_function_interrupt();
 	exiting_irq();
 }
 
 void smp_trace_call_function_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
 	__smp_call_function_interrupt();
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
@@ -312,14 +319,14 @@ static inline void __smp_call_function_single_interrupt(void)
 
 void smp_call_function_single_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	__smp_call_function_single_interrupt();
 	exiting_irq();
 }
 
 void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
 	__smp_call_function_single_interrupt();
 	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);