x86/dumpstack: Allow preemption in show_stack_log_lvl() and dump_trace()

show_stack_log_lvl() and dump_trace() are already preemption safe:

- If they're running in irq or exception context, preemption is already
  disabled and the percpu stack pointers can be trusted.

- If they're running with preemption enabled, they must be running on
  the task stack anyway, so it doesn't matter if they're comparing the
  stack pointer against a percpu stack pointer from this CPU or another
  one: either way it won't match (see the sketch below).
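
The reasoning above comes down to the address-range check performed by
is_irq_stack(): a stack pointer is only treated as an irq-stack pointer if
it falls inside that stack's address range. Below is a minimal, compilable
user-space sketch of that check; the is_irq_stack() name mirrors the code in
the first file changed here, but the THREAD_SIZE value, the static arrays
standing in for the task and irq stacks, and the main() harness are
illustrative assumptions, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE (8 * 1024)	/* illustrative size, not the per-arch kernel value */

	/*
	 * Mirrors the range check in is_irq_stack(): 'p' matches an irq
	 * stack only if it lies inside [irq, irq + THREAD_SIZE).
	 */
	static void *is_irq_stack(void *p, void *irq)
	{
		uintptr_t addr = (uintptr_t)p, base = (uintptr_t)irq;

		if (addr < base || addr >= base + THREAD_SIZE)
			return NULL;
		return (void *)(base + THREAD_SIZE);
	}

	int main(void)
	{
		static char task_stack[THREAD_SIZE];		/* stand-in for the task stack */
		static char irq_stack_this_cpu[THREAD_SIZE];	/* stand-in for "our" CPU's irq stack */
		static char irq_stack_other_cpu[THREAD_SIZE];	/* stand-in for another CPU's irq stack */

		void *sp = &task_stack[128];	/* a pointer that lives on the task stack */

		/*
		 * Whether the percpu pointer we read belongs to the CPU we
		 * are running on or to one we migrated away from, a
		 * task-stack pointer falls in neither range, so both calls
		 * return NULL and the walker stays on the task stack.
		 */
		printf("this cpu:  %p\n", is_irq_stack(sp, irq_stack_this_cpu));
		printf("other cpu: %p\n", is_irq_stack(sp, irq_stack_other_cpu));
		return 0;
	}

With preemption disabled (irq or exception context) the percpu pointer is
exact; with preemption enabled it may be stale, but as the sketch shows a
stale pointer can only make the comparison fail, which is the correct
outcome for a task-stack frame.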

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/a0ca0b1044eca97d4f0ec7c1619cf80b3b65560d.1473371307.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Josh Poimboeuf, 2016-09-08 16:49:20 -05:00, committed by Ingo Molnar
parent 85063fac1f
commit cfeeed279d
2 changed files with 15 additions and 25 deletions

arch/x86/kernel/dumpstack_32.c

@@ -24,16 +24,16 @@ static void *is_irq_stack(void *p, void *irq)
 }
 
-static void *is_hardirq_stack(unsigned long *stack, int cpu)
+static void *is_hardirq_stack(unsigned long *stack)
 {
-	void *irq = per_cpu(hardirq_stack, cpu);
+	void *irq = this_cpu_read(hardirq_stack);
 
 	return is_irq_stack(stack, irq);
 }
 
-static void *is_softirq_stack(unsigned long *stack, int cpu)
+static void *is_softirq_stack(unsigned long *stack)
 {
-	void *irq = per_cpu(softirq_stack, cpu);
+	void *irq = this_cpu_read(softirq_stack);
 
 	return is_irq_stack(stack, irq);
 }
@@ -42,7 +42,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data)
 {
-	const unsigned cpu = get_cpu();
 	int graph = 0;
 	u32 *prev_esp;
 
@@ -53,9 +52,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	for (;;) {
 		void *end_stack;
 
-		end_stack = is_hardirq_stack(stack, cpu);
+		end_stack = is_hardirq_stack(stack);
 		if (!end_stack)
-			end_stack = is_softirq_stack(stack, cpu);
+			end_stack = is_softirq_stack(stack);
 
 		bp = ops->walk_stack(task, stack, bp, ops, data,
 				     end_stack, &graph);
@@ -74,7 +73,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			break;
 
 		touch_nmi_watchdog();
 	}
-	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);

arch/x86/kernel/dumpstack_64.c

@@ -31,8 +31,8 @@ static char x86_stack_ids[][8] = {
 #endif
 };
 
-static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-					 unsigned *usedp, char **idp)
+static unsigned long *in_exception_stack(unsigned long stack, unsigned *usedp,
+					 char **idp)
 {
 	unsigned k;
 
@@ -41,7 +41,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
 	 * 'stack' is in one of them:
 	 */
 	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
+		unsigned long end = raw_cpu_ptr(&orig_ist)->ist[k];
 		/*
 		 * Is 'stack' above this exception frame's end?
 		 * If yes then skip to the next frame.
@@ -111,7 +111,7 @@ enum stack_type {
 };
 
 static enum stack_type
-analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
+analyze_stack(struct task_struct *task, unsigned long *stack,
 	      unsigned long **stack_end, unsigned long *irq_stack,
 	      unsigned *used, char **id)
 {
@@ -121,8 +121,7 @@ analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
 	if ((unsigned long)task_stack_page(task) == addr)
 		return STACK_IS_NORMAL;
 
-	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
-					used, id);
+	*stack_end = in_exception_stack((unsigned long)stack, used, id);
 	if (*stack_end)
 		return STACK_IS_EXCEPTION;
 
@@ -149,8 +148,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp,
 		const struct stacktrace_ops *ops, void *data)
 {
-	const unsigned cpu = get_cpu();
-	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+	unsigned long *irq_stack = (unsigned long *)this_cpu_read(irq_stack_ptr);
 	unsigned used = 0;
 	int graph = 0;
 	int done = 0;
@@ -169,8 +167,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 		enum stack_type stype;
 		char *id;
 
-		stype = analyze_stack(cpu, task, stack, &stack_end,
-				      irq_stack, &used, &id);
+		stype = analyze_stack(task, stack, &stack_end, irq_stack, &used,
+				      &id);
 
 		/* Default finish unless specified to continue */
 		done = 1;
@@ -225,7 +223,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 	 * This handles the process stack:
 	 */
 	bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
-	put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
@@ -236,13 +233,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	unsigned long *irq_stack_end;
 	unsigned long *irq_stack;
 	unsigned long *stack;
-	int cpu;
 	int i;
 
-	preempt_disable();
-	cpu = smp_processor_id();
-
-	irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+	irq_stack_end = (unsigned long *)this_cpu_read(irq_stack_ptr);
 	irq_stack     = irq_stack_end - (IRQ_STACK_SIZE / sizeof(long));
 
 	sp = sp ? : get_stack_pointer(task, regs);
@@ -274,7 +267,6 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		stack++;
 		touch_nmi_watchdog();
 	}
-	preempt_enable();
 
 	pr_cont("\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);