x86/dumpstack: Remove dump_trace() and related callbacks
All previous users of dump_trace() have been converted to use the new
unwind interfaces, so we can remove it and the related
print_context_stack() and print_context_stack_bp() callback functions.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/5b97da3572b40b5a4d8e185cf2429308d0987a13.1474045023.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
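For context, a minimal sketch of what a converted caller looks like on top of the new unwind interface referenced above. This is not part of the commit; the function name example_show_trace() and the pr_info() output format are illustrative assumptions, only the unwind_*() calls come from the new interface.

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/unwind.h>

/*
 * Illustrative sketch only: walk a task's stack with the new unwind
 * interface (unwind_start/unwind_next_frame/unwind_get_return_address)
 * instead of dump_trace() with stacktrace_ops callbacks.
 */
static void example_show_trace(struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		unsigned long addr = unwind_get_return_address(&state);

		if (!addr)
			break;

		/* Each caller does its own thing with the address. */
		pr_info(" [<%p>] %pS\n", (void *)addr, (void *)addr);
	}
}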
parent e18bcccd1a
commit c8fe460982
@@ -45,42 +45,6 @@ static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
-extern int kstack_depth_to_print;
-
-struct thread_info;
-struct stacktrace_ops;
-
-typedef unsigned long (*walk_stack_t)(struct task_struct *task,
-				       unsigned long *stack,
-				       unsigned long bp,
-				       const struct stacktrace_ops *ops,
-				       void *data,
-				       struct stack_info *info,
-				       int *graph);
-
-extern unsigned long
-print_context_stack(struct task_struct *task,
-		    unsigned long *stack, unsigned long bp,
-		    const struct stacktrace_ops *ops, void *data,
-		    struct stack_info *info, int *graph);
-
-extern unsigned long
-print_context_stack_bp(struct task_struct *task,
-		       unsigned long *stack, unsigned long bp,
-		       const struct stacktrace_ops *ops, void *data,
-		       struct stack_info *info, int *graph);
-
-/* Generic stack tracer with callbacks */
-
-struct stacktrace_ops {
-	int (*address)(void *data, unsigned long address, int reliable);
-	/* On negative return stop dumping */
-	int (*stack)(void *data, const char *name);
-	walk_stack_t	walk_stack;
-};
-
-void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
-		unsigned long *stack, unsigned long bp,
-		const struct stacktrace_ops *ops, void *data);
-
 #ifdef CONFIG_X86_32
 #define STACKSLOTS_PER_LINE 8
 #else
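For contrast, a minimal sketch of how the callback interface removed above was typically consumed before this series. The names example_stack(), example_address() and example_ops are hypothetical; only the stacktrace_ops fields and the dump_trace() signature come from the removed code.

#include <linux/printk.h>
#include <asm/stacktrace.h>

/*
 * Illustrative sketch only: fill in stacktrace_ops callbacks and hand
 * them to dump_trace(), the pattern this commit retires.
 */
static int example_stack(void *data, const char *name)
{
	pr_info("<%s>\n", name);
	return 0;	/* a negative return stops the dump */
}

static int example_address(void *data, unsigned long addr, int reliable)
{
	pr_info(" %s%pS\n", reliable ? "" : "? ", (void *)addr);
	return 0;
}

static const struct stacktrace_ops example_ops = {
	.stack		= example_stack,
	.address	= example_address,
	.walk_stack	= print_context_stack,
};

/* e.g.: dump_trace(current, NULL, NULL, 0, &example_ops, NULL); */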
@@ -56,92 +56,6 @@ void printk_address(unsigned long address)
 	pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
 }
-
-/*
- * x86-64 can have up to three kernel stacks:
- * process stack
- * interrupt stack
- * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
- */
-
-unsigned long
-print_context_stack(struct task_struct *task,
-		    unsigned long *stack, unsigned long bp,
-		    const struct stacktrace_ops *ops, void *data,
-		    struct stack_info *info, int *graph)
-{
-	struct stack_frame *frame = (struct stack_frame *)bp;
-
-	/*
-	 * If we overflowed the stack into a guard page, jump back to the
-	 * bottom of the usable stack.
-	 */
-	if ((unsigned long)task_stack_page(task) - (unsigned long)stack <
-	    PAGE_SIZE)
-		stack = (unsigned long *)task_stack_page(task);
-
-	while (on_stack(info, stack, sizeof(*stack))) {
-		unsigned long addr = *stack;
-
-		if (__kernel_text_address(addr)) {
-			unsigned long real_addr;
-			int reliable = 0;
-
-			if ((unsigned long) stack == bp + sizeof(long)) {
-				reliable = 1;
-				frame = frame->next_frame;
-				bp = (unsigned long) frame;
-			}
-
-			/*
-			 * When function graph tracing is enabled for a
-			 * function, its return address on the stack is
-			 * replaced with the address of an ftrace handler
-			 * (return_to_handler). In that case, before printing
-			 * the "real" address, we want to print the handler
-			 * address as an "unreliable" hint that function graph
-			 * tracing was involved.
-			 */
-			real_addr = ftrace_graph_ret_addr(task, graph, addr,
-							  stack);
-			if (real_addr != addr)
-				ops->address(data, addr, 0);
-
-			ops->address(data, real_addr, reliable);
-		}
-		stack++;
-	}
-	return bp;
-}
-EXPORT_SYMBOL_GPL(print_context_stack);
-
-unsigned long
-print_context_stack_bp(struct task_struct *task,
-		       unsigned long *stack, unsigned long bp,
-		       const struct stacktrace_ops *ops, void *data,
-		       struct stack_info *info, int *graph)
-{
-	struct stack_frame *frame = (struct stack_frame *)bp;
-	unsigned long *retp = &frame->return_address;
-
-	while (on_stack(info, stack, sizeof(*stack) * 2)) {
-		unsigned long addr = *retp;
-		unsigned long real_addr;
-
-		if (!__kernel_text_address(addr))
-			break;
-
-		real_addr = ftrace_graph_ret_addr(task, graph, addr, retp);
-		if (ops->address(data, real_addr, 1))
-			break;
-
-		frame = frame->next_frame;
-		retp = &frame->return_address;
-	}
-
-	return (unsigned long)frame;
-}
-EXPORT_SYMBOL_GPL(print_context_stack_bp);
-
 void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 			unsigned long *stack, char *log_lvl)
 {
@@ -121,41 +121,6 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
 	return -EINVAL;
 }
-
-void dump_trace(struct task_struct *task, struct pt_regs *regs,
-		unsigned long *stack, unsigned long bp,
-		const struct stacktrace_ops *ops, void *data)
-{
-	unsigned long visit_mask = 0;
-	int graph = 0;
-
-	task = task ? : current;
-	stack = stack ? : get_stack_pointer(task, regs);
-	bp = bp ? : (unsigned long)get_frame_pointer(task, regs);
-
-	for (;;) {
-		const char *begin_str, *end_str;
-		struct stack_info info;
-
-		if (get_stack_info(stack, task, &info, &visit_mask))
-			break;
-
-		stack_type_str(info.type, &begin_str, &end_str);
-
-		if (begin_str && ops->stack(data, begin_str) < 0)
-			break;
-
-		bp = ops->walk_stack(task, stack, bp, ops, data, &info, &graph);
-
-		if (end_str && ops->stack(data, end_str) < 0)
-			break;
-
-		stack = info.next_sp;
-
-		touch_nmi_watchdog();
-	}
-}
-EXPORT_SYMBOL(dump_trace);
-
 void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 			unsigned long *sp, char *log_lvl)
 {
@@ -140,75 +140,6 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
 	return -EINVAL;
 }
-
-/*
- * x86-64 can have up to three kernel stacks:
- * process stack
- * interrupt stack
- * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
- */
-
-void dump_trace(struct task_struct *task, struct pt_regs *regs,
-		unsigned long *stack, unsigned long bp,
-		const struct stacktrace_ops *ops, void *data)
-{
-	unsigned long visit_mask = 0;
-	struct stack_info info;
-	int graph = 0;
-	int done = 0;
-
-	task = task ? : current;
-	stack = stack ? : get_stack_pointer(task, regs);
-	bp = bp ? : (unsigned long)get_frame_pointer(task, regs);
-
-	/*
-	 * Print function call entries in all stacks, starting at the
-	 * current stack address. If the stacks consist of nested
-	 * exceptions
-	 */
-	while (!done) {
-		const char *begin_str, *end_str;
-
-		get_stack_info(stack, task, &info, &visit_mask);
-
-		/* Default finish unless specified to continue */
-		done = 1;
-
-		switch (info.type) {
-
-		/* Break out early if we are on the thread stack */
-		case STACK_TYPE_TASK:
-			break;
-
-		case STACK_TYPE_IRQ:
-		case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
-
-			stack_type_str(info.type, &begin_str, &end_str);
-
-			if (ops->stack(data, begin_str) < 0)
-				break;
-
-			bp = ops->walk_stack(task, stack, bp, ops,
-					     data, &info, &graph);
-
-			ops->stack(data, end_str);
-
-			stack = info.next_sp;
-			done = 0;
-			break;
-
-		default:
-			ops->stack(data, "UNK");
-			break;
-		}
-	}
-
-	/*
-	 * This handles the process stack:
-	 */
-	bp = ops->walk_stack(task, stack, bp, ops, data, &info, &graph);
-}
-EXPORT_SYMBOL(dump_trace);
-
 void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 			unsigned long *sp, char *log_lvl)
 {