// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 * 	sub	sp, sp, #0x10
 *   	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 * 	mov	sp, x29
 * 	ldp	x29, x30, [sp]
 * 	add	sp, sp, #0x10
 */
|
2019-07-02 21:07:29 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Unwind from one frame record (A) to the next frame record (B).
|
|
|
|
*
|
|
|
|
* We terminate early if the location of B indicates a malformed chain of frame
|
|
|
|
* records (e.g. a cycle), determined based on the location and fp value of A
|
|
|
|
* and the location (but not the fp value) of B.
|
|
|
|
*/
|
2015-12-15 16:33:40 +08:00
|
|
|
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
|
2012-03-05 19:49:27 +08:00
|
|
|
{
|
|
|
|
unsigned long fp = frame->fp;
|
2019-07-02 21:07:29 +08:00
|
|
|
struct stack_info info;
|
2017-07-22 19:48:34 +08:00
|
|
|
|
2021-01-14 01:31:55 +08:00
|
|
|
/* Terminal record; nothing to unwind */
|
|
|
|
if (!fp)
|
2021-02-25 00:50:37 +08:00
|
|
|
return -ENOENT;
|
2021-01-14 01:31:55 +08:00
|
|
|
|
2017-07-22 19:48:34 +08:00
|
|
|
if (fp & 0xf)
|
|
|
|
return -EINVAL;
|
2015-12-04 19:02:26 +08:00
|
|
|
|
arm64: fix dump_backtrace/unwind_frame with NULL tsk
In some places, dump_backtrace() is called with a NULL tsk parameter,
e.g. in bug_handler() in arch/arm64, or indirectly via show_stack() in
core code. The expectation is that this is treated as if current were
passed instead of NULL. Similar is true of unwind_frame().
Commit a80a0eb70c358f8c ("arm64: make irq_stack_ptr more robust") didn't
take this into account. In dump_backtrace() it compares tsk against
current *before* we check if tsk is NULL, and in unwind_frame() we never
set tsk if it is NULL.
Due to this, we won't initialise irq_stack_ptr in either function. In
dump_backtrace() this results in calling dump_mem() for memory
immediately above the IRQ stack range, rather than for the relevant
range on the task stack. In unwind_frame we'll reject unwinding frames
on the IRQ stack.
In either case this results in incomplete or misleading backtrace
information, but is not otherwise problematic. The initial percpu areas
(including the IRQ stacks) are allocated in the linear map, and dump_mem
uses __get_user(), so we shouldn't access anything with side-effects,
and will handle holes safely.
This patch fixes the issue by having both functions handle the NULL tsk
case before doing anything else with tsk.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Fixes: a80a0eb70c358f8c ("arm64: make irq_stack_ptr more robust")
Acked-by: James Morse <james.morse@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yang Shi <yang.shi@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2016-09-24 00:55:05 +08:00
|
|
|
if (!tsk)
|
|
|
|
tsk = current;
|
|
|
|
|
2019-07-02 21:07:29 +08:00
|
|
|
if (!on_accessible_stack(tsk, fp, &info))
|
2012-03-05 19:49:27 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2019-07-02 21:07:29 +08:00
|
|
|
if (test_bit(info.type, frame->stacks_done))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* As stacks grow downward, any valid record on the same stack must be
|
|
|
|
* at a strictly higher address than the prior record.
|
|
|
|
*
|
|
|
|
* Stacks can nest in several valid orders, e.g.
|
|
|
|
*
|
|
|
|
* TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
|
|
|
|
* TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
|
|
|
|
*
|
|
|
|
* ... but the nesting itself is strict. Once we transition from one
|
|
|
|
* stack to another, it's never valid to unwind back to that first
|
|
|
|
* stack.
|
|
|
|
*/
|
|
|
|
if (info.type == frame->prev_type) {
|
|
|
|
if (fp <= frame->prev_fp)
|
|
|
|
return -EINVAL;
|
|
|
|
} else {
|
|
|
|
set_bit(frame->prev_type, frame->stacks_done);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Record this frame record's values and location. The prev_fp and
|
|
|
|
* prev_type are only meaningful to the next unwind_frame() invocation.
|
|
|
|
*/
|
2016-02-09 01:13:09 +08:00
|
|
|
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
|
|
|
|
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
|
2019-07-02 21:07:29 +08:00
|
|
|
frame->prev_fp = fp;
|
|
|
|
frame->prev_type = info.type;
|
2012-03-05 19:49:27 +08:00
|
|
|
|
2015-12-15 16:33:41 +08:00
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
arm64: fix dump_backtrace/unwind_frame with NULL tsk
In some places, dump_backtrace() is called with a NULL tsk parameter,
e.g. in bug_handler() in arch/arm64, or indirectly via show_stack() in
core code. The expectation is that this is treated as if current were
passed instead of NULL. Similar is true of unwind_frame().
Commit a80a0eb70c358f8c ("arm64: make irq_stack_ptr more robust") didn't
take this into account. In dump_backtrace() it compares tsk against
current *before* we check if tsk is NULL, and in unwind_frame() we never
set tsk if it is NULL.
Due to this, we won't initialise irq_stack_ptr in either function. In
dump_backtrace() this results in calling dump_mem() for memory
immediately above the IRQ stack range, rather than for the relevant
range on the task stack. In unwind_frame we'll reject unwinding frames
on the IRQ stack.
In either case this results in incomplete or misleading backtrace
information, but is not otherwise problematic. The initial percpu areas
(including the IRQ stacks) are allocated in the linear map, and dump_mem
uses __get_user(), so we shouldn't access anything with side-effects,
and will handle holes safely.
This patch fixes the issue by having both functions handle the NULL tsk
case before doing anything else with tsk.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Fixes: a80a0eb70c358f8c ("arm64: make irq_stack_ptr more robust")
Acked-by: James Morse <james.morse@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yang Shi <yang.shi@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2016-09-24 00:55:05 +08:00
|
|
|
if (tsk->ret_stack &&
|
2020-03-13 17:04:59 +08:00
|
|
|
(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
|
2018-12-08 02:13:28 +08:00
|
|
|
struct ftrace_ret_stack *ret_stack;
|
2015-12-15 16:33:41 +08:00
|
|
|
/*
|
|
|
|
* This is a case where function graph tracer has
|
|
|
|
* modified a return address (LR) in a stack frame
|
|
|
|
* to hook a function return.
|
|
|
|
* So replace it to an original value.
|
|
|
|
*/
|
2018-12-08 02:13:28 +08:00
|
|
|
ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
|
|
|
|
if (WARN_ON_ONCE(!ret_stack))
|
|
|
|
return -EINVAL;
|
|
|
|
frame->pc = ret_stack->ret;
|
2015-12-15 16:33:41 +08:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
|
|
|
|
2020-03-13 17:04:59 +08:00
|
|
|
frame->pc = ptrauth_strip_insn_pac(frame->pc);
|
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2019-07-25 16:16:05 +08:00
|
|
|
NOKPROBE_SYMBOL(unwind_frame);
|
2012-03-05 19:49:27 +08:00
|
|
|
|
2015-12-15 16:33:40 +08:00
|
|
|
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
|
2020-09-14 23:34:08 +08:00
|
|
|
bool (*fn)(void *, unsigned long), void *data)
|
2012-03-05 19:49:27 +08:00
|
|
|
{
|
|
|
|
while (1) {
|
|
|
|
int ret;
|
|
|
|
|
2020-09-14 23:34:08 +08:00
|
|
|
if (!fn(data, frame->pc))
|
2012-03-05 19:49:27 +08:00
|
|
|
break;
|
2015-12-15 16:33:40 +08:00
|
|
|
ret = unwind_frame(tsk, frame);
|
2012-03-05 19:49:27 +08:00
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-07-25 16:16:05 +08:00
|
|
|
NOKPROBE_SYMBOL(walk_stackframe);
|
2012-03-05 19:49:27 +08:00
|
|
|
|
2020-09-21 20:23:41 +08:00
|
|
|
static void dump_backtrace_entry(unsigned long where, const char *loglvl)
|
|
|
|
{
|
|
|
|
printk("%s %pS\n", loglvl, (void *)where);
|
|
|
|
}
|
|
|
|
|
|
|
|
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
|
|
|
|
const char *loglvl)
|
|
|
|
{
|
|
|
|
struct stackframe frame;
|
|
|
|
int skip = 0;
|
|
|
|
|
|
|
|
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
|
|
|
|
|
|
|
|
if (regs) {
|
|
|
|
if (user_mode(regs))
|
|
|
|
return;
|
|
|
|
skip = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!tsk)
|
|
|
|
tsk = current;
|
|
|
|
|
|
|
|
if (!try_get_task_stack(tsk))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (tsk == current) {
|
|
|
|
start_backtrace(&frame,
|
|
|
|
(unsigned long)__builtin_frame_address(0),
|
|
|
|
(unsigned long)dump_backtrace);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* task blocked in __switch_to
|
|
|
|
*/
|
|
|
|
start_backtrace(&frame,
|
|
|
|
thread_saved_fp(tsk),
|
|
|
|
thread_saved_pc(tsk));
|
|
|
|
}
|
|
|
|
|
|
|
|
printk("%sCall trace:\n", loglvl);
|
|
|
|
do {
|
|
|
|
/* skip until specified stack frame */
|
|
|
|
if (!skip) {
|
|
|
|
dump_backtrace_entry(frame.pc, loglvl);
|
|
|
|
} else if (frame.fp == regs->regs[29]) {
|
|
|
|
skip = 0;
|
|
|
|
/*
|
|
|
|
* Mostly, this is the case where this function is
|
|
|
|
* called in panic/abort. As exception handler's
|
|
|
|
* stack frame does not contain the corresponding pc
|
|
|
|
* at which an exception has taken place, use regs->pc
|
|
|
|
* instead.
|
|
|
|
*/
|
|
|
|
dump_backtrace_entry(regs->pc, loglvl);
|
|
|
|
}
|
|
|
|
} while (!unwind_frame(tsk, &frame));
|
|
|
|
|
|
|
|
put_task_stack(tsk);
|
|
|
|
}
|
|
|
|
|
|
|
|
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
|
|
|
|
{
|
|
|
|
dump_backtrace(NULL, tsk, loglvl);
|
|
|
|
barrier();
|
|
|
|
}
|
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
#ifdef CONFIG_STACKTRACE
|
|
|
|
|
2020-09-14 23:34:09 +08:00
|
|
|
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
|
|
|
|
struct task_struct *task, struct pt_regs *regs)
|
2012-03-05 19:49:27 +08:00
|
|
|
{
|
|
|
|
struct stackframe frame;
|
|
|
|
|
2020-09-14 23:34:09 +08:00
|
|
|
if (regs)
|
|
|
|
start_backtrace(&frame, regs->regs[29], regs->pc);
|
|
|
|
else if (task == current)
|
2019-07-02 21:07:28 +08:00
|
|
|
start_backtrace(&frame,
|
|
|
|
(unsigned long)__builtin_frame_address(0),
|
2020-09-14 23:34:09 +08:00
|
|
|
(unsigned long)arch_stack_walk);
|
|
|
|
else
|
|
|
|
start_backtrace(&frame, thread_saved_fp(task),
|
|
|
|
thread_saved_pc(task));
|
2016-11-04 04:23:08 +08:00
|
|
|
|
2020-09-14 23:34:09 +08:00
|
|
|
walk_stackframe(task, &frame, consume_entry, cookie);
|
2012-03-05 19:49:27 +08:00
|
|
|
}
|
2017-09-14 07:28:32 +08:00
|
|
|
|
2012-03-05 19:49:27 +08:00
|
|
|
#endif
|