/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[] = {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

int show_unhandled_signals = 1;

/*
 * Dump out the contents of some kernel memory nicely...
 */
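/*
 * Each output line covers 32 bytes, printed as up to four 64-bit words;
 * words outside [bottom, top) are left blank, and words that fault in
 * __get_user() are shown as "????????????????". The "%04lx" prefix is
 * the low 16 bits of the line's start address.
 */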
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);

	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < (32 / 8) && p < top; i++, p += 8) {
			if (p >= bottom && p < top) {
				unsigned long val;

				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 17, " %016lx", val);
				else
					sprintf(str + i * 17, " ????????????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}

static void dump_backtrace_entry(unsigned long where)
{
	/*
	 * Note that 'where' can have a physical address, but it's not handled.
	 */
	print_ip_sym(where);
}

static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);
}
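
/*
 * dump_instr() prints the four instructions preceding the PC plus the
 * faulting instruction itself, which __dump_instr() wraps in parentheses.
 * For kernel-mode faults we must switch to KERNEL_DS first so that
 * __get_user() may read kernel text.
 */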
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		mm_segment_t fs = get_fs();
		set_fs(KERNEL_DS);
		__dump_instr(lvl, regs);
		set_fs(fs);
	} else {
		__dump_instr(lvl, regs);
	}
}
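
/*
 * Walk and print the call stack of @tsk. When @regs is non-NULL, entries
 * are skipped until the frame whose fp matches regs->regs[29] (the frame
 * pointer at the time of the exception), so the trace starts at the
 * exception site rather than at dump_backtrace() itself.
 */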
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	unsigned long irq_stack_ptr;
	int skip;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	/*
	 * Switching between stacks is valid when tracing current and in
	 * non-preemptible context.
	 */
	if (tsk == current && !preemptible())
		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
	else
		irq_stack_ptr = 0;

	if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = tsk->curr_ret_stack;
#endif

	skip = !!regs;
	printk("Call trace:\n");
	while (1) {
		unsigned long where = frame.pc;
		unsigned long stack;
		int ret;

		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(where);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As the exception handler's
			 * stack frame does not contain the pc at which the
			 * exception was taken, use regs->pc instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
		ret = unwind_frame(tsk, &frame);
		if (ret < 0)
			break;
		stack = frame.sp;
		if (in_exception_text(where)) {
			/*
			 * If we switched to the irq_stack before calling this
			 * exception handler, then the pt_regs will be on the
			 * task stack. The easiest way to tell is if the large
			 * pt_regs would overlap with the end of the irq_stack.
			 */
			if (stack < irq_stack_ptr &&
			    (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
				stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

			dump_mem("", "Exception stack", stack,
				 stack + sizeof(struct pt_regs));
		}
	}

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
		 end_of_stack(tsk));

	if (!user_mode(regs)) {
		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
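
/*
 * Route a fault to its consumer: user-mode faults become a signal to the
 * offending task (with the fault code recorded in thread_struct), while
 * kernel-mode faults have no task to signal and are fatal via die().
 */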
void arm64_notify_die(const char *str, struct pt_regs *regs,
		      struct siginfo *info, int err)
{
	if (user_mode(regs)) {
		current->thread.fault_address = 0;
		current->thread.fault_code = err;
		force_sig_info(info->si_signo, info, current);
	} else {
		die(str, regs, err);
	}
}

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
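
/*
 * Note: for a 32-bit Thumb-2 instruction, the first halfword is the most
 * significant one, so call_undef_hook() assembles the two halfwords as
 * (first << 16) | second before matching against the registered hooks.
 */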
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs))
		return 1;

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		if (get_user(instr, (u16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr2, (u16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr2);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		if (get_user(instr, (u32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}

static void force_signal_inject(int signal, int code, struct pt_regs *regs,
				unsigned long address)
{
	siginfo_t info;
	void __user *pc = (void __user *)instruction_pointer(regs);
	const char *desc;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "bad mode";
		break;
	}

	if (unhandled_signal(current, signal) &&
	    show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: %s: pc=%p\n",
			current->comm, task_pid_nr(current), desc, pc);
		dump_instr(KERN_INFO, regs);
	}

	info.si_signo = signal;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = pc;

	arm64_notify_die(desc, regs, &info, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
{
	int code;

	down_read(&current->mm->mmap_sem);
	if (find_vma(current->mm, addr) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	up_read(&current->mm->mmap_sem);

	force_signal_inject(SIGSEGV, code, regs, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}

int cpu_enable_cache_maint_trap(void *__unused)
{
	config_sctlr_el1(SCTLR_EL1_UCI, 0);
	return 0;
}
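
/*
 * Perform a cache maintenance instruction on a user address. A faulting
 * address does not kill the task: the _ASM_EXTABLE fixup redirects the
 * fault to label 3, which stores -EFAULT in @res instead of raising the
 * exception to the caller.
 */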
#define __user_cache_maint(insn, address, res)			\
	if (untagged_addr(address) >= user_addr_max()) {	\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	address = pt_regs_read_reg(regs, rt);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(regs, address);
	else
		regs->pc += 4;
}
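
/*
 * Emulate an EL0 read of CTR_EL0 with the sanitised system-wide value.
 * (EL0 access is presumably trapped via SCTLR_EL1.UCT on systems whose
 * CPUs report mismatched cache-type information.)
 */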
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	pt_regs_write_reg(regs, rt, val);

	regs->pc += 4;
}

struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{},
};

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_undefinstr(regs);
}

long compat_arm_syscall(struct pt_regs *regs);

asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
	long ret;
	if (is_compat_task()) {
		ret = compat_arm_syscall(regs);
		if (ret != -ENOSYS)
			return ret;
	}
#endif

	if (show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: syscall %d\n", current->comm,
			task_pid_nr(current), (int)regs->syscallno);
		dump_instr("", regs);
		if (user_mode(regs))
			__show_regs(regs);
	}

	return sys_ni_syscall();
}

static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};
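
/*
 * ESR_ELx_EC() extracts the exception-class field, ESR_ELx bits [31:26],
 * which indexes the table above; unallocated classes pick up the
 * "UNRECOGNIZED EC" default from the [0 ... ESR_ELx_EC_MAX] initialiser.
 */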
const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	siginfo_t info;
	void __user *pc = (void __user *)instruction_pointer(regs);
	console_verbose();

	pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	__show_regs(regs);

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = pc;

	current->thread.fault_address = 0;
	current->thread.fault_code = 0;

	force_sig_info(info.si_signo, &info, current);
}

void __pte_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	if (user_mode(regs))
		return DBG_HOOK_ERROR;

	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Ideally, report_bug() should backtrace for us... but no. */
		dump_backtrace(regs, NULL);
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	regs->pc += AARCH64_INSN_SIZE;	/* skip BRK and resume */
	return DBG_HOOK_HANDLED;
}
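
/*
 * BUG() is implemented as BRK #BUG_BRK_IMM. 0xf2000000 is the ESR value
 * for a BRK64 exception (EC = 0x3c, IL = 1) with a zero immediate, so
 * OR-ing in BUG_BRK_IMM matches the BUG() trap and nothing else.
 */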
static struct break_hook bug_break_hook = {
	.esr_val = 0xf2000000 | BUG_BRK_IMM,
	.esr_mask = 0xffffffff,
	.fn = bug_handler,
};

/*
 * Initial handler for AArch64 BRK exceptions
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
	register_break_hook(&bug_break_hook);
}