/**
 * @file backtrace.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author David Smith
 */
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/stacktrace.h>
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-10-20 02:35:03 +08:00
|
|
|
/*
 * stacktrace_ops callback for unwinder warnings that carry a symbol.
 * oprofile deliberately discards them: a noisy unwind must not spam
 * the kernel log from NMI/interrupt context.
 */
static void backtrace_warning_symbol(void *data, char *msg,
				     unsigned long symbol)
{
	/* Intentionally empty. */
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-10-20 02:35:03 +08:00
|
|
|
/*
 * stacktrace_ops callback for plain unwinder warnings; dropped for the
 * same reason as backtrace_warning_symbol().
 */
static void backtrace_warning(void *data, char *msg)
{
	/* Intentionally empty. */
}
|
2006-02-14 23:19:04 +08:00
|
|
|
|
2007-10-20 02:35:03 +08:00
|
|
|
/*
 * stacktrace_ops callback invoked when the unwinder crosses onto a new
 * stack (process, irq, exception). Returning zero means "keep walking":
 * the profile should include every stack the sample touches.
 */
static int backtrace_stack(void *data, char *name)
{
	return 0;	/* never prune a stack */
}
|
2006-02-14 23:19:04 +08:00
|
|
|
|
2008-01-30 20:33:07 +08:00
|
|
|
/*
 * stacktrace_ops callback: record one return address into the sample.
 *
 * @data points at the remaining-depth counter passed to dump_trace();
 * each recorded address consumes one unit, and once it reaches zero no
 * further addresses are added.
 */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	/*
	 * Test before decrementing. The previous "if ((*depth)--)" still
	 * decremented when *depth was already 0, wrapping the unsigned
	 * counter to UINT_MAX, so any later callback invocations would
	 * resume adding trace entries past the requested depth.
	 */
	if (*depth) {
		(*depth)--;
		oprofile_add_trace(addr);
	}
}
|
|
|
|
|
2007-10-20 02:35:03 +08:00
|
|
|
static struct stacktrace_ops backtrace_ops = {
|
2009-12-17 12:40:33 +08:00
|
|
|
.warning = backtrace_warning,
|
|
|
|
.warning_symbol = backtrace_warning_symbol,
|
|
|
|
.stack = backtrace_stack,
|
|
|
|
.address = backtrace_address,
|
|
|
|
.walk_stack = print_context_stack,
|
2007-10-20 02:35:03 +08:00
|
|
|
};
|
|
|
|
|
2010-09-29 22:46:47 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
static struct stack_frame_ia32 *
|
|
|
|
dump_user_backtrace_32(struct stack_frame_ia32 *head)
|
|
|
|
{
|
|
|
|
struct stack_frame_ia32 bufhead[2];
|
|
|
|
struct stack_frame_ia32 *fp;
|
|
|
|
|
|
|
|
/* Also check accessibility of one struct frame_head beyond */
|
|
|
|
if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
|
|
|
|
return NULL;
|
|
|
|
if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
|
|
|
|
|
|
|
|
oprofile_add_trace(bufhead[0].return_address);
|
|
|
|
|
|
|
|
/* frame pointers should strictly progress back up the stack
|
|
|
|
* (towards higher addresses) */
|
|
|
|
if (head >= fp)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return fp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
|
|
|
|
{
|
|
|
|
struct stack_frame_ia32 *head;
|
|
|
|
|
|
|
|
/* User process is 32-bit */
|
|
|
|
if (!current || !test_thread_flag(TIF_IA32))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
head = (struct stack_frame_ia32 *) regs->bp;
|
|
|
|
while (depth-- && head)
|
|
|
|
head = dump_user_backtrace_32(head);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
/* Without CONFIG_COMPAT there are no 32-bit user tasks to unwind. */
static inline int
x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
{
	return 0;	/* caller falls through to the native walk */
}
|
|
|
|
#endif /* CONFIG_COMPAT */
|
|
|
|
|
2010-09-29 22:46:46 +08:00
|
|
|
static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-09-29 22:46:46 +08:00
|
|
|
struct stack_frame bufhead[2];
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-09-29 22:46:46 +08:00
|
|
|
/* Also check accessibility of one struct stack_frame beyond */
|
[PATCH] mm: kill check_user_page_readable
check_user_page_readable is a problematic variant of follow_page. It's used
only by oprofile's i386 and arm backtrace code, at interrupt time, to
establish whether a userspace stackframe is currently readable.
This is problematic, because we want to push the page_table_lock down inside
follow_page, and later split it; whereas oprofile is doing a spin_trylock on
it (in the i386 case, forgotten in the arm case), and needs that to pin
perhaps two pages spanned by the stackframe (which might be covered by
different locks when we split).
I think oprofile is going about this in the wrong way: it doesn't need to know
the area is readable (neither i386 nor arm uses read protection of user
pages), it doesn't need to pin the memory, it should simply
__copy_from_user_inatomic, and see if that succeeds or not. Sorry, but I've
not got around to devising the sparse __user annotations for this.
Then we can eliminate check_user_page_readable, and return to a single
follow_page without the __follow_page variants.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 09:16:32 +08:00
|
|
|
if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
|
|
|
|
return NULL;
|
|
|
|
if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
|
2005-04-17 06:20:36 +08:00
|
|
|
return NULL;
|
|
|
|
|
2010-09-29 22:46:46 +08:00
|
|
|
oprofile_add_trace(bufhead[0].return_address);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
[PATCH] mm: kill check_user_page_readable
check_user_page_readable is a problematic variant of follow_page. It's used
only by oprofile's i386 and arm backtrace code, at interrupt time, to
establish whether a userspace stackframe is currently readable.
This is problematic, because we want to push the page_table_lock down inside
follow_page, and later split it; whereas oprofile is doing a spin_trylock on
it (in the i386 case, forgotten in the arm case), and needs that to pin
perhaps two pages spanned by the stackframe (which might be covered by
different locks when we split).
I think oprofile is going about this in the wrong way: it doesn't need to know
the area is readable (neither i386 nor arm uses read protection of user
pages), it doesn't need to pin the memory, it should simply
__copy_from_user_inatomic, and see if that succeeds or not. Sorry, but I've
not got around to devising the sparse __user annotations for this.
Then we can eliminate check_user_page_readable, and return to a single
follow_page without the __follow_page variants.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 09:16:32 +08:00
|
|
|
/* frame pointers should strictly progress back up the stack
|
|
|
|
* (towards higher addresses) */
|
2010-09-29 22:46:46 +08:00
|
|
|
if (head >= bufhead[0].next_frame)
|
[PATCH] mm: kill check_user_page_readable
check_user_page_readable is a problematic variant of follow_page. It's used
only by oprofile's i386 and arm backtrace code, at interrupt time, to
establish whether a userspace stackframe is currently readable.
This is problematic, because we want to push the page_table_lock down inside
follow_page, and later split it; whereas oprofile is doing a spin_trylock on
it (in the i386 case, forgotten in the arm case), and needs that to pin
perhaps two pages spanned by the stackframe (which might be covered by
different locks when we split).
I think oprofile is going about this in the wrong way: it doesn't need to know
the area is readable (neither i386 nor arm uses read protection of user
pages), it doesn't need to pin the memory, it should simply
__copy_from_user_inatomic, and see if that succeeds or not. Sorry, but I've
not got around to devising the sparse __user annotations for this.
Then we can eliminate check_user_page_readable, and return to a single
follow_page without the __follow_page variants.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 09:16:32 +08:00
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-09-29 22:46:46 +08:00
|
|
|
return bufhead[0].next_frame;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
|
|
|
|
{
|
2010-09-29 22:46:46 +08:00
|
|
|
struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-06-23 15:08:44 +08:00
|
|
|
if (!user_mode_vm(regs)) {
|
2009-05-12 05:03:00 +08:00
|
|
|
unsigned long stack = kernel_stack_pointer(regs);
|
2007-10-20 02:35:03 +08:00
|
|
|
if (depth)
|
x86: Eliminate bp argument from the stack tracing routines
The various stack tracing routines take a 'bp' argument in which the
caller is supposed to provide the base pointer to use, or 0 if doesn't
have one. Since bp is garbage whenever CONFIG_FRAME_POINTER is not
defined, this means all callers in principle should either always pass
0, or be conditional on CONFIG_FRAME_POINTER.
However, there are only really three use cases for stack tracing:
(a) Trace the current task, including IRQ stack if any
(b) Trace the current task, but skip IRQ stack
(c) Trace some other task
In all cases, if CONFIG_FRAME_POINTER is not defined, bp should just
be 0. If it _is_ defined, then
- in case (a) bp should be gotten directly from the CPU's register, so
the caller should pass NULL for regs,
- in case (b) the caller should should pass the IRQ registers to
dump_trace(),
- in case (c) bp should be gotten from the top of the task's stack, so
the caller should pass NULL for regs.
Hence, the bp argument is not necessary because the combination of
task and regs is sufficient to determine an appropriate value for bp.
This patch introduces a new inline function stack_frame(task, regs)
that computes the desired bp. This function is then called from the
two versions of dump_stack().
Signed-off-by: Soren Sandmann <ssp@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arjan van de Ven <arjan@infradead.org>,
Cc: Frederic Weisbecker <fweisbec@gmail.com>,
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>,
LKML-Reference: <m3oc9rop28.fsf@dhcp-100-3-82.bos.redhat.com>>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
2010-11-05 17:59:39 +08:00
|
|
|
dump_trace(NULL, regs, (unsigned long *)stack,
|
2007-10-20 02:35:03 +08:00
|
|
|
&backtrace_ops, &depth);
|
2005-04-17 06:20:36 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-09-29 22:46:47 +08:00
|
|
|
if (x86_backtrace_32(regs, depth))
|
|
|
|
return;
|
|
|
|
|
[PATCH] mm: kill check_user_page_readable
check_user_page_readable is a problematic variant of follow_page. It's used
only by oprofile's i386 and arm backtrace code, at interrupt time, to
establish whether a userspace stackframe is currently readable.
This is problematic, because we want to push the page_table_lock down inside
follow_page, and later split it; whereas oprofile is doing a spin_trylock on
it (in the i386 case, forgotten in the arm case), and needs that to pin
perhaps two pages spanned by the stackframe (which might be covered by
different locks when we split).
I think oprofile is going about this in the wrong way: it doesn't need to know
the area is readable (neither i386 nor arm uses read protection of user
pages), it doesn't need to pin the memory, it should simply
__copy_from_user_inatomic, and see if that succeeds or not. Sorry, but I've
not got around to devising the sparse __user annotations for this.
Then we can eliminate check_user_page_readable, and return to a single
follow_page without the __follow_page variants.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 09:16:32 +08:00
|
|
|
while (depth-- && head)
|
2006-02-14 23:19:04 +08:00
|
|
|
head = dump_user_backtrace(head);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|