tracing: Cleanup stack trace code

- Remove the extra array member of stack_dump_trace[] along with the
  ARRAY_SIZE - 1 initialization for struct stack_trace :: max_entries.

  Both are historical leftovers of no value. The stack tracer never exceeds
  the array and there is no extra storage requirement either.

- Make variables which are only used in trace_stack.c static.

- Simplify the enable/disable logic.

- Rename stack_trace_print() as it's using the stack_trace_ namespace. Free
  the name up for stack trace related functions.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: David Rientjes <rientjes@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: kasan-dev@googlegroups.com
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: iommu@lists.linux-foundation.org
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: linux-btrfs@vger.kernel.org
Cc: dm-devel@redhat.com
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: intel-gfx@lists.freedesktop.org
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: linux-arch@vger.kernel.org
Link: https://lkml.kernel.org/r/20190425094801.230654524@linutronix.de
This commit is contained in:
Thomas Gleixner 2019-04-25 11:44:54 +02:00
parent 4285f2fcef
commit 3d9a807291
2 changed files with 17 additions and 43 deletions

View File

@@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) {
#ifdef CONFIG_STACK_TRACER #ifdef CONFIG_STACK_TRACER
#define STACK_TRACE_ENTRIES 500
struct stack_trace;
extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;
extern int stack_tracer_enabled; extern int stack_tracer_enabled;
void stack_trace_print(void);
-int
-stack_trace_sysctl(struct ctl_table *table, int write,
-		   void __user *buffer, size_t *lenp,
-		   loff_t *ppos);
+int stack_trace_sysctl(struct ctl_table *table, int write,
+		       void __user *buffer, size_t *lenp,
+		       loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer); DECLARE_PER_CPU(int, disable_stack_tracer);

View File

@@ -18,30 +18,26 @@
#include "trace.h" #include "trace.h"
-static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES + 1];
-unsigned stack_trace_index[STACK_TRACE_ENTRIES];
+#define STACK_TRACE_ENTRIES 500
+
+static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
+static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
/*
* Reserve one entry for the passed in ip. This will allow
* us to remove most or all of the stack size overhead
* added by the stack tracer itself.
*/
struct stack_trace stack_trace_max = { struct stack_trace stack_trace_max = {
.max_entries = STACK_TRACE_ENTRIES - 1, .max_entries = STACK_TRACE_ENTRIES,
.entries = &stack_dump_trace[0], .entries = &stack_dump_trace[0],
}; };
unsigned long stack_trace_max_size; static unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock = static arch_spinlock_t stack_trace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
DEFINE_PER_CPU(int, disable_stack_tracer); DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex); static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled; int stack_tracer_enabled;
static int last_stack_tracer_enabled;
void stack_trace_print(void) static void print_max_stack(void)
{ {
long i; long i;
int size; int size;
@@ -61,16 +57,7 @@ void stack_trace_print(void)
} }
} }
/* static void check_stack(unsigned long ip, unsigned long *stack)
* When arch-specific code overrides this function, the following
* data should be filled up, assuming stack_trace_max_lock is held to
* prevent concurrent updates.
* stack_trace_index[]
* stack_trace_max
* stack_trace_max_size
*/
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{ {
unsigned long this_size, flags; unsigned long *p, *top, *start; unsigned long this_size, flags; unsigned long *p, *top, *start;
static int tracer_frame; static int tracer_frame;
@@ -179,7 +166,7 @@ check_stack(unsigned long ip, unsigned long *stack)
stack_trace_max.nr_entries = x; stack_trace_max.nr_entries = x;
if (task_stack_end_corrupted(current)) { if (task_stack_end_corrupted(current)) {
stack_trace_print(); print_max_stack();
BUG(); BUG();
} }
@@ -412,23 +399,21 @@ stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, void __user *buffer, size_t *lenp,
loff_t *ppos) loff_t *ppos)
{ {
int was_enabled;
int ret; int ret;
mutex_lock(&stack_sysctl_mutex); mutex_lock(&stack_sysctl_mutex);
was_enabled = !!stack_tracer_enabled;
ret = proc_dointvec(table, write, buffer, lenp, ppos); ret = proc_dointvec(table, write, buffer, lenp, ppos);
-	if (ret || !write ||
-	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
+	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
goto out; goto out;
last_stack_tracer_enabled = !!stack_tracer_enabled;
if (stack_tracer_enabled) if (stack_tracer_enabled)
register_ftrace_function(&trace_ops); register_ftrace_function(&trace_ops);
else else
unregister_ftrace_function(&trace_ops); unregister_ftrace_function(&trace_ops);
out: out:
mutex_unlock(&stack_sysctl_mutex); mutex_unlock(&stack_sysctl_mutex);
return ret; return ret;
@@ -444,7 +429,6 @@ static __init int enable_stacktrace(char *str)
strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE); strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
stack_tracer_enabled = 1; stack_tracer_enabled = 1;
last_stack_tracer_enabled = 1;
return 1; return 1;
} }
__setup("stacktrace", enable_stacktrace); __setup("stacktrace", enable_stacktrace);