perf core: Pass max stack as a perf_callchain_entry context
This makes perf_callchain_{user,kernel}() receive the max stack
as context for the perf_callchain_entry, instead of accessing the
global sysctl_perf_event_max_stack.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Milian Wolff <milian.wolff@kdab.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Wang Nan <wangnan0@huawei.com>
Cc: Zefan Li <lizefan@huawei.com>
Link: http://lkml.kernel.org/n/tip-kolmn1yo40p7jhswxwrc7rrd@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit cfbcf46845 (parent a831100aee)
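The heart of the change is the new struct perf_callchain_entry_ctx added in the include/linux/perf_event.h hunk below: the callchain buffer and its depth limit now travel together, so arch walkers stop reading the global sysctl_perf_event_max_stack. A minimal user-space model of the pattern; the two struct definitions mirror the diff, and everything else (the fixed-size ip array, main, the fake unwind loop) is illustrative scaffolding:

/* Standalone model of the new context pattern; compile with
 * `gcc -o ctx-model ctx-model.c`. Struct names mirror the diff;
 * the rest is illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct perf_callchain_entry {
        uint64_t nr;
        uint64_t ip[64];        /* fixed size here; the kernel sizes this from the sysctl */
};

/* New in this commit: the depth limit is carried as context instead of
 * being read from the global sysctl_perf_event_max_stack. */
struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;
        uint32_t max_stack;
};

static int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, uint64_t ip)
{
        struct perf_callchain_entry *entry = ctx->entry;

        if (entry->nr < ctx->max_stack) {
                entry->ip[entry->nr++] = ip;
                return 0;
        }
        return -1;      /* full: tell the walker to stop */
}

int main(void)
{
        struct perf_callchain_entry entry = { .nr = 0 };
        struct perf_callchain_entry_ctx ctx = { .entry = &entry, .max_stack = 4 };

        /* stand-in for an arch unwind loop, bounded by ctx.max_stack */
        for (uint64_t pc = 0x1000; perf_callchain_store(&ctx, pc) == 0; pc += 8)
                ;

        printf("captured %llu frames\n", (unsigned long long)entry.nr);
        return 0;
}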
arch/arc/kernel/perf_event.c
@@ -48,7 +48,7 @@ struct arc_callchain_trace {
 static int callchain_trace(unsigned int addr, void *data)
 {
         struct arc_callchain_trace *ctrl = data;
-        struct perf_callchain_entry *entry = ctrl->perf_stuff;
+        struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
         perf_callchain_store(entry, addr);
 
         if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct arc_callchain_trace ctrl = {
                 .depth = 0,
@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         /*
          * User stack can't be unwound trivially with kernel dwarf unwinder
arch/arm/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
         struct frame_tail buftail;
         unsigned long err;
@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct frame_tail __user *tail;
 
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
         tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-        while ((entry->nr < sysctl_perf_event_max_stack) &&
+        while ((entry->entry->nr < entry->max_stack) &&
                tail && !((unsigned long)tail & 0x3))
                 tail = user_backtrace(tail, entry);
 }
@@ -89,13 +89,13 @@ static int
 callchain_trace(struct stackframe *fr,
                 void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
         perf_callchain_store(entry, fr->pc);
         return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stackframe fr;
 
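A pattern that recurs throughout the arch hunks: loops that compared entry->nr < sysctl_perf_event_max_stack now read entry->entry->nr < entry->max_stack. The parameter keeps its old name, entry, but its type changed to the ctx wrapper, hence the double dereference. A compilable before/after sketch (builds with gcc -c; structs abbreviated, loop bodies are stand-ins):

#include <stdint.h>

struct perf_callchain_entry { uint64_t nr; uint64_t ip[128]; };
struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;
        uint32_t max_stack;
};

int sysctl_perf_event_max_stack = 127;  /* the old global limit */

/* Before: every arch loop read the global directly. */
void walk_before(struct perf_callchain_entry *entry)
{
        while (entry->nr < (uint64_t)sysctl_perf_event_max_stack)
                entry->ip[entry->nr++] = 0;     /* stand-in for one unwound frame */
}

/* After: the limit travels in the ctx, so the bound check double-dereferences. */
void walk_after(struct perf_callchain_entry_ctx *entry)
{
        while (entry->entry->nr < entry->max_stack)
                entry->entry->ip[entry->entry->nr++] = 0;
}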
arch/arm64/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
         struct frame_tail buftail;
         unsigned long err;
@@ -76,7 +76,7 @@ struct compat_frame_tail {
 
 static struct compat_frame_tail __user *
 compat_user_backtrace(struct compat_frame_tail __user *tail,
-                      struct perf_callchain_entry *entry)
+                      struct perf_callchain_entry_ctx *entry)
 {
         struct compat_frame_tail buftail;
         unsigned long err;
@@ -106,7 +106,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 }
 #endif /* CONFIG_COMPAT */
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
                 tail = (struct frame_tail __user *)regs->regs[29];
 
-                while (entry->nr < sysctl_perf_event_max_stack &&
+                while (entry->entry->nr < entry->max_stack &&
                        tail && !((unsigned long)tail & 0xf))
                         tail = user_backtrace(tail, entry);
         } else {
@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
                 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-                while ((entry->nr < sysctl_perf_event_max_stack) &&
+                while ((entry->entry->nr < entry->max_stack) &&
                        tail && !((unsigned long)tail & 0x3))
                         tail = compat_user_backtrace(tail, entry);
 #endif
@@ -146,12 +146,12 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
  */
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
         perf_callchain_store(entry, frame->pc);
         return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         struct stackframe frame;
arch/metag/kernel/perf_callchain.c
@@ -29,7 +29,7 @@ static bool is_valid_call(unsigned long calladdr)
 
 static struct metag_frame __user *
 user_backtrace(struct metag_frame __user *user_frame,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
         struct metag_frame frame;
         unsigned long calladdr;
@@ -56,7 +56,7 @@ user_backtrace(struct metag_frame __user *user_frame,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         unsigned long sp = regs->ctx.AX[0].U0;
         struct metag_frame __user *frame;
@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
         --frame;
 
-        while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+        while ((entry->entry->nr < entry->max_stack) && frame)
                 frame = user_backtrace(frame, entry);
 }
 
@@ -78,13 +78,13 @@ static int
 callchain_trace(struct stackframe *fr,
                 void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
         perf_callchain_store(entry, fr->pc);
         return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stackframe fr;
 
arch/mips/kernel/perf_event.c
@@ -25,8 +25,8 @@
  * the user stack callchains, we will add it here.
  */
 
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
-        unsigned long reg29)
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
+        unsigned long reg29)
 {
         unsigned long *sp = (unsigned long *)reg29;
         unsigned long addr;
@@ -35,14 +35,14 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
                 addr = *sp++;
                 if (__kernel_text_address(addr)) {
                         perf_callchain_store(entry, addr);
-                        if (entry->nr >= sysctl_perf_event_max_stack)
+                        if (entry->entry->nr >= entry->max_stack)
                                 break;
                 }
         }
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
-                           struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+                           struct pt_regs *regs)
 {
         unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
         }
         do {
                 perf_callchain_store(entry, pc);
-                if (entry->nr >= sysctl_perf_event_max_stack)
+                if (entry->entry->nr >= entry->max_stack)
                         break;
                 pc = unwind_stack(current, &sp, pc, &ra);
         } while (pc);
arch/powerpc/perf/callchain.c
@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         unsigned long sp, next_sp;
         unsigned long next_ip;
@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
                 puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned long sp, next_sp;
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
         sp = regs->gpr[1];
         perf_callchain_store(entry, next_ip);
 
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->entry->nr < entry->max_stack) {
                 fp = (unsigned long __user *) sp;
                 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
                         return;
@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
         return rc;
 }
 
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                           struct pt_regs *regs)
 {
 }
@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
         return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned int sp, next_sp;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
         sp = regs->gpr[1];
         perf_callchain_store(entry, next_ip);
 
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->entry->nr < entry->max_stack) {
                 fp = (unsigned int __user *) (unsigned long) sp;
                 if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
                         return;
@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         if (current_is_64bit())
                 perf_callchain_user_64(entry, regs);
arch/s390/kernel/perf_event.c
@@ -224,13 +224,13 @@ arch_initcall(service_level_perf_register);
 
 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         perf_callchain_store(entry, address);
         return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         if (user_mode(regs))
arch/sh/kernel/perf_callchain.c
@@ -21,7 +21,7 @@ static int callchain_stack(void *data, char *name)
 
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         if (reliable)
                 perf_callchain_store(entry, addr);
@@ -33,7 +33,7 @@ static const struct stacktrace_ops callchain_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         perf_callchain_store(entry, regs->pc);
 
arch/sparc/kernel/perf_event.c
@@ -1711,7 +1711,7 @@ static int __init init_hw_perf_events(void)
 }
 pure_initcall(init_hw_perf_events);
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         unsigned long ksp, fp;
@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
                         }
                 }
 #endif
-        } while (entry->nr < sysctl_perf_event_max_stack);
+        } while (entry->entry->nr < entry->max_stack);
 }
 
 static inline int
@@ -1769,7 +1769,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
         return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned long ufp;
@@ -1790,10 +1790,10 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
                 pc = sf.callers_pc;
                 ufp = (unsigned long)sf.fp + STACK_BIAS;
                 perf_callchain_store(entry, pc);
-        } while (entry->nr < sysctl_perf_event_max_stack);
+        } while (entry->entry->nr < entry->max_stack);
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned long ufp;
@@ -1822,11 +1822,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                         ufp = (unsigned long)sf.fp;
                 }
                 perf_callchain_store(entry, pc);
-        } while (entry->nr < sysctl_perf_event_max_stack);
+        } while (entry->entry->nr < entry->max_stack);
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         u64 saved_fault_address = current_thread_info()->fault_address;
         u8 saved_fault_code = get_thread_fault_code();
arch/tile/kernel/perf_event.c
@@ -941,7 +941,7 @@ arch_initcall(init_hw_perf_events);
 /*
  * Tile specific backtracing code for perf_events.
  */
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
         struct KBacktraceIterator kbt;
@@ -992,13 +992,13 @@ static inline void perf_callchain(struct perf_callchain_entry *entry,
         }
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
         perf_callchain(entry, regs);
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         perf_callchain(entry, regs);
arch/x86/events/core.c
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
 
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         return perf_callchain_store(entry, addr);
 }
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                 /* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         /* 32-bit process in 64-bit kernel. */
         unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 
         fp = compat_ptr(ss_base + regs->bp);
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = 0;
                 frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stack_frame frame;
         const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                 return;
 
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = NULL;
                 frame.return_address = 0;
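The x86 user-side walk above is the usual frame-pointer chase: with page faults disabled it fetches one stack_frame record per iteration and stops when the chain ends or the ctx limit is hit. A self-contained user-space model of that bounded walk; main and the direct pointer chase are illustrative, since the kernel copies each frame from user memory rather than following it directly.

/* User-space model of the bounded frame-pointer walk; compile with
 * `gcc -o fp-walk fp-walk.c`. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct stack_frame {
        struct stack_frame *next_frame;
        uint64_t return_address;
};

struct perf_callchain_entry { uint64_t nr; uint64_t ip[16]; };
struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;
        uint32_t max_stack;
};

static void walk(struct perf_callchain_entry_ctx *ctx, const struct stack_frame *fp)
{
        /* mirrors "while (entry->entry->nr < entry->max_stack)" in the diff */
        while (ctx->entry->nr < ctx->max_stack && fp) {
                ctx->entry->ip[ctx->entry->nr++] = fp->return_address;
                fp = fp->next_frame;
        }
}

int main(void)
{
        struct stack_frame f2 = { NULL, 0x4003 };
        struct stack_frame f1 = { &f2, 0x4002 };
        struct stack_frame f0 = { &f1, 0x4001 };
        struct perf_callchain_entry entry = { .nr = 0 };
        struct perf_callchain_entry_ctx ctx = { &entry, 16 };

        walk(&ctx, &f0);
        printf("%llu frames\n", (unsigned long long)entry.nr);
        return 0;
}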
arch/xtensa/kernel/perf_event.c
@@ -323,23 +323,23 @@ static void xtensa_pmu_read(struct perf_event *event)
 
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         perf_callchain_store(entry, frame->pc);
         return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
-        xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+        xtensa_backtrace_kernel(regs, entry->max_stack,
                                 callchain_trace, NULL, entry);
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
-        xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+        xtensa_backtrace_user(regs, entry->max_stack,
                               callchain_trace, entry);
 }
 
include/linux/perf_event.h
@@ -61,6 +61,11 @@ struct perf_callchain_entry {
         __u64                           ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
 };
 
+struct perf_callchain_entry_ctx {
+        struct perf_callchain_entry *entry;
+        u32                          max_stack;
+};
+
 struct perf_raw_record {
         u32                             size;
         void                            *data;
@@ -1063,19 +1068,20 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-                   bool crosstask, bool add_mark);
+                   u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
 extern int sysctl_perf_event_max_stack;
 
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
 {
-        if (entry->nr < sysctl_perf_event_max_stack) {
+        struct perf_callchain_entry *entry = ctx->entry;
+
+        if (entry->nr < ctx->max_stack) {
                 entry->ip[entry->nr++] = ip;
                 return 0;
         } else {
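One way to read the ip[0] flexible array above: the per-CPU buffers are sized for the configured maximum, while the new ctx caps how much of that space a single walk may fill. A user-space model of the sizing arithmetic (illustrative only; the kernel's real buffer management lives in kernel/events/callchain.c):

/* Flexible-array sizing model; compile with `gcc -o sizing sizing.c`. */
#include <stdint.h>
#include <stdlib.h>

struct perf_callchain_entry {
        uint64_t nr;
        uint64_t ip[];  /* sized from /proc/sys/kernel/perf_event_max_stack */
};

static struct perf_callchain_entry *alloc_entry(uint32_t max_stack)
{
        /* header plus one u64 slot per possible frame */
        struct perf_callchain_entry *e =
                malloc(sizeof(*e) + (size_t)max_stack * sizeof(uint64_t));

        if (e)
                e->nr = 0;
        return e;
}

int main(void)
{
        struct perf_callchain_entry *e = alloc_entry(127);
        int ok = (e != NULL);

        free(e);
        return ok ? 0 : 1;
}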
kernel/bpf/stackmap.c
@@ -136,7 +136,8 @@ static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
                        BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                 return -EINVAL;
 
-        trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+        trace = get_perf_callchain(regs, init_nr, kernel, user,
+                                   sysctl_perf_event_max_stack, false, false);
 
         if (unlikely(!trace))
                 /* couldn't fetch the stack trace */
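Both in-tree callers, this BPF site and perf_callchain() in kernel/events/callchain.c below, still pass sysctl_perf_event_max_stack, so behavior is unchanged; the gain is that the limit is now a parameter. A hypothetical call, not part of this commit, showing how a future site could request a shallower walk:

/* Hypothetical caller (illustrative fragment, not from this commit): a site
 * that only needs a few frames can ask for them directly instead of
 * inheriting the global sysctl. */
trace = get_perf_callchain(regs, 0 /* init_nr */, true /* kernel */,
                           false /* user */, 16 /* max_stack */,
                           false /* crosstask */, false /* add_mark */);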
kernel/events/callchain.c
@@ -32,12 +32,12 @@ static DEFINE_MUTEX(callchain_mutex);
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                                 struct pt_regs *regs)
 {
 }
 
@@ -176,14 +176,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
         if (!kernel && !user)
                 return NULL;
 
-        return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+        return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-                   bool crosstask, bool add_mark)
+                   u32 max_stack, bool crosstask, bool add_mark)
 {
         struct perf_callchain_entry *entry;
+        struct perf_callchain_entry_ctx ctx;
         int rctx;
 
         entry = get_callchain_entry(&rctx);
@@ -193,12 +194,15 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
         if (!entry)
                 goto exit_put;
 
+        ctx.entry = entry;
+        ctx.max_stack = max_stack;
+
         entry->nr = init_nr;
 
         if (kernel && !user_mode(regs)) {
                 if (add_mark)
-                        perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-                perf_callchain_kernel(entry, regs);
+                        perf_callchain_store(&ctx, PERF_CONTEXT_KERNEL);
+                perf_callchain_kernel(&ctx, regs);
         }
 
         if (user) {
@@ -214,8 +218,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                                 goto exit_put;
 
                         if (add_mark)
-                                perf_callchain_store(entry, PERF_CONTEXT_USER);
-                        perf_callchain_user(entry, regs);
+                                perf_callchain_store(&ctx, PERF_CONTEXT_USER);
+                        perf_callchain_user(&ctx, regs);
                 }
         }
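Putting the callchain.c hunks together: get_perf_callchain() now builds the ctx once and hands it to the weak arch hooks. A simplified sketch of that wiring (builds with gcc -c; recursion protection, per-CPU buffer lookup, and the PERF_CONTEXT_* markers are elided):

/* Simplified sketch; the arch hooks stand in for the weak
 * perf_callchain_kernel()/perf_callchain_user() symbols above. */
#include <stdint.h>
#include <stdbool.h>

struct perf_callchain_entry { uint64_t nr; uint64_t ip[128]; };
struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;
        uint32_t max_stack;
};

void walker_kernel(struct perf_callchain_entry_ctx *ctx);   /* arch hook stand-in */
void walker_user(struct perf_callchain_entry_ctx *ctx);     /* arch hook stand-in */

struct perf_callchain_entry *
get_chain(struct perf_callchain_entry *entry, uint32_t max_stack,
          bool kernel, bool user)
{
        struct perf_callchain_entry_ctx ctx = {
                .entry = entry,          /* per-CPU buffer in the real code */
                .max_stack = max_stack,  /* caller's limit, e.g. the sysctl */
        };

        entry->nr = 0;
        if (kernel)
                walker_kernel(&ctx);
        if (user)
                walker_user(&ctx);
        return entry;
}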