#include <linux/ftrace.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/switch_to.h>
#include <asm/cacheflush.h>
#include <asm/epapr_hcalls.h>
#ifdef CONFIG_PPC64
EXPORT_SYMBOL(flush_dcache_range);
#endif
EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(empty_zero_page);
long long __bswapdi2(long long);
EXPORT_SYMBOL(__bswapdi2);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
#ifdef CONFIG_PPC_FPU
EXPORT_SYMBOL(load_fp_state);
EXPORT_SYMBOL(store_fp_state);
#endif
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(load_vr_state);
EXPORT_SYMBOL(store_vr_state);
#endif
#ifdef CONFIG_EPAPR_PARAVIRT
EXPORT_SYMBOL(epapr_hypercall_start);
#endif
powerpc: Reimplement __get_SP() as a function not a define
Li Zhong points out an issue with our current __get_SP()
implementation: if ftrace function tracing is enabled (i.e. -pg
profiling using _mcount), we spill a stack frame on 64-bit all the
time.
If a function calls __get_SP() and later calls a function that is
tail-call optimised, the stack frame is popped and the value
returned by __get_SP() is no longer valid. An example from Li can
be found in save_stack_trace -> save_context_stack:
c0000000000432c0 <.save_stack_trace>:
c0000000000432c0: mflr r0
c0000000000432c4: std r0,16(r1)
c0000000000432c8: stdu r1,-128(r1) <-- stack frame for _mcount
c0000000000432cc: std r3,112(r1)
c0000000000432d0: bl <._mcount>
c0000000000432d4: nop
c0000000000432d8: mr r4,r1 <-- __get_SP()
c0000000000432dc: ld r5,632(r13)
c0000000000432e0: ld r3,112(r1)
c0000000000432e4: li r6,1
c0000000000432e8: addi r1,r1,128 <-- pop stack frame
c0000000000432ec: ld r0,16(r1)
c0000000000432f0: mtlr r0
c0000000000432f4: b <.save_context_stack> <-- tail call optimized
save_context_stack ends up with a stack pointer below the current
one, and it is likely to be scribbled over.
Fix this by making __get_SP() a function which returns the
caller's stack frame. Also replace the inline assembly that grabs
the stack pointer in save_stack_trace and show_stack with
__get_SP().
This also fixes an issue with perf_arch_fetch_caller_regs().
It currently unwinds the stack once, which will skip a
valid stack frame on a leaf function. With the __get_SP() fixes
in this patch, we never need to unwind the stack frame to get
to the first interesting frame.
We have to export __get_SP() because perf_arch_fetch_caller_regs()
(which is used in modules) calls it from a header file.
Reported-by: Li Zhong <zhong@linux.vnet.ibm.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
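For reference, a minimal sketch of the shape of the fix described above. The
exact lines are assumed from the commit description rather than quoted from
the patch: the macro in <asm/reg.h> becomes a real out-of-line function whose
assembly body returns the back-chain word at 0(r1), i.e. the caller's stack
frame, and the symbol is exported so that perf_arch_fetch_caller_regs(),
which modules reach through a header, can call it.

/*
 * <asm/reg.h>: __get_SP() was a macro that read r1 in place; it becomes
 * an out-of-line function, so the value it returns is a frame the
 * compiler cannot pop out from under the caller before a tail call.
 */
unsigned long __get_SP(void);

/*
 * misc.S (sketch): a leaf routine with no frame of its own.  It returns
 * the back-chain word at 0(r1), which is the caller's stack frame:
 *
 *	_GLOBAL(__get_SP)
 *		PPC_LL	r3,0(r1)
 *		blr
 */

/* Exported so perf_arch_fetch_caller_regs(), used from modules, can call it. */
EXPORT_SYMBOL(__get_SP);

With this in place, perf_arch_fetch_caller_regs() presumably no longer has to
dereference the returned value to step up one frame, which is the leaf-function
fix mentioned above. The export below uses the name current_stack_pointer,
which appears to be the same helper under a later name.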
EXPORT_SYMBOL(current_stack_pointer);