[S390] fix s390 assembler code alignments

The alignment is missing for various global symbols in s390 assembly code.
With a recent gcc and an instruction like stgrl, this can lead to a
specification exception if the instruction uses such a misaligned address.

Specify the alignment explicitly and, while at it, define __ALIGN for s390
and use the ENTRY macro to save some lines of code.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 144d634a21 (parent 603d1a50ac)
Author:    Jan Glauber <jang@linux.vnet.ibm.com>
Committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Date:      2011-07-24 10:48:19 +02:00

21 changed files with 378 additions and 680 deletions
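
For reference, ENTRY() comes from the generic include/linux/linkage.h, which only supplies a
default __ALIGN when the architecture does not define its own; with the new s390 definition,
every symbol declared through ENTRY() is emitted on an aligned boundary. A rough sketch of the
relevant macros, paraphrased from the generic linkage.h of that kernel generation and shown
for illustration only:

    /* include/linux/linkage.h, generic part (paraphrased) */
    #ifndef __ALIGN
    #define __ALIGN		.align 4,0x90	/* fallback used when the arch defines nothing */
    #define __ALIGN_STR	".align 4,0x90"
    #endif

    #ifdef __ASSEMBLY__
    #define ALIGN		__ALIGN

    #ifndef ENTRY
    #define ENTRY(name)	\
    	.globl name;	\
    	ALIGN;		\
    	name:
    #endif
    #endif

With the s390 override introduced below (__ALIGN = .align 4, 0x07), ENTRY(startup_continue)
therefore expands to roughly:

    	.globl startup_continue
    	.align	4, 0x07		# on s390, .align takes a byte count: 4-byte alignment, 0x07 fill
    startup_continue: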

@@ -7,14 +7,14 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include "sizes.h"
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	basr	%r13,0			# get base
 .LPG1:
 	# setup stack

@@ -7,14 +7,14 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include "sizes.h"
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	basr	%r13,0			# get base
 .LPG1:
 	# setup stack

@@ -1,6 +1,9 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
-/* Nothing to see here... */
+#include <linux/stringify.h>
+
+#define __ALIGN .align 4, 0x07
+#define __ALIGN_STR __stringify(__ALIGN)
 
 #endif
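
Two side notes on the constants chosen here, with a hedged example. The 0x07 fill byte is a
reasonable choice for padding in the text section: a pair of 0x07 bytes decodes as "bcr 0,%r7",
which does nothing if the padding is ever executed. For data, the explicit .align 8 / .align 4
directives added in front of the .bss variables in the following hunks attack the same problem
from the other side; a sketch of the failure mode described in the commit message (the symbol
name is made up for illustration):

    	.section .bss
    	.align	8		# without this, the .quad below may end up on a
    	.globl	some_handler_fn	# non-doubleword address; a store generated by a
    some_handler_fn:		# recent gcc, e.g. stgrl %r1,some_handler_fn,
    	.quad	0		# then takes a specification exception because the
    	.previous		# relative-long operand must be naturally aligned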

@@ -6,13 +6,13 @@
  *		 Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_64BIT
 
-	.globl	s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
 	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -26,13 +26,13 @@ s390_base_mcck_handler:
 	lpswe	__LC_MCK_OLD_PSW
 
 	.section .bss
+	.align	8
 	.globl	s390_base_mcck_handler_fn
 s390_base_mcck_handler_fn:
 	.quad	0
 	.previous
 
-	.globl	s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
 	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -46,13 +46,13 @@ s390_base_ext_handler:
 	lpswe	__LC_EXT_OLD_PSW
 
 	.section .bss
+	.align	8
 	.globl	s390_base_ext_handler_fn
 s390_base_ext_handler_fn:
 	.quad	0
 	.previous
 
-	.globl	s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
 	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -70,6 +70,7 @@ disabled_wait_psw:
 	.quad	0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
 
 	.section .bss
+	.align	8
 	.globl	s390_base_pgm_handler_fn
 s390_base_pgm_handler_fn:
 	.quad	0
@@ -77,8 +78,7 @@ s390_base_pgm_handler_fn:
 
 #else /* CONFIG_64BIT */
 
-	.globl	s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	l	%r15,__LC_PANIC_STACK	# load panic stack
 	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -93,13 +93,13 @@ s390_base_mcck_handler:
 2:	.long	s390_base_mcck_handler_fn
 
 	.section .bss
+	.align	4
 	.globl	s390_base_mcck_handler_fn
 s390_base_mcck_handler_fn:
 	.long	0
 	.previous
 
-	.globl	s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
 	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -115,13 +115,13 @@ s390_base_ext_handler:
 2:	.long	s390_base_ext_handler_fn
 
 	.section .bss
+	.align	4
 	.globl	s390_base_ext_handler_fn
 s390_base_ext_handler_fn:
 	.long	0
 	.previous
 
-	.globl	s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
 	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -142,6 +142,7 @@ disabled_wait_psw:
 	.long	0x000a0000,0x00000000 + s390_base_pgm_handler
 
 	.section .bss
+	.align	4
 	.globl	s390_base_pgm_handler_fn
 s390_base_pgm_handler_fn:
 	.long	0

[File diff suppressed because it is too large]

@@ -9,8 +9,8 @@
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
-#include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
 #include <asm/ptrace.h>
@@ -197,8 +197,7 @@ STACK_SIZE = 1 << STACK_SHIFT
  * Returns:
  *  gpr2 = prev
  */
-	.globl	__switch_to
-__switch_to:
+ENTRY(__switch_to)
 	basr	%r1,0
 0:	l	%r4,__THREAD_info(%r2)	# get thread_info of prev
 	l	%r5,__THREAD_info(%r3)	# get thread_info of next
@@ -224,8 +223,7 @@ __critical_start:
  * are executed with interrupts enabled.
  */
 
-	.globl	system_call
-system_call:
+ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -388,8 +386,7 @@ sysc_tracenogo:
 #
 # a new process exits the kernel with ret_from_fork
 #
-	.globl	ret_from_fork
-ret_from_fork:
+ENTRY(ret_from_fork)
 	l	%r13,__LC_SVC_NEW_PSW+4
 	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
@@ -405,8 +402,7 @@ ret_from_fork:
 # kernel_execve function needs to deal with pt_regs that is not
 # at the usual place
 #
-	.globl	kernel_execve
-kernel_execve:
+ENTRY(kernel_execve)
 	stm	%r12,%r15,48(%r15)
 	lr	%r14,%r15
 	l	%r13,__LC_SVC_NEW_PSW+4
@@ -438,8 +434,7 @@ kernel_execve:
  * Program check handler routine
  */
 
-	.globl	pgm_check_handler
-pgm_check_handler:
+ENTRY(pgm_check_handler)
 /*
  * First we need to check for a special case:
  * Single stepping an instruction that disables the PER event mask will
@@ -565,8 +560,7 @@ kernel_per:
  * IO interrupt handler routine
  */
 
-	.globl	io_int_handler
-io_int_handler:
+ENTRY(io_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -703,8 +697,7 @@ io_notify_resume:
  * External interrupt handler routine
  */
 
-	.globl	ext_int_handler
-ext_int_handler:
+ENTRY(ext_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -731,8 +724,7 @@ __critical_end:
  * Machine check handler routines
  */
 
-	.globl	mcck_int_handler
-mcck_int_handler:
+ENTRY(mcck_int_handler)
 	stck	__LC_MCCK_CLOCK
 	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
 	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
@@ -818,8 +810,7 @@ mcck_return:
  */
 #ifdef CONFIG_SMP
 __CPUINIT
-	.globl	restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
 	basr	%r1,0
 restart_base:
 	spt	restart_vtime-restart_base(%r1)
@@ -848,8 +839,7 @@ restart_vtime:
 /*
  * If we do not run with SMP enabled, let the new CPU crash ...
  */
-	.globl	restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
 	basr	%r1,0
 restart_base:
 	lpsw	restart_crash-restart_base(%r1)

@@ -9,8 +9,8 @@
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
-#include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
 #include <asm/ptrace.h>
@@ -219,8 +219,7 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
  * Returns:
  *  gpr2 = prev
  */
-	.globl	__switch_to
-__switch_to:
+ENTRY(__switch_to)
 	lg	%r4,__THREAD_info(%r2)	# get thread_info of prev
 	lg	%r5,__THREAD_info(%r3)	# get thread_info of next
 	tm	__TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
@@ -245,8 +244,7 @@ __critical_start:
  * are executed with interrupts enabled.
  */
 
-	.globl	system_call
-system_call:
+ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
 sysc_saveall:
 	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -408,8 +406,7 @@ sysc_tracenogo:
 #
 # a new process exits the kernel with ret_from_fork
 #
-	.globl	ret_from_fork
-ret_from_fork:
+ENTRY(ret_from_fork)
 	lg	%r13,__LC_SVC_NEW_PSW+8
 	lg	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
 	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
@@ -424,8 +421,7 @@ ret_from_fork:
 # kernel_execve function needs to deal with pt_regs that is not
 # at the usual place
 #
-	.globl	kernel_execve
-kernel_execve:
+ENTRY(kernel_execve)
 	stmg	%r12,%r15,96(%r15)
 	lgr	%r14,%r15
 	aghi	%r15,-SP_SIZE
@@ -455,8 +451,7 @@ kernel_execve:
  * Program check handler routine
  */
 
-	.globl	pgm_check_handler
-pgm_check_handler:
+ENTRY(pgm_check_handler)
 /*
  * First we need to check for a special case:
  * Single stepping an instruction that disables the PER event mask will
@@ -584,8 +579,7 @@ kernel_per:
 /*
  * IO interrupt handler routine
  */
-	.globl	io_int_handler
-io_int_handler:
+ENTRY(io_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
@@ -719,8 +713,7 @@ io_notify_resume:
 /*
  * External interrupt handler routine
  */
-	.globl	ext_int_handler
-ext_int_handler:
+ENTRY(ext_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
@@ -749,8 +742,7 @@ __critical_end:
 /*
  * Machine check handler routines
  */
-	.globl	mcck_int_handler
-mcck_int_handler:
+ENTRY(mcck_int_handler)
 	stck	__LC_MCCK_CLOCK
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
@@ -836,8 +828,7 @@ mcck_done:
  */
 #ifdef CONFIG_SMP
 __CPUINIT
-	.globl	restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
 	basr	%r1,0
 restart_base:
 	spt	restart_vtime-restart_base(%r1)
@@ -864,8 +855,7 @@ restart_vtime:
 /*
  * If we do not run with SMP enabled, let the new CPU crash ...
  */
-	.globl	restart_int_handler
-restart_int_handler:
+ENTRY(restart_int_handler)
 	basr	%r1,0
 restart_base:
 	lpswe	restart_crash-restart_base(%r1)
@@ -1055,8 +1045,7 @@ cleanup_io_restore_insn:
  * %r2	pointer to sie control block
  * %r3	guest register save area
  */
-	.globl	sie64a
-sie64a:
+ENTRY(sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area

@@ -22,6 +22,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
@@ -383,8 +384,7 @@ iplstart:
 # doesn't need a builtin ipl record.
 #
 	.org	0x800
-	.globl	start
-start:
+ENTRY(start)
 	stm	%r0,%r15,0x07b0		# store registers
 	basr	%r12,%r0
 .base:
@@ -448,8 +448,7 @@ start:
 # or linload or SALIPL
 #
 	.org	0x10000
-	.globl	startup
-startup:
+ENTRY(startup)
 	basr	%r13,0			# get base
 .LPG0:
 	xc	0x200(256),0x200	# partially clear lowcore

@@ -11,13 +11,13 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	basr	%r13,0			# get base
 .LPG1:
 
@@ -78,8 +78,7 @@ startup_continue:
 .Lbase_cc:
 	.long	sched_clock_base_cc
 
-	.globl	_ehead
-_ehead:
+ENTRY(_ehead)
 
 #ifdef CONFIG_SHARED_KERNEL
 	.org	0x100000 - 0x11000	# head.o ends at 0x11000
@@ -88,8 +87,8 @@ _ehead:
 #
 # startup-code, running in absolute addressing mode
 #
-	.globl	_stext
-_stext:	basr	%r13,0			# get base
+ENTRY(_stext)
+	basr	%r13,0			# get base
 .LPG3:
 # check control registers
 	stctl	%c0,%c15,0(%r15)

@@ -11,13 +11,13 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	larl	%r1,sched_clock_base_cc
 	mvc	0(8,%r1),__LC_LAST_UPDATE_CLOCK
 	larl	%r13,.LPG1		# get base
@@ -76,8 +76,7 @@ startup_continue:
 	.long	0x80000000,0,0,0	# invalid access-list entries
 .endr
 
-	.globl	_ehead
-_ehead:
+ENTRY(_ehead)
 
 #ifdef CONFIG_SHARED_KERNEL
 	.org	0x100000 - 0x11000	# head.o ends at 0x11000
@@ -86,8 +85,8 @@ _ehead:
 #
 # startup-code, running in absolute addressing mode
 #
-	.globl	_stext
-_stext:	basr	%r13,0			# get base
+ENTRY(_stext)
+	basr	%r13,0			# get base
 .LPG3:
 # check control registers
 	stctg	%c0,%c15,0(%r15)

@@ -5,21 +5,19 @@
  *
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 
 	.section .kprobes.text, "ax"
 
-	.globl	ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
 	br	%r14
 
-	.globl	_mcount
-_mcount:
+ENTRY(_mcount)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14
 
-	.globl	ftrace_caller
-ftrace_caller:
+ENTRY(ftrace_caller)
 #endif
 	stm	%r2,%r5,16(%r15)
 	bras	%r1,2f
@@ -41,8 +39,7 @@ ftrace_caller:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	l	%r2,100(%r15)
 	l	%r3,152(%r15)
-	.globl	ftrace_graph_caller
-ftrace_graph_caller:
+ENTRY(ftrace_graph_caller)
 # The bras instruction gets runtime patched to call prepare_ftrace_return.
 # See ftrace_enable_ftrace_graph_caller. The patched instruction is:
 # bras	%r14,prepare_ftrace_return
@@ -56,8 +53,7 @@ ftrace_graph_caller:
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-	.globl	return_to_handler
-return_to_handler:
+ENTRY(return_to_handler)
 	stm	%r2,%r5,16(%r15)
 	st	%r14,56(%r15)
 	lr	%r0,%r15

@@ -5,21 +5,19 @@
  *
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 
 	.section .kprobes.text, "ax"
 
-	.globl	ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
 	br	%r14
 
-	.globl	_mcount
-_mcount:
+ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14
 
-	.globl	ftrace_caller
-ftrace_caller:
+ENTRY(ftrace_caller)
 #endif
 	larl	%r1,function_trace_stop
 	icm	%r1,0xf,0(%r1)
@@ -37,8 +35,7 @@ ftrace_caller:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	lg	%r2,168(%r15)
 	lg	%r3,272(%r15)
-	.globl	ftrace_graph_caller
-ftrace_graph_caller:
+ENTRY(ftrace_graph_caller)
 # The bras instruction gets runtime patched to call prepare_ftrace_return.
 # See ftrace_enable_ftrace_graph_caller. The patched instruction is:
 # bras	%r14,prepare_ftrace_return
@@ -52,8 +49,7 @@ ftrace_graph_caller:
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-	.globl	return_to_handler
-return_to_handler:
+ENTRY(return_to_handler)
 	stmg	%r2,%r5,32(%r15)
 	lgr	%r1,%r15
 	aghi	%r15,-160

@@ -6,14 +6,15 @@
  *    Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 
 #
 # do_reipl_asm
 # Parameter: r2 = schid of reipl device
 #
-	.globl	do_reipl_asm
-do_reipl_asm:	basr	%r13,0
+ENTRY(do_reipl_asm)
+	basr	%r13,0
 .Lpg0:	lpsw	.Lnewpsw-.Lpg0(%r13)
 .Lpg1:	# do store status of all registers

@@ -4,6 +4,7 @@
  *	Denis Joseph Barrow,
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 
 #
@@ -11,8 +12,8 @@
 # Parameter: r2 = schid of reipl device
 #
-	.globl	do_reipl_asm
-do_reipl_asm:	basr	%r13,0
+ENTRY(do_reipl_asm)
+	basr	%r13,0
 .Lpg0:	lpswe	.Lnewpsw-.Lpg0(%r13)
 .Lpg1:	# do store status of all registers

@@ -8,6 +8,8 @@
  *
  */
 
+#include <linux/linkage.h>
+
 /*
  * moves the new kernel to its destination...
  * %r2 = pointer to first kimage_entry_t
@@ -22,8 +24,7 @@
  */
 
 	.text
-	.globl	relocate_kernel
-relocate_kernel:
+ENTRY(relocate_kernel)
 	basr	%r13,0		# base address
 .base:
 	stnsm	sys_msk-.base(%r13),0xfb	# disable DAT
@@ -112,6 +113,7 @@
 	.byte	0
 	.align	8
 relocate_kernel_end:
+	.align	8
 	.globl	relocate_kernel_len
 relocate_kernel_len:
 	.quad	relocate_kernel_end - relocate_kernel

@@ -8,6 +8,8 @@
  *
  */
 
+#include <linux/linkage.h>
+
 /*
  * moves the new kernel to its destination...
  * %r2 = pointer to first kimage_entry_t
@@ -23,8 +25,7 @@
  */
 
 	.text
-	.globl	relocate_kernel
-relocate_kernel:
+ENTRY(relocate_kernel)
 	basr	%r13,0		# base address
 .base:
 	stnsm	sys_msk-.base(%r13),0xfb	# disable DAT
@@ -115,6 +116,7 @@
 	.byte	0
 	.align	8
 relocate_kernel_end:
+	.align	8
 	.globl	relocate_kernel_len
 relocate_kernel_len:
 	.quad	relocate_kernel_end - relocate_kernel

@@ -8,6 +8,8 @@
  *
  */
 
+#include <linux/linkage.h>
+
 LC_EXT_NEW_PSW		= 0x58			# addr of ext int handler
 LC_EXT_NEW_PSW_64	= 0x1b0			# addr of ext int handler 64 bit
 LC_EXT_INT_PARAM	= 0x80			# addr of ext int parameter
@@ -260,8 +262,7 @@ _sclp_print:
 # R2 = 0 on success, 1 on failure
 #
 
-	.globl	_sclp_print_early
-_sclp_print_early:
+ENTRY(_sclp_print_early)
 	stm	%r6,%r15,24(%r15)	# save registers
 	ahi	%r15,-96		# create stack frame
 #ifdef CONFIG_64BIT

@@ -5,6 +5,7 @@
  *
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 
@@ -16,9 +17,7 @@
 # %r6 - destination cpu
 
 	.section .text
-	.align	4
-	.globl	smp_switch_to_cpu
-smp_switch_to_cpu:
+ENTRY(smp_switch_to_cpu)
 	stm	%r6,%r15,__SF_GPRS(%r15)
 	lr	%r1,%r15
 	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -33,8 +32,7 @@ smp_switch_to_cpu:
 	brc	2,2b	/* busy, try again */
 3:	j	3b
 
-	.globl	smp_restart_cpu
-smp_restart_cpu:
+ENTRY(smp_restart_cpu)
 	basr	%r13,0
 0:	la	%r1,.gprregs_addr-0b(%r13)
 	l	%r1,0(%r1)

@@ -5,6 +5,7 @@
  *
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 
@@ -16,9 +17,7 @@
 # %r6 - destination cpu
 
 	.section .text
-	.align	4
-	.globl	smp_switch_to_cpu
-smp_switch_to_cpu:
+ENTRY(smp_switch_to_cpu)
 	stmg	%r6,%r15,__SF_GPRS(%r15)
 	lgr	%r1,%r15
 	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -31,8 +30,7 @@ smp_switch_to_cpu:
 	brc	2,2b	/* busy, try again */
 3:	j	3b
 
-	.globl	smp_restart_cpu
-smp_restart_cpu:
+ENTRY(smp_restart_cpu)
 	larl	%r1,.gprregs
 	lmg	%r0,%r15,0(%r1)
 1:	sigp	%r0,%r5,__SIGP_SENSE	/* Wait for calling CPU */

@@ -7,6 +7,7 @@
  *		Michael Holzheu <holzheu@linux.vnet.ibm.com>
  */
 
+#include <linux/linkage.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
@@ -22,9 +23,7 @@
  * This function runs with disabled interrupts.
  */
 	.section .text
-	.align	4
-	.globl	swsusp_arch_suspend
-swsusp_arch_suspend:
+ENTRY(swsusp_arch_suspend)
 	stmg	%r6,%r15,__SF_GPRS(%r15)
 	lgr	%r1,%r15
 	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -112,8 +111,7 @@ swsusp_arch_suspend:
 * Then we return to the function that called swsusp_arch_suspend().
 * swsusp_arch_resume() runs with disabled interrupts.
 */
-	.globl	swsusp_arch_resume
-swsusp_arch_resume:
+ENTRY(swsusp_arch_resume)
 	stmg	%r6,%r15,__SF_GPRS(%r15)
 	lgr	%r1,%r15
 	aghi	%r15,-STACK_FRAME_OVERHEAD

@@ -1,5 +1,7 @@
 # S/390 __udiv_qrnnd
 
+#include <linux/linkage.h>
+
 # r2 : &__r
 # r3 : upper half of 64 bit word n
 # r4 : lower half of 64 bit word n
@@ -8,8 +10,7 @@
 # the quotient q is to be returned
 
 	.text
-	.globl	__udiv_qrnnd
-__udiv_qrnnd:
+ENTRY(__udiv_qrnnd)
 	st	%r2,24(%r15)	# store pointer to reminder for later
 	lr	%r0,%r3		# reload n
 	lr	%r1,%r4