/*
 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values... Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r5, =handle_arch_irq
	mov	r0, sp
	ldr	r5, [r5]
	adr	lr, BSYM(9997f)
	teq	r5, #0
	movne	pc, r5
#endif
	arch_irq_handler_default
9997:
	.endm

	.macro	pabt_helper
	@ PABORT handler takes fault address in r4
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper
	mov	r2, r4
	mov	r3, r5

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
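
/*
 * SPFIX() emits its argument only when CONFIG_AEABI is set on ARMv5 or
 * later, where the SVC stack must be kept 64-bit aligned on entry (see
 * the EABI note in the user mode handlers section below).
 *
 * Both svc_entry and usr_entry are entered with r0 pointing at the
 * three words saved on the exception-mode stack (the original r0,
 * lr_<exception> and spsr_<exception>), which is why they begin with
 * "ldmia r0, {r3 - r5}".
 */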

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
	dabt_helper

	@
	@ call main handler
	@
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r5, [sp, #S_PSR]
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r4]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	mov	r2, r4
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	pabt_helper
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	.endm

	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	dabt_helper

	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this threads thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop
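
	@ The computed branches above index the table below, which has one
	@ word-sized entry per coprocessor (CP#0 through CP#15), selected
	@ by the CP number extracted from the faulting instruction.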
	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry
	pabt_helper
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * These are segment of kernel provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native feature and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counter part to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purpose.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm
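	@ usr_ret (above) returns to the caller in user space; with
	@ CONFIG_ARM_THUMB it uses bx so the helpers below work for both
	@ ARM and Thumb callers.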

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0
|
|
|
|
|
2005-06-09 02:00:47 +08:00
|
|
|
#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
|
2005-04-30 05:08:33 +08:00
|
|
|
|
2005-06-09 02:00:47 +08:00
|
|
|
/*
|
|
|
|
* Poor you. No fast solution possible...
|
|
|
|
* The kernel itself must perform the operation.
|
|
|
|
* A special ghost syscall is used for that (see traps.c).
|
|
|
|
*/
|
2006-01-19 06:38:49 +08:00
|
|
|
stmfd sp!, {r7, lr}
|
2010-12-02 01:12:43 +08:00
|
|
|
ldr r7, 1f @ it's 20 bits
|
2009-11-10 07:53:29 +08:00
|
|
|
swi __ARM_NR_cmpxchg
|
2006-01-19 06:38:49 +08:00
|
|
|
ldmfd sp!, {r7, pc}
|
2009-11-10 07:53:29 +08:00
|
|
|
1: .word __ARM_NR_cmpxchg
|
2005-06-09 02:00:47 +08:00
|
|
|
|
|
|
|
#elif __LINUX_ARM_ARCH__ < 6
|
2005-04-30 05:08:33 +08:00
|
|
|
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
#ifdef CONFIG_MMU
|
|
|
|
|
2005-04-30 05:08:33 +08:00
|
|
|
/*
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
* The only thing that can break atomicity in this cmpxchg
|
|
|
|
* implementation is either an IRQ or a data abort exception
|
|
|
|
* causing another process/thread to be scheduled in the middle
|
|
|
|
* of the critical sequence. To prevent this, code is added to
|
|
|
|
* the IRQ and data abort exception handlers to set the pc back
|
|
|
|
* to the beginning of the critical section if it is found to be
|
|
|
|
* within that critical section (see kuser_cmpxchg_fixup).
|
2005-04-30 05:08:33 +08:00
|
|
|
*/
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
1: ldr r3, [r2] @ load current val
|
|
|
|
subs r3, r3, r0 @ compare with oldval
|
|
|
|
2: streq r1, [r2] @ store newval if eq
|
|
|
|
rsbs r0, r3, #0 @ set return val and C flag
|
|
|
|
usr_ret lr
|
|
|
|
|
|
|
|
.text
|
|
|
|
kuser_cmpxchg_fixup:
|
|
|
|
@ Called from kuser_cmpxchg_check macro.
|
2011-06-25 22:44:20 +08:00
|
|
|
@ r4 = address of interrupted insn (must be preserved).
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
@ sp = saved regs. r7 and r8 are clobbered.
|
|
|
|
@ 1b = first critical insn, 2b = last critical insn.
|
2011-06-25 22:44:20 +08:00
|
|
|
@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
mov r7, #0xffff0fff
|
|
|
|
sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
|
2011-06-25 22:44:20 +08:00
|
|
|
subs r8, r4, r7
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
rsbcss r8, r8, #(2b - 1b)
|
|
|
|
strcs r7, [sp, #S_PC]
|
|
|
|
mov pc, lr
|
|
|
|
.previous
|
|
|
|
|
2006-02-09 05:19:37 +08:00
|
|
|
#else
|
|
|
|
#warning "NPTL on non MMU needs fixing"
|
|
|
|
mov r0, #-1
|
|
|
|
adds r0, r0, #0
|
2006-08-19 00:20:15 +08:00
|
|
|
usr_ret lr
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
#endif
|
2005-04-30 05:08:33 +08:00
|
|
|
|
|
|
|
#else
|
|
|
|
|
ARM: 6516/1: Allow SMP_ON_UP to work with Thumb-2 kernels.
* __fixup_smp_on_up has been modified with support for the
THUMB2_KERNEL case. For THUMB2_KERNEL only, fixups are split
into halfwords in case of misalignment, since we can't rely on
unaligned accesses working before turning the MMU on.
No attempt is made to optimise the aligned case, since the
number of fixups is typically small, and it seems best to keep
the code as simple as possible.
* Add a rotate in the fixup_smp code in order to support
CPU_BIG_ENDIAN, as suggested by Nicolas Pitre.
* Add an assembly-time sanity-check to ALT_UP() to ensure that
the content really is the right size (4 bytes).
(No check is done for ALT_SMP(). Possibly, this could be fixed
by splitting the two uses ot ALT_SMP() (ALT_SMP...SMP_UP versus
ALT_SMP...SMP_UP_B) into two macros. In the first case,
ALT_SMP needs to expand to >= 4 bytes, not == 4.)
* smp_mpidr.h (which implements ALT_SMP()/ALT_UP() manually due
to macro limitations) has not been modified: the affected
instruction (mov) has no 16-bit encoding, so the correct
instruction size is satisfied in this case.
* A "mode" parameter has been added to smp_dmb:
smp_dmb arm @ assumes 4-byte instructions (for ARM code, e.g. kuser)
smp_dmb @ uses W() to ensure 4-byte instructions for ALT_SMP()
This avoids assembly failures due to use of W() inside smp_dmb,
when assembling pure-ARM code in the vectors page.
There might be a better way to achieve this.
* Kconfig: make SMP_ON_UP depend on
(!THUMB2_KERNEL || !BIG_ENDIAN) i.e., THUMB2_KERNEL is now
supported, but only if !BIG_ENDIAN (The fixup code for Thumb-2
currently assumes little-endian order.)
Tested using a single generic realview kernel on:
ARM RealView PB-A8 (CONFIG_THUMB2_KERNEL={n,y})
ARM RealView PBX-A9 (SMP)
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-12-01 22:39:23 +08:00
|
|
|
smp_dmb arm
|
[ARM] 4659/1: remove possibilities for spurious false negative with __kuser_cmpxchg
The ARM __kuser_cmpxchg routine is meant to implement an atomic cmpxchg
in user space. It however can produce spurious false negative if a
processor exception occurs in the middle of the operation. Normally
this is not a problem since cmpxchg is typically called in a loop until
it succeeds to implement an atomic increment for example.
Some use cases which don't involve a loop require that the operation be
100% reliable though. This patch changes the implementation so to
reattempt the operation after an exception has occurred in the critical
section rather than abort it.
Here's a simple program to test the fix (don't use CONFIG_NO_HZ in your
kernel as this depends on a sufficiently high interrupt rate):
#include <stdio.h>
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
int main()
{
int i, x = 0;
for (i = 0; i < 100000000; i++) {
int v = x;
if (__kernel_cmpxchg(v, v+1, &x))
printf("failed at %d: %d vs %d\n", i, v, x);
}
printf("done with %d vs %d\n", i, x);
return 0;
}
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2007-11-21 00:20:29 +08:00
|
|
|
1: ldrex r3, [r2]
|
2005-04-30 05:08:33 +08:00
|
|
|
subs r3, r3, r0
|
|
|
|
strexeq r3, r1, [r2]
|
2007-11-21 00:20:29 +08:00
|
|
|
teqeq r3, #1
|
|
|
|
beq 1b
|
2005-04-30 05:08:33 +08:00
|
|
|
rsbs r0, r3, #0
|
2007-11-21 00:20:29 +08:00
|
|
|
/* beware -- each __kuser slot must be 8 instructions max */
|
2010-09-04 17:47:48 +08:00
|
|
|
ALT_SMP(b __kuser_memory_barrier)
|
|
|
|
ALT_UP(usr_ret lr)
|
2005-04-30 05:08:33 +08:00
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
.align 5
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reference prototype:
|
|
|
|
*
|
|
|
|
* int __kernel_get_tls(void)
|
|
|
|
*
|
|
|
|
* Input:
|
|
|
|
*
|
|
|
|
* lr = return address
|
|
|
|
*
|
|
|
|
* Output:
|
|
|
|
*
|
|
|
|
* r0 = TLS value
|
|
|
|
*
|
|
|
|
* Clobbered:
|
|
|
|
*
|
2007-11-21 00:20:29 +08:00
|
|
|
* none
|
2005-04-30 05:08:33 +08:00
|
|
|
*
|
|
|
|
* Definition and user space usage example:
|
|
|
|
*
|
|
|
|
* typedef int (__kernel_get_tls_t)(void);
|
|
|
|
* #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
|
|
|
|
*
|
|
|
|
* Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
|
|
|
|
*
|
|
|
|
* This could be used as follows:
|
|
|
|
*
|
|
|
|
* #define __kernel_get_tls() \
|
|
|
|
* ({ register unsigned int __val asm("r0"); \
|
|
|
|
* asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
|
|
|
|
* : "=r" (__val) : : "lr","cc" ); \
|
|
|
|
* __val; })
|
|
|
|
*/
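For completeness, a minimal user-space sketch that calls the helper through the documented typedef and fixed address rather than through the inline-asm macro shown above. The function-pointer macro is renamed here only to avoid clashing with that macro, and the wrapper name is illustrative.
typedef int (__kernel_get_tls_t)(void);
#define __kernel_get_tls_ptr (*(__kernel_get_tls_t *)0xffff0fe0)

/* Illustrative wrapper: returns the value previously set via the
 * __ARM_NR_set_tls syscall, as described in the comment above. */
static int read_tls_value(void)
{
	return __kernel_get_tls_ptr();
}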
|
|
|
|
|
|
|
|
__kuser_get_tls: @ 0xffff0fe0
|
2010-07-05 21:53:10 +08:00
|
|
|
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
|
2006-08-19 00:20:15 +08:00
|
|
|
usr_ret lr
|
2010-07-05 21:53:10 +08:00
|
|
|
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
|
|
|
|
.rep 4
|
|
|
|
.word 0 @ 0xffff0ff0 software TLS value, then
|
|
|
|
.endr @ pad up to __kuser_helper_version
|
2005-04-30 05:08:33 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Reference declaration:
|
|
|
|
*
|
|
|
|
* extern unsigned int __kernel_helper_version;
|
|
|
|
*
|
|
|
|
* Definition and user space usage example:
|
|
|
|
*
|
|
|
|
* #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
|
|
|
|
*
|
|
|
|
* User space may read this to determine the current number of helpers
|
|
|
|
* available.
|
|
|
|
*/
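A minimal sketch of the version check described above; the required minimum used here is an assumed placeholder, since this file does not state per-helper minimum versions.
#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)

/* Assumed placeholder: consult the kernel's kuser-helper documentation
 * for the real minimum version required by each helper. */
#define REQUIRED_HELPER_VERSION 1

static int kuser_helpers_usable(void)
{
	return __kernel_helper_version >= REQUIRED_HELPER_VERSION;
}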
|
|
|
|
|
|
|
|
__kuser_helper_version: @ 0xffff0ffc
|
|
|
|
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
|
|
|
|
|
|
|
|
.globl __kuser_helper_end
|
|
|
|
__kuser_helper_end:
|
|
|
|
|
2009-07-24 19:32:54 +08:00
|
|
|
THUMB( .thumb )
|
2005-04-30 05:08:33 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Vector stubs.
|
|
|
|
*
|
2005-04-26 22:17:42 +08:00
|
|
|
* This code is copied to 0xffff0200 so we can use branches in the
|
|
|
|
* vectors, rather than ldr's. Note that this code must not
|
|
|
|
* exceed 0x300 bytes.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* Common stub entry macro:
|
|
|
|
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
|
2005-06-01 05:22:32 +08:00
|
|
|
*
|
|
|
|
* SP points to a minimal amount of processor-private memory, the address
|
|
|
|
* of which is copied into r0 for the mode specific abort handler.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2005-11-06 22:42:37 +08:00
|
|
|
.macro vector_stub, name, mode, correction=0
|
2005-04-17 06:20:36 +08:00
|
|
|
.align 5
|
|
|
|
|
|
|
|
vector_\name:
|
|
|
|
.if \correction
|
|
|
|
sub lr, lr, #\correction
|
|
|
|
.endif
|
2005-06-01 05:22:32 +08:00
|
|
|
|
|
|
|
@
|
|
|
|
@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
|
|
|
|
@ (parent CPSR)
|
|
|
|
@
|
|
|
|
stmia sp, {r0, lr} @ save r0, lr
|
2005-04-17 06:20:36 +08:00
|
|
|
mrs lr, spsr
|
2005-06-01 05:22:32 +08:00
|
|
|
str lr, [sp, #8] @ save spsr
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
@
|
2005-06-01 05:22:32 +08:00
|
|
|
@ Prepare for SVC32 mode. IRQs remain disabled.
|
2005-04-17 06:20:36 +08:00
|
|
|
@
|
2005-06-01 05:22:32 +08:00
|
|
|
mrs r0, cpsr
|
2009-07-24 19:32:54 +08:00
|
|
|
eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
|
2005-06-01 05:22:32 +08:00
|
|
|
msr spsr_cxsf, r0
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-06-01 05:22:32 +08:00
|
|
|
@
|
|
|
|
@ the branch table must immediately follow this code
|
|
|
|
@
|
|
|
|
and lr, lr, #0x0f
|
2009-07-24 19:32:54 +08:00
|
|
|
THUMB( adr r0, 1f )
|
|
|
|
THUMB( ldr lr, [r0, lr, lsl #2] )
|
2005-11-06 22:42:37 +08:00
|
|
|
mov r0, sp
|
2009-07-24 19:32:54 +08:00
|
|
|
ARM( ldr lr, [pc, lr, lsl #2] )
|
2005-06-01 05:22:32 +08:00
|
|
|
movs pc, lr @ branch to handler in SVC mode
|
2008-08-28 18:22:32 +08:00
|
|
|
ENDPROC(vector_\name)
|
2009-07-24 19:32:52 +08:00
|
|
|
|
|
|
|
.align 2
|
|
|
|
@ handler addresses follow this label
|
|
|
|
1:
|
2005-04-17 06:20:36 +08:00
|
|
|
.endm
|
|
|
|
|
2005-04-26 22:17:42 +08:00
|
|
|
.globl __stubs_start
|
2005-04-17 06:20:36 +08:00
|
|
|
__stubs_start:
|
|
|
|
/*
|
|
|
|
* Interrupt dispatcher
|
|
|
|
*/
|
2005-11-06 22:42:37 +08:00
|
|
|
vector_stub irq, IRQ_MODE, 4
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
.long __irq_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __irq_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __irq_invalid @ 4
|
|
|
|
.long __irq_invalid @ 5
|
|
|
|
.long __irq_invalid @ 6
|
|
|
|
.long __irq_invalid @ 7
|
|
|
|
.long __irq_invalid @ 8
|
|
|
|
.long __irq_invalid @ 9
|
|
|
|
.long __irq_invalid @ a
|
|
|
|
.long __irq_invalid @ b
|
|
|
|
.long __irq_invalid @ c
|
|
|
|
.long __irq_invalid @ d
|
|
|
|
.long __irq_invalid @ e
|
|
|
|
.long __irq_invalid @ f
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Data abort dispatcher
|
|
|
|
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
|
|
|
|
*/
|
2005-11-06 22:42:37 +08:00
|
|
|
vector_stub dabt, ABT_MODE, 8
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
.long __dabt_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __dabt_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __dabt_invalid @ 4
|
|
|
|
.long __dabt_invalid @ 5
|
|
|
|
.long __dabt_invalid @ 6
|
|
|
|
.long __dabt_invalid @ 7
|
|
|
|
.long __dabt_invalid @ 8
|
|
|
|
.long __dabt_invalid @ 9
|
|
|
|
.long __dabt_invalid @ a
|
|
|
|
.long __dabt_invalid @ b
|
|
|
|
.long __dabt_invalid @ c
|
|
|
|
.long __dabt_invalid @ d
|
|
|
|
.long __dabt_invalid @ e
|
|
|
|
.long __dabt_invalid @ f
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Prefetch abort dispatcher
|
|
|
|
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
|
|
|
|
*/
|
2005-11-06 22:42:37 +08:00
|
|
|
vector_stub pabt, ABT_MODE, 4
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
.long __pabt_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __pabt_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __pabt_invalid @ 4
|
|
|
|
.long __pabt_invalid @ 5
|
|
|
|
.long __pabt_invalid @ 6
|
|
|
|
.long __pabt_invalid @ 7
|
|
|
|
.long __pabt_invalid @ 8
|
|
|
|
.long __pabt_invalid @ 9
|
|
|
|
.long __pabt_invalid @ a
|
|
|
|
.long __pabt_invalid @ b
|
|
|
|
.long __pabt_invalid @ c
|
|
|
|
.long __pabt_invalid @ d
|
|
|
|
.long __pabt_invalid @ e
|
|
|
|
.long __pabt_invalid @ f
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Undef instr entry dispatcher
|
|
|
|
* Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
|
|
|
|
*/
|
2005-11-06 22:42:37 +08:00
|
|
|
vector_stub und, UND_MODE
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
.long __und_usr @ 0 (USR_26 / USR_32)
|
|
|
|
.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
|
|
|
|
.long __und_invalid @ 2 (IRQ_26 / IRQ_32)
|
|
|
|
.long __und_svc @ 3 (SVC_26 / SVC_32)
|
|
|
|
.long __und_invalid @ 4
|
|
|
|
.long __und_invalid @ 5
|
|
|
|
.long __und_invalid @ 6
|
|
|
|
.long __und_invalid @ 7
|
|
|
|
.long __und_invalid @ 8
|
|
|
|
.long __und_invalid @ 9
|
|
|
|
.long __und_invalid @ a
|
|
|
|
.long __und_invalid @ b
|
|
|
|
.long __und_invalid @ c
|
|
|
|
.long __und_invalid @ d
|
|
|
|
.long __und_invalid @ e
|
|
|
|
.long __und_invalid @ f
|
|
|
|
|
|
|
|
.align 5
|
|
|
|
|
|
|
|
/*=============================================================================
|
|
|
|
* Undefined FIQs
|
|
|
|
*-----------------------------------------------------------------------------
|
|
|
|
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
|
|
|
|
* MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
|
|
|
|
* Basically to switch modes, we *HAVE* to clobber one register... brain
|
|
|
|
* damage alert! I don't think that we can execute any code in here in any
|
|
|
|
* other mode than FIQ... Ok you can switch to another mode, but you can't
|
|
|
|
* get out of that mode without clobbering one register.
|
|
|
|
*/
|
|
|
|
vector_fiq:
|
|
|
|
disable_fiq
|
|
|
|
subs pc, lr, #4
|
|
|
|
|
|
|
|
/*=============================================================================
|
|
|
|
* Address exception handler
|
|
|
|
*-----------------------------------------------------------------------------
|
|
|
|
* These aren't too critical.
|
|
|
|
* (they're not supposed to happen, and won't happen in 32-bit data mode).
|
|
|
|
*/
|
|
|
|
|
|
|
|
vector_addrexcptn:
|
|
|
|
b vector_addrexcptn
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We group all the following data together to optimise
|
|
|
|
* for CPUs with separate I & D caches.
|
|
|
|
*/
|
|
|
|
.align 5
|
|
|
|
|
|
|
|
.LCvswi:
|
|
|
|
.word vector_swi
|
|
|
|
|
2005-04-26 22:17:42 +08:00
|
|
|
.globl __stubs_end
|
2005-04-17 06:20:36 +08:00
|
|
|
__stubs_end:
|
|
|
|
|
2005-04-26 22:17:42 +08:00
|
|
|
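@ stubs_offset turns a stub's link-time address into the address of its
@ copy in the vectors page: the vectors sit at the page base and the stub
@ code 0x200 bytes above them (see the 0xffff0200 note in the vector stub
@ comment), so the branches below reach the copied stubs.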
.equ stubs_offset, __vectors_start + 0x200 - __stubs_start
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-04-26 22:17:42 +08:00
|
|
|
.globl __vectors_start
|
|
|
|
__vectors_start:
|
2009-07-24 19:32:54 +08:00
|
|
|
ARM( swi SYS_ERROR0 )
|
|
|
|
THUMB( svc #0 )
|
|
|
|
THUMB( nop )
|
|
|
|
W(b) vector_und + stubs_offset
|
|
|
|
W(ldr) pc, .LCvswi + stubs_offset
|
|
|
|
W(b) vector_pabt + stubs_offset
|
|
|
|
W(b) vector_dabt + stubs_offset
|
|
|
|
W(b) vector_addrexcptn + stubs_offset
|
|
|
|
W(b) vector_irq + stubs_offset
|
|
|
|
W(b) vector_fiq + stubs_offset
|
2005-04-26 22:17:42 +08:00
|
|
|
|
|
|
|
.globl __vectors_end
|
|
|
|
__vectors_end:
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
.data
|
|
|
|
|
|
|
|
.globl cr_alignment
|
|
|
|
.globl cr_no_alignment
|
|
|
|
cr_alignment:
|
|
|
|
.space 4
|
|
|
|
cr_no_alignment:
|
|
|
|
.space 4
|
2010-12-13 16:42:34 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
|
|
|
.globl handle_arch_irq
|
|
|
|
handle_arch_irq:
|
|
|
|
.space 4
|
|
|
|
#endif
|