[PATCH] ARM: 2664/2: add support for atomic ops on pre-ARMv6 SMP systems
Patch from Nicolas Pitre

Not that there might be many of them on the planet, but at least RMK apparently has one.

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit dcef1f6346
parent aeabbbbe12
@@ -269,7 +269,7 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
 
-#if __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 	@ make sure our user space atomic helper is aborted
 	cmp	r2, #VIRT_OFFSET
 	bichs	r3, r3, #PSR_Z_BIT
@@ -616,11 +616,17 @@ __kuser_helper_start:
 
 __kuser_cmpxchg:				@ 0xffff0fc0
 
-#if __LINUX_ARM_ARCH__ < 6
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 
-#ifdef CONFIG_SMP  /* sanity check */
-#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
-#endif
+	/*
+	 * Poor you. No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	swi	#0x9ffff0
+	mov	pc, lr
+
+#elif __LINUX_ARM_ARCH__ < 6
 
 	/*
 	 * Theory of operation:
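For context: the helper being rewired here sits at the fixed address 0xffff0fc0 and keeps its calling convention of r0 = oldval, r1 = newval, r2 = ptr, returning zero in r0 (with the C flag set) only if the store happened. The wrapper below is not part of the patch; it is only a sketch, under that convention, of how user space typically drives the helper in a retry loop. On kernels where CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG is set, the very same call now ends up in the ghost syscall added further down.

/*
 * Illustrative user-space wrapper (not part of this patch), assuming the
 * calling convention described above: retry until the exchange succeeds.
 */
typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

static int atomic_add_return(volatile int *ptr, int val)
{
	int old, new;

	do {
		old = *ptr;		/* current value */
		new = old + val;	/* desired value */
	} while (__kuser_cmpxchg(old, new, ptr));	/* non-zero: no exchange, retry */

	return new;
}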
@@ -464,6 +464,55 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 #endif
 		return 0;
 
+#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
+	/*
+	 * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
+	 * Return zero in r0 if *MEM was changed or non-zero if no exchange
+	 * happened. Also set the user C flag accordingly.
+	 * If access permissions have to be fixed up then non-zero is
+	 * returned and the operation has to be re-attempted.
+	 *
+	 * *NOTE*: This is a ghost syscall private to the kernel. Only the
+	 * __kuser_cmpxchg code in entry-armv.S should be aware of its
+	 * existence. Don't ever use this from user code.
+	 */
+	case 0xfff0:
+	{
+		extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+					 struct pt_regs *regs);
+		unsigned long val;
+		unsigned long addr = regs->ARM_r2;
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd; pmd_t *pmd; pte_t *pte;
+
+		regs->ARM_cpsr &= ~PSR_C_BIT;
+		spin_lock(&mm->page_table_lock);
+		pgd = pgd_offset(mm, addr);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, addr);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map(pmd, addr);
+		if (!pte_present(*pte) || !pte_write(*pte))
+			goto bad_access;
+		val = *(unsigned long *)addr;
+		val -= regs->ARM_r0;
+		if (val == 0) {
+			*(unsigned long *)addr = regs->ARM_r1;
+			regs->ARM_cpsr |= PSR_C_BIT;
+		}
+		spin_unlock(&mm->page_table_lock);
+		return val;
+
+		bad_access:
+		spin_unlock(&mm->page_table_lock);
+		/* simulate a read access fault */
+		do_DataAbort(addr, 15 + (1 << 11), regs);
+		return -1;
+	}
+#endif
+
 	default:
 		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 		   if not implemented, rather than raising SIGILL. This
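A note on the numbers, since the helper issues swi #0x9ffff0 while the code above handles case 0xfff0: ARM-private SWIs live in the 0x9fxxxx range, and arm_syscall() is assumed here to dispatch on the low 16 bits of that number. The defines below are purely illustrative (the macro names are made up for this sketch, not kernel symbols):

#define ARM_PRIVATE_SWI_BASE	0x9f0000	/* ARM-private SWI range (illustrative name) */
#define GHOST_CMPXCHG_INDEX	0xfff0		/* low 16 bits, i.e. the case label above */
#define GHOST_CMPXCHG_SWI	(ARM_PRIVATE_SWI_BASE + GHOST_CMPXCHG_INDEX)	/* == 0x9ffff0 */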
@@ -422,3 +422,11 @@ config HAS_TLS_REG
 	  assume directly accessing that register and always obtain the
 	  expected value only on ARMv7 and above.
 
+config NEEDS_SYSCALL_FOR_CMPXCHG
+	bool
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	help
+	  SMP on a pre-ARMv6 processor? Well OK then.
+	  Forget about fast user space cmpxchg support.
+	  It is just not possible.
+
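Since the new symbol has no prompt, it cannot be toggled by hand; it only defaults on for the configurations the commit message targets. A hypothetical .config fragment (illustrative, not from the patch) for such a machine:

CONFIG_SMP=y
CONFIG_CPU_32v5=y
CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG=y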