s390/cmpxchg: use compiler builtins
The kernel build for s390 fails for gcc compilers with version 3.x; set the minimum required version of gcc to 4.3.

As the atomic builtins are available with all gcc 4.x compilers, use the __sync_val_compare_and_swap and __sync_bool_compare_and_swap functions to replace the complex macro and inline assembler magic in include/asm/cmpxchg.h. The compiler can just do it and generates better code with the builtins.

While we are at it, use __sync_bool_compare_and_swap for the _raw_compare_and_swap function in the spinlock code as well.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit f318a1229b
parent fcbe08d66f
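For readers unfamiliar with the two GCC builtins this patch switches to: __sync_val_compare_and_swap(ptr, old, new) atomically stores new into *ptr only if *ptr still equals old, and returns the prior contents; __sync_bool_compare_and_swap performs the same conditional swap but returns whether it succeeded. A minimal standalone illustration (not part of the commit; compiles with any gcc >= 4.3):

#include <assert.h>

int main(void)
{
	int v = 5;
	int prev;

	/* Value variant: returns the prior contents of v; the store
	 * happens only when v still holds the expected value. */
	prev = __sync_val_compare_and_swap(&v, 5, 7);
	assert(prev == 5 && v == 7);	/* matched: swapped */
	prev = __sync_val_compare_and_swap(&v, 5, 9);
	assert(prev == 7 && v == 7);	/* mismatched: untouched */

	/* Boolean variant: reports success instead of the old value. */
	assert(__sync_bool_compare_and_swap(&v, 7, 9));
	assert(!__sync_bool_compare_and_swap(&v, 7, 11) && v == 9);
	return 0;
}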
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,201 +11,29 @@
 #include <linux/types.h>
 #include <linux/bug.h>
 
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
-{
-	unsigned long addr, old;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%4\n"
-			"0:	lr	0,%0\n"
-			"	nr	0,%3\n"
-			"	or	0,%2\n"
-			"	cs	%0,0,%4\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) addr)
-			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
-			  "Q" (*(int *) addr) : "memory", "cc", "0");
-		return old >> shift;
-	case 4:
-		asm volatile(
-			"	l	%0,%3\n"
-			"0:	cs	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=Q" (*(int *) ptr)
-			: "d" (x), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return old;
-#ifdef CONFIG_64BIT
-	case 8:
-		asm volatile(
-			"	lg	%0,%3\n"
-			"0:	csg	%0,%2,%3\n"
-			"	jl	0b\n"
-			: "=&d" (old), "=m" (*(long *) ptr)
-			: "d" (x), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return old;
-#endif /* CONFIG_64BIT */
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-#define xchg(ptr, x)							\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__ret = (__typeof__(*(ptr)))					\
-		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
-	__ret;								\
-})
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
+#define cmpxchg(ptr, o, n)						\
+({									\
+	__typeof__(*(ptr)) __o = (o);					\
+	__typeof__(*(ptr)) __n = (n);					\
+	(__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
+})
+
+#define cmpxchg64	cmpxchg
+#define cmpxchg_local	cmpxchg
+#define cmpxchg64_local	cmpxchg
+
+#define xchg(ptr, x)							\
+({									\
+	__typeof__(ptr) __ptr = (ptr);					\
+	__typeof__(*(ptr)) __old;					\
+	do {								\
+		__old = *__ptr;						\
+	} while (!__sync_bool_compare_and_swap(__ptr, __old, x));	\
+	__old;								\
+})
 
 #define __HAVE_ARCH_CMPXCHG
 
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
-				      unsigned long new, int size)
-{
-	unsigned long addr, prev, tmp;
-	int shift;
-
-	switch (size) {
-	case 1:
-		addr = (unsigned long) ptr;
-		shift = (3 ^ (addr & 3)) << 3;
-		addr ^= addr & 3;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
-			: "d" ((old & 0xff) << shift),
-			  "d" ((new & 0xff) << shift),
-			  "d" (~(0xff << shift))
-			: "memory", "cc");
-		return prev >> shift;
-	case 2:
-		addr = (unsigned long) ptr;
-		shift = (2 ^ (addr & 2)) << 3;
-		addr ^= addr & 2;
-		asm volatile(
-			"	l	%0,%2\n"
-			"0:	nr	%0,%5\n"
-			"	lr	%1,%0\n"
-			"	or	%0,%3\n"
-			"	or	%1,%4\n"
-			"	cs	%0,%1,%2\n"
-			"	jnl	1f\n"
-			"	xr	%1,%0\n"
-			"	nr	%1,%5\n"
-			"	jnz	0b\n"
-			"1:"
-			: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
-			: "d" ((old & 0xffff) << shift),
-			  "d" ((new & 0xffff) << shift),
-			  "d" (~(0xffff << shift))
-			: "memory", "cc");
-		return prev >> shift;
-	case 4:
-		asm volatile(
-			"	cs	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(int *) ptr)
-			: "0" (old), "d" (new), "Q" (*(int *) ptr)
-			: "memory", "cc");
-		return prev;
-#ifdef CONFIG_64BIT
-	case 8:
-		asm volatile(
-			"	csg	%0,%3,%1\n"
-			: "=&d" (prev), "=Q" (*(long *) ptr)
-			: "0" (old), "d" (new), "Q" (*(long *) ptr)
-			: "memory", "cc");
-		return prev;
-#endif /* CONFIG_64BIT */
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
-}
-
-#define cmpxchg(ptr, o, n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
-			  sizeof(*(ptr)));				\
-	__ret;								\
-})
-
-#ifdef CONFIG_64BIT
-#define cmpxchg64(ptr, o, n)						\
-({									\
-	cmpxchg((ptr), (o), (n));					\
-})
-#else /* CONFIG_64BIT */
-static inline unsigned long long __cmpxchg64(void *ptr,
-					     unsigned long long old,
-					     unsigned long long new)
-{
-	register_pair rp_old = {.pair = old};
-	register_pair rp_new = {.pair = new};
-	unsigned long long *ullptr = ptr;
-
-	asm volatile(
-		"	cds	%0,%2,%1"
-		: "+d" (rp_old), "+Q" (*ullptr)
-		: "d" (rp_new)
-		: "memory", "cc");
-	return rp_old.pair;
-}
-
-#define cmpxchg64(ptr, o, n)						\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg64((ptr),					\
-			    (unsigned long long)(o),			\
-			    (unsigned long long)(n));			\
-	__ret;								\
-})
-#endif /* CONFIG_64BIT */
 
 #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn)		\
 ({									\
 	register __typeof__(*(p1)) __old1 asm("2") = (o1);		\
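The new xchg macro in the hunk above emulates an unconditional exchange with a compare-and-swap retry loop. Written out as a standalone function, a sketch of what the expansion does (illustration only, not kernel code):

/* Re-read the current value until the CAS succeeds, then return the
 * value that was replaced -- the loop the new xchg macro expands to. */
static inline unsigned int xchg_via_cas(unsigned int *ptr, unsigned int x)
{
	unsigned int old;

	do {
		old = *ptr;	/* snapshot the current value */
	} while (!__sync_bool_compare_and_swap(ptr, old, x));
	return old;
}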
@@ -265,40 +93,4 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
 
 #define system_has_cmpxchg_double()	1
 
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(void *ptr,
-					    unsigned long old,
-					    unsigned long new, int size)
-{
-	switch (size) {
-	case 1:
-	case 2:
-	case 4:
-#ifdef CONFIG_64BIT
-	case 8:
-#endif
-		return __cmpxchg(ptr, old, new, size);
-	default:
-		return __cmpxchg_local_generic(ptr, old, new, size);
-	}
-
-	return old;
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)					\
-({									\
-	__typeof__(*(ptr)) __ret;					\
-	__ret = (__typeof__(*(ptr)))					\
-		__cmpxchg_local((ptr), (unsigned long)(o),		\
-				(unsigned long)(n), sizeof(*(ptr)));	\
-	__ret;								\
-})
-
-#define cmpxchg64_local(ptr, o, n)	cmpxchg64((ptr), (o), (n))
-
 #endif /* __ASM_CMPXCHG_H */
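Since __sync_val_compare_and_swap hands back the previous memory contents, callers of the rewritten cmpxchg keep the familiar read-modify-CAS loop shape. A hypothetical add_return helper (the name is made up for illustration):

static inline unsigned long add_return_sketch(unsigned long *p, unsigned long i)
{
	unsigned long old;

	do {
		old = *p;	/* current value */
	} while (__sync_val_compare_and_swap(p, old, old + i) != old);
	return old + i;		/* value after the add */
}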
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -18,14 +18,7 @@ extern int spin_retry;
 static inline int
 _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
-	unsigned int old_expected = old;
-
-	asm volatile(
-		"	cs	%0,%3,%1"
-		: "=d" (old), "=Q" (*lock)
-		: "0" (old), "d" (new), "Q" (*lock)
-		: "cc", "memory" );
-	return old == old_expected;
+	return __sync_bool_compare_and_swap(lock, old, new);
 }
 
 /*
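With _raw_compare_and_swap reduced to the boolean builtin, a trylock becomes a single call. A minimal sketch, assuming 0 marks a free lock and owner is the value stored on acquisition (my_trylock and this encoding are illustrative, not the kernel's actual lock word layout):

static inline int my_trylock(unsigned int *lock, unsigned int owner)
{
	/* Succeeds only if the lock word still reads 0 (free); on
	 * contention the word is left untouched and 0 is returned. */
	return __sync_bool_compare_and_swap(lock, 0U, owner);
}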
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -17,8 +17,8 @@
  * Make sure that the compiler is new enough. We want a compiler that
  * is known to work with the "Q" assembler constraint.
  */
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too old; please use version 3.3.3 or newer
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+#error Your compiler is too old; please use version 4.3 or newer
 #endif
 
 int main(void)