xtensa: use named assembly arguments in cmpxchg.h

Numeric assembly arguments are hard to understand and assembly code that
uses them is hard to modify. Use named arguments in __cmpxchg_u32 and
xchg_u32.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Author: Max Filippov, 2019-10-16 01:52:38 -07:00
parent 13e28135d6
commit 812e708a4c
1 changed file with 35 additions and 35 deletions
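As background for the diff below, here is a minimal, self-contained sketch (not part of the patch, and written for x86-64 rather than xtensa) contrasting positional and named operands in GCC extended inline asm. The helper names add_positional() and add_named() are hypothetical:

	#include <stdio.h>

	/* Positional operands: %0/%1/%2 must be matched against the operand
	 * lists by counting, which is what the patch moves away from. */
	static unsigned long add_positional(unsigned long a, unsigned long b)
	{
		unsigned long r;
		__asm__("lea (%1, %2), %0" : "=r" (r) : "r" (a), "r" (b));
		return r;
	}

	/* Named operands: the same instruction, but each operand is referred
	 * to by a self-describing name, as the patch does with %[result],
	 * %[addr], %[cmp] and so on. */
	static unsigned long add_named(unsigned long a, unsigned long b)
	{
		unsigned long r;
		__asm__("lea (%[a], %[b]), %[sum]"
			: [sum] "=r" (r)
			: [a] "r" (a), [b] "r" (b));
		return r;
	}

	int main(void)
	{
		printf("%lu %lu\n", add_positional(2, 3), add_named(2, 3));
		return 0;
	}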


@@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new)
 	unsigned long tmp, result;
 	__asm__ __volatile__(
-			"1:     l32ex   %0, %3\n"
-			"       bne     %0, %4, 2f\n"
-			"       mov     %1, %2\n"
-			"       s32ex   %1, %3\n"
-			"       getex   %1\n"
-			"       beqz    %1, 1b\n"
+			"1:     l32ex   %[result], %[addr]\n"
+			"       bne     %[result], %[cmp], 2f\n"
+			"       mov     %[tmp], %[new]\n"
+			"       s32ex   %[tmp], %[addr]\n"
+			"       getex   %[tmp]\n"
+			"       beqz    %[tmp], 1b\n"
 			"2:\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (new), "a" (p), "a" (old)
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)
+			: [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
 			: "memory"
 			);
 	return result;
 #elif XCHAL_HAVE_S32C1I
 	__asm__ __volatile__(
-			"       wsr     %2, scompare1\n"
-			"       s32c1i  %0, %1, 0\n"
-			: "+a" (new)
-			: "a" (p), "a" (old)
+			"       wsr     %[cmp], scompare1\n"
+			"       s32c1i  %[new], %[addr], 0\n"
+			: [new] "+a" (new)
+			: [addr] "a" (p), [cmp] "a" (old)
 			: "memory"
 			);
@@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new)
 #else
 	__asm__ __volatile__(
 			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %1, 0\n"
-			"       bne     %0, %2, 1f\n"
-			"       s32i    %3, %1, 0\n"
+			"       l32i    %[old], %[addr], 0\n"
+			"       bne     %[old], %[cmp], 1f\n"
+			"       s32i    %[new], %[addr], 0\n"
 			"1:\n"
 			"       wsr     a15, ps\n"
 			"       rsync\n"
-			: "=&a" (old)
-			: "a" (p), "a" (old), "r" (new)
+			: [old] "=&a" (old)
+			: [addr] "a" (p), [cmp] "a" (old), [new] "r" (new)
 			: "a15", "memory");
 	return old;
 #endif
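For context (a hedged sketch, not part of the patch): in all three variants __cmpxchg_u32() returns the value it observed at *p, and the store only takes effect when that value still equals the caller's old, so callers typically wrap it in a retry loop like the hypothetical helper below. Real kernel code would use the cmpxchg()/atomic_*() APIs rather than this header-internal function directly.

	/* Hypothetical caller, assuming the __cmpxchg_u32() prototype shown above. */
	static int atomic_add_return_sketch(volatile int *p, int inc)
	{
		int old, new;

		do {
			old = *p;		/* snapshot the current value     */
			new = old + inc;	/* value we would like to install */
			/* retry if someone else modified *p between the load
			 * above and the compare-and-store in __cmpxchg_u32() */
		} while (__cmpxchg_u32(p, old, new) != old);

		return new;
	}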
@@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 	unsigned long tmp, result;
 	__asm__ __volatile__(
-			"1:     l32ex   %0, %3\n"
-			"       mov     %1, %2\n"
-			"       s32ex   %1, %3\n"
-			"       getex   %1\n"
-			"       beqz    %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (val), "a" (m)
+			"1:     l32ex   %[result], %[addr]\n"
+			"       mov     %[tmp], %[val]\n"
+			"       s32ex   %[tmp], %[addr]\n"
+			"       getex   %[tmp]\n"
+			"       beqz    %[tmp], 1b\n"
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)
+			: [val] "a" (val), [addr] "a" (m)
 			: "memory"
 			);
@@ -143,13 +143,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 #elif XCHAL_HAVE_S32C1I
 	unsigned long tmp, result;
 	__asm__ __volatile__(
-			"1:     l32i    %1, %2, 0\n"
-			"       mov     %0, %3\n"
-			"       wsr     %1, scompare1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (m), "a" (val)
+			"1:     l32i    %[tmp], %[addr], 0\n"
+			"       mov     %[result], %[val]\n"
+			"       wsr     %[tmp], scompare1\n"
+			"       s32c1i  %[result], %[addr], 0\n"
+			"       bne     %[result], %[tmp], 1b\n"
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)
+			: [addr] "a" (m), [val] "a" (val)
 			: "memory"
 			);
 	return result;
@@ -157,12 +157,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 	unsigned long tmp;
 	__asm__ __volatile__(
 			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %1, 0\n"
-			"       s32i    %2, %1, 0\n"
+			"       l32i    %[tmp], %[addr], 0\n"
+			"       s32i    %[val], %[addr], 0\n"
 			"       wsr     a15, ps\n"
 			"       rsync\n"
-			: "=&a" (tmp)
-			: "a" (m), "a" (val)
+			: [tmp] "=&a" (tmp)
+			: [addr] "a" (m), [val] "a" (val)
 			: "a15", "memory");
 	return tmp;
 #endif
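Similarly, a hedged usage sketch (not in the patch) for the xchg_u32() helper touched above: an unconditional atomic exchange is often used for simple hand-off flags, as in the hypothetical try_claim() below.

	/* Hypothetical caller, assuming the xchg_u32() prototype shown above. */
	static int try_claim(volatile int *flag)
	{
		/* Atomically store 1 and get back whatever was there before;
		 * seeing 0 means this caller is the one that set the flag. */
		return xchg_u32(flag, 1) == 0;
	}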