/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
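
/* atomic_read() is not defined here; it comes from the common tile <asm/atomic.h>. */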

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb() before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb(); see block comment above */
	return val;
}
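
/*
 * The tilegx ISA has native atomic fetch-and-add, fetch-and-and, and
 * fetch-and-or instructions (both 32-bit "4" and 64-bit forms), so
 * add/and/or are generated by the macro below.  There is no atomic xor
 * instruction, so xor is open-coded further down as a cmpexch loop.
 */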

#define ATOMIC_OPS(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{								\
	int val;						\
	smp_mb();						\
	val = __insn_fetch##op##4((void *)&v->counter, i);	\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	__insn_fetch##op##4((void *)&v->counter, i);		\
}

ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)

#undef ATOMIC_OPS
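
/*
 * The cmpexch/cmpexch4 instructions compare the word at the target
 * address against the value previously written to SPR_CMPEXCH_VALUE;
 * on a match they store the new operand, and in either case they
 * return the prior memory contents.  The xor routines below use that
 * to retry until no other writer has intervened.
 */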

static inline int atomic_fetch_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic_xor(int i, atomic_t *v)
{
	int guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
	} while (guess != oldval);
}
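
/*
 * __atomic_add_unless() returns the old value rather than a success
 * flag; the generic <linux/atomic.h> wrapper builds atomic_add_unless()
 * on top of it by comparing the result against "u".
 */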

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic64_read(v)	READ_ONCE((v)->counter)
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* equivalent to smp_mb; see atomic_add_return() */
	return val;
}

#define ATOMIC64_OPS(op)					\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)	\
{								\
	long val;						\
	smp_mb();						\
	val = __insn_fetch##op((void *)&v->counter, i);		\
	smp_mb();						\
	return val;						\
}								\
static inline void atomic64_##op(long i, atomic64_t *v)	\
{								\
	__insn_fetch##op((void *)&v->counter, i);		\
}

ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)

#undef ATOMIC64_OPS

static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	smp_mb();
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
	smp_mb();
	return oldval;
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	long guess, oldval = v->counter;
	do {
		guess = oldval;
		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
		oldval = __insn_cmpexch(&v->counter, guess ^ i);
	} while (guess != oldval);
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}
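
/*
 * Unlike __atomic_add_unless() above, atomic64_add_unless() returns a
 * success indication: nonzero iff the add was actually performed.
 */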

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */