atomics/treewide: Rename __atomic_add_unless() => atomic_fetch_add_unless()
While __atomic_add_unless() was originally intended as a building-block
for atomic_add_unless(), it's now used in a number of places around the
kernel. It's the only common atomic operation named __atomic_*() rather
than atomic_*(), which is slightly confusing and gets in the way of
scripting atomics. Given that, let's clean things up and promote it to
an official part of the atomics API, in the form of
atomic_fetch_add_unless().

This patch converts definitions and invocations over to the new name,
including the instrumented version, using the following script:

----
git grep -w __atomic_add_unless | while read line; do
	sed -i '{s/\<__atomic_add_unless\>/atomic_fetch_add_unless/}' "${line%%:*}";
done
git grep -w __arch_atomic_add_unless | while read line; do
	sed -i '{s/\<__arch_atomic_add_unless\>/arch_atomic_fetch_add_unless/}' "${line%%:*}";
done
----

Note that we do not have atomic{64,_long}_fetch_add_unless(), which
will be introduced by later patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Palmer Dabbelt <palmer@sifive.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/20180621121321.4761-2-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 356c6fe7d8
commit bfc18e389c
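A note on semantics before the diff, since the name is the whole point of the patch: atomic_fetch_add_unless() atomically adds @a to @v unless @v equals @u, and returns the value @v held beforehand. The asm-generic hunks below show the kernel's cmpxchg-loop fallback; what follows is a minimal userspace sketch of the same contract using C11 atomics, where everything apart from that contract (the helper name, main(), the printed values) is illustrative rather than taken from the patch:

----
#include <stdatomic.h>
#include <stdio.h>

/* Sketch: add @a to @v unless @v currently equals @u;
 * return the value @v held before the call. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u) {
		/* On failure, compare_exchange_weak reloads c with the
		 * current value of *v and the loop retries. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c;
}

int main(void)
{
	atomic_int v = 1;

	printf("%d\n", fetch_add_unless(&v, 1, 0));	/* prints 1; v is now 2 */
	atomic_store(&v, 0);
	printf("%d\n", fetch_add_unless(&v, 1, 0));	/* prints 0; v unchanged */
	return 0;
}
----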
@@ -206,7 +206,7 @@ ATOMIC_OPS(xor, xor)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -214,7 +214,7 @@ ATOMIC_OPS(xor, xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
 	smp_mb();

@@ -309,7 +309,7 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 #undef ATOMIC_OP
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -317,7 +317,7 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v
  */
-#define __atomic_add_unless(v, a, u)		\
+#define atomic_fetch_add_unless(v, a, u)	\
 ({						\
 	int c, old;				\
 						\

@@ -130,7 +130,7 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 }
 #define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int oldval, newval;
 	unsigned long tmp;
@@ -215,7 +215,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 

@@ -125,7 +125,7 @@
 #define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u)	___atomic_add_unless(v, a, u,)
+#define atomic_fetch_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
 #define atomic_andnot			atomic_andnot
 
 /*

@@ -94,7 +94,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	h8300flags flags;

@@ -164,7 +164,7 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer to value
  * @a: amount to add
  * @u: unless value is equal to u
@@ -173,7 +173,7 @@ ATOMIC_OPS(xor)
  *
  */
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int __oldval;
 	register int tmp;

@@ -215,7 +215,7 @@ ATOMIC64_FETCH_OP(xor, ^)
 	(cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -211,7 +211,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	return c != 0;
 }
 
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -275,7 +275,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -283,7 +283,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -100,7 +100,7 @@ ATOMIC_OP(xor)
  *
  * This is often used through atomic_inc_not_zero()
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int old, tmp;
 
@@ -119,7 +119,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 	return old;
 }
-#define __atomic_add_unless	__atomic_add_unless
+#define atomic_fetch_add_unless	atomic_fetch_add_unless
 
 #include <asm-generic/atomic.h>
 

@@ -78,7 +78,7 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -86,7 +86,7 @@ static __inline__ int atomic_read(const atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -218,7 +218,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
 #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -226,13 +226,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int t;
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
+"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
 	cmpw	0,%0,%3 \n\
 	beq	2f \n\
 	add	%0,%2,%0 \n"
@@ -538,7 +538,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%1		# __atomic_add_unless\n\
+"1:	ldarx	%0,0,%1		# atomic_fetch_add_unless\n\
 	cmpd	0,%0,%3 \n\
 	beq	2f \n\
 	add	%0,%2,%0 \n"

@@ -332,7 +332,7 @@ ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
 #undef ATOMIC_OP
 
 /* This is required to provide a full barrier on success. */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int prev, rc;
 
@@ -381,7 +381,7 @@ static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 static __always_inline int atomic_inc_not_zero(atomic_t *v)
 {
-	return __atomic_add_unless(v, 1, 0);
+	return atomic_fetch_add_unless(v, 1, 0);
 }
 
 #ifndef CONFIG_GENERIC_ATOMIC64

@@ -90,7 +90,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -46,7 +46,7 @@
 #define atomic_cmpxchg(v, o, n)		(cmpxchg(&((v)->counter), (o), (n)))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -54,7 +54,7 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -27,7 +27,7 @@ int atomic_fetch_or(int, atomic_t *);
 int atomic_fetch_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
-int __atomic_add_unless(atomic_t *, int, int);
+int atomic_fetch_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
 #define atomic_set_release(v, i)	atomic_set((v), (i))

@@ -89,7 +89,7 @@ static inline int atomic_xchg(atomic_t *v, int new)
 	return xchg(&v->counter, new);
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -95,7 +95,7 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 }
 EXPORT_SYMBOL(atomic_cmpxchg);
 
-int __atomic_add_unless(atomic_t *v, int a, int u)
+int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int ret;
 	unsigned long flags;
@@ -107,7 +107,7 @@ int __atomic_add_unless(atomic_t *v, int a, int u)
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
-EXPORT_SYMBOL(__atomic_add_unless);
+EXPORT_SYMBOL(atomic_fetch_add_unless);
 
 /* Atomic operations are already serializing */
 void atomic_set(atomic_t *v, int i)

@@ -254,7 +254,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 }
 
 /**
- * __arch_atomic_add_unless - add unless the number is already a given value
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -262,7 +262,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns the old value of @v.
  */
-static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c = arch_atomic_read(v);
 

@@ -275,7 +275,7 @@ ATOMIC_OPS(xor)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -283,7 +283,7 @@ ATOMIC_OPS(xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -61,7 +61,7 @@ static int atomic_inc_return_safe(atomic_t *v)
 {
 	unsigned int counter;
 
-	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
 	if (counter <= (unsigned int)INT_MAX)
 		return (int)counter;
 

@@ -121,7 +121,7 @@ static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
 	 * this lock.
 	 */
 	if (!exclusive)
-		return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
+		return atomic_fetch_add_unless(&uobj->usecnt, 1, -1) == -1 ?
 			-EBUSY : 0;
 
 	/* lock is either WRITE or DESTROY - should be exclusive */

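One caller worth a remark: here the sentinel is -1 rather than 0. In this scheme usecnt == -1 marks an exclusive holder, so a shared lock is taken only when nobody holds the object exclusively, and the old value tells the caller which case occurred. Sketched with the helper from the first example (try_read_lock() is an illustrative name, not from the patch):

----
/* Shared-lock attempt: -1 marks an exclusive holder. Returns 0 on
 * success, -1 if the object is exclusively locked (the caller above
 * maps that case to -EBUSY). Builds on fetch_add_unless() from the
 * first sketch. */
static int try_read_lock(atomic_int *usecnt)
{
	return fetch_add_unless(usecnt, 1, -1) == -1 ? -1 : 0;
}
----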
@@ -648,7 +648,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 	trace_afs_notify_call(rxcall, call);
 	call->need_attention = true;
 
-	u = __atomic_add_unless(&call->usage, 1, 0);
+	u = atomic_fetch_add_unless(&call->usage, 1, 0);
 	if (u != 0) {
 		trace_afs_call(call, afs_call_trace_wake, u,
 			       atomic_read(&call->net->nr_outstanding_calls),

@@ -84,10 +84,10 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
 }
 #endif
 
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	kasan_check_write(v, sizeof(*v));
-	return __arch_atomic_add_unless(v, a, u);
+	return arch_atomic_fetch_add_unless(v, a, u);
 }
 
 

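The hunk above is the reason the commit-message script has a second sed expression: under instrumentation, the public operation is a wrapper that sanitizer-checks the access and then forwards to an arch_-prefixed implementation, so both names must move together. A hedged sketch of that wrapper shape, where check_write() and arch_fetch_add_unless() are stand-in names rather than kernel API:

----
#include <stdatomic.h>
#include <stddef.h>

/* Stand-in for kasan_check_write(): a real build would report invalid
 * writes; here it is a no-op so the sketch stays self-contained. */
static void check_write(const volatile void *addr, size_t size)
{
	(void)addr;
	(void)size;
}

/* Stand-in for the arch-level op (same contract as the first sketch). */
static int arch_fetch_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;	/* retry with the reloaded value of c */
	return c;
}

/* The instrumented wrapper owns the public name: sanitizer hook first,
 * then delegate to the arch_-prefixed implementation. */
static int fetch_add_unless_checked(atomic_int *v, int a, int u)
{
	check_write(v, sizeof(*v));
	return arch_fetch_add_unless(v, a, u);
}
----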
@@ -221,8 +221,8 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);

@@ -530,7 +530,7 @@
  */
 static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
-	return __atomic_add_unless(v, a, u) != u;
+	return atomic_fetch_add_unless(v, a, u) != u;
 }
 
 /**

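The hunk above also shows why the old name carried a leading underscore: atomic_add_unless() is a thin predicate over the fetch operation, reporting whether the add happened rather than what the old value was. In terms of the earlier userspace sketch (add_unless() is again an illustrative name):

----
/* True iff the add happened, i.e. the old value was not @u.
 * Builds on fetch_add_unless() from the first sketch. */
static int add_unless(atomic_int *v, int a, int u)
{
	return fetch_add_unless(v, a, u) != u;
}
----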
@@ -575,7 +575,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
 {
 	int refold;
 
-	refold = __atomic_add_unless(&map->refcnt, 1, 0);
+	refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
 
 	if (refold >= BPF_MAX_REFCNT) {
 		__bpf_map_put(map, false);
@@ -1142,7 +1142,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 {
 	int refold;
 
-	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
+	refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
 
 	if (refold >= BPF_MAX_REFCNT) {
 		__bpf_prog_put(prog, false);

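The two bpf hunks above, and the rxrpc hunks below, are all instances of the same try-get idiom: take a reference only if the object is still alive, because incrementing a refcount that has already hit zero would resurrect an object on its way to being freed. The returned old value is what distinguishes the two outcomes. Sketched with the helper from the first example (try_get() is an illustrative name):

----
/* Take a reference only if the refcount is non-zero; returns nonzero
 * on success. An old value of 0 means the object is already dying.
 * Builds on fetch_add_unless() from the first sketch. */
static int try_get(atomic_int *refcnt)
{
	return fetch_add_unless(refcnt, 1, 0) != 0;
}
----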
@@ -415,7 +415,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
 bool rxrpc_queue_call(struct rxrpc_call *call)
 {
 	const void *here = __builtin_return_address(0);
-	int n = __atomic_add_unless(&call->usage, 1, 0);
+	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
 	if (n == 0)
 		return false;
 	if (rxrpc_queue_work(&call->processor))

@@ -266,7 +266,7 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
 bool rxrpc_queue_conn(struct rxrpc_connection *conn)
 {
 	const void *here = __builtin_return_address(0);
-	int n = __atomic_add_unless(&conn->usage, 1, 0);
+	int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
 	if (n == 0)
 		return false;
 	if (rxrpc_queue_work(&conn->processor))
@@ -309,7 +309,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
 	const void *here = __builtin_return_address(0);
 
 	if (conn) {
-		int n = __atomic_add_unless(&conn->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
 		else

@@ -305,7 +305,7 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 	const void *here = __builtin_return_address(0);
 
 	if (local) {
-		int n = __atomic_add_unless(&local->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
 		else

@@ -406,7 +406,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 	const void *here = __builtin_return_address(0);
 
 	if (peer) {
-		int n = __atomic_add_unless(&peer->usage, 1, 0);
+		int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
 		if (n > 0)
 			trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
 		else