mirror of https://gitee.com/openkylin/linux.git
locking/core: Remove {read,spin,write}_can_lock()
Outside of the locking code itself, {read,spin,write}_can_lock() have no
users in tree. Apparmor (the last remaining user of write_can_lock()) got
moved over to lockdep by the previous patch.

This patch removes the use of {read,spin,write}_can_lock() from the
BUILD_LOCK_OPS macro, deferring to the trylock operation for testing the
lock status, and subsequently removes the unused macros altogether. They
aren't guaranteed to work in a concurrent environment and can give
incorrect results in the case of qrwlock.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507055129-12300-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 26c4eb192c
commit a8a217c221
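The rationale, sketched as caller-side code: a *_can_lock() query only samples the lock word, so any conclusion drawn from it can be stale by the time it is acted upon, and for qrwlock the answer can simply be wrong. The race-free alternatives are a lockdep assertion (the route the apparmor conversion took) or an atomic test-and-acquire via trylock. The snippet below is a minimal sketch, not part of this commit; example_lock and the helper functions are hypothetical.

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/types.h>

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock, for illustration */

static void check_held_old(void)
{
	/*
	 * Racy: the lock word can change between the test and whatever
	 * acts on the result. This is the pattern being removed.
	 */
	/* BUG_ON(write_can_lock(&example_lock)); */
}

static void check_held_new(void)
{
	/* Race-free: lockdep verifies the current context holds the lock. */
	lockdep_assert_held(&example_lock);
}

static bool poll_free_new(void)
{
	/* Race-free: test and acquire in a single atomic step. */
	if (write_trylock(&example_lock)) {
		/* ... critical section ... */
		write_unlock(&example_lock);
		return true;
	}
	return false;
}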
@@ -54,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 /***********************************************************/
 
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
-	return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
-	return lock->lock == 0;
-}
-
 static inline void arch_read_lock(arch_rwlock_t *lock)
 {
 	long regx;
@@ -410,9 +410,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 #endif
 
-#define arch_read_can_lock(x)	((x)->counter > 0)
-#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 #define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
 
@@ -193,9 +193,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	dsb_sev();
 }
 
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)	(ACCESS_ONCE((x)->lock) == 0)
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -273,9 +270,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	}
 }
 
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)	(ACCESS_ONCE((x)->lock) < 0x80000000)
-
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
@@ -48,16 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__raw_spin_unlock_asm(&lock->lock);
 }
 
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-	return __raw_uncached_fetch_asm(&rw->lock) > 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-	return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
-}
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	__raw_read_lock_asm(&rw->lock);
@@ -86,16 +86,6 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
 	return temp;
 }
 
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
-	return rwlock->lock == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
-	return rwlock->lock == 0;
-}
-
 /*  Stuffs a -1 in the lock value?  */
 static inline void arch_write_lock(arch_rwlock_t *lock)
 {
@@ -127,9 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 	arch_spin_lock(lock);
 }
 
-#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
-
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
@@ -137,18 +137,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * semaphore.h for details.  -ben
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp0, tmp1;
@@ -136,21 +136,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 		      : "memory");
 }
 
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-	int ret;
-
-	asm volatile ("LNKGETD %0, [%1]\n"
-		      "CMP %0, #0\n"
-		      "MOV %0, #1\n"
-		      "XORNZ %0, %0, %0\n"
-		      : "=&d" (ret)
-		      : "da" (&rw->lock)
-		      : "cc");
-	return ret;
-}
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -224,21 +209,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	return tmp;
 }
 
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-	int tmp;
-
-	asm volatile ("LNKGETD %0, [%1]\n"
-		      "CMP %0, %2\n"
-		      "MOV %0, #1\n"
-		      "XORZ %0, %0, %0\n"
-		      : "=&d" (tmp)
-		      : "da" (&rw->lock), "bd" (0x80000000)
-		      : "cc");
-	return tmp;
-}
-
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
@@ -104,16 +104,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	rw->lock = 0;
 }
 
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-	unsigned int ret;
-
-	barrier();
-	ret = rw->lock;
-	return (ret == 0);
-}
-
 /*
  * Read locks are a bit more hairy:
  *  - Exclusively load the lock value.
@@ -171,14 +161,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	return (ret < 0x80000000);
 }
 
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-	unsigned int ret;
-
-	barrier();
-	ret = rw->lock;
-	return (ret < 0x80000000);
-}
-
 #endif /* __ASM_SPINLOCK_LOCK1_H */
@@ -98,18 +98,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
 /*
  * On mn10300, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
@@ -168,24 +168,6 @@ static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
 	return result;
 }
 
-/*
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
-{
-	return rw->counter >= 0;
-}
-
-/*
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
-{
-	return !rw->counter;
-}
-
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
@@ -181,9 +181,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
  * read-locks.
  */
 
-#define arch_read_can_lock(rw)		((rw)->lock >= 0)
-#define arch_write_can_lock(rw)	(!(rw)->lock)
-
 #ifdef CONFIG_PPC64
 #define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
 #define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
@@ -110,18 +110,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
@@ -53,18 +53,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x)	((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned old;
@@ -89,18 +89,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
  * read-locks.
  */
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x)	((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
-
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
@@ -190,9 +190,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
-#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* __SPARC_SPINLOCK_H */
@@ -79,22 +79,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define _RD_COUNT_SHIFT 24
 #define _RD_COUNT_WIDTH 8
 
-/**
- * arch_read_can_lock() - would read_trylock() succeed?
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
-	return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
-}
-
-/**
- * arch_write_can_lock() - would write_trylock() succeed?
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
-	return rwlock->lock == 0;
-}
-
 /**
  * arch_read_lock() - acquire a read lock.
  */
@@ -93,24 +93,6 @@ static inline int arch_write_val_locked(int val)
 	return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
 }
 
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
-	return !arch_write_val_locked(rw->lock);
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
-	return rw->lock == 0;
-}
-
 extern void __read_lock_failed(arch_rwlock_t *rw);
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
@@ -97,8 +97,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 *  0x80000000  one writer owns the rwlock, no other writers, no readers
 */
 
-#define arch_write_can_lock(x)  ((x)->lock == 0)
-
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
@@ -52,24 +52,6 @@
 extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
 extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
-/**
- * queued_read_can_lock- would read_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_read_can_lock(struct qrwlock *lock)
-{
-	return !(atomic_read(&lock->cnts) & _QW_WMASK);
-}
-
-/**
- * queued_write_can_lock- would write_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_write_can_lock(struct qrwlock *lock)
-{
-	return !atomic_read(&lock->cnts);
-}
-
 /**
  * queued_read_trylock - try to acquire read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
@@ -169,8 +151,6 @@ static inline void queued_write_unlock(struct qrwlock *lock)
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
-#define arch_read_can_lock(l)	queued_read_can_lock(l)
-#define arch_write_can_lock(l)	queued_write_can_lock(l)
 #define arch_read_lock(l)	queued_read_lock(l)
 #define arch_write_lock(l)	queued_write_lock(l)
 #define arch_read_trylock(l)	queued_read_trylock(l)
@@ -50,9 +50,6 @@ do {						\
 # define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
 #endif
 
-#define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock)		arch_write_can_lock(&(rwlock)->raw_lock)
-
 /*
  * Define the various rw_lock methods.  Note we define these
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
@@ -278,12 +278,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 	 1 : ({ local_irq_restore(flags); 0; }); \
 })
 
-/**
- * raw_spin_can_lock - would raw_spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
-
 /* Include rwlock functions */
 #include <linux/rwlock.h>
 
@@ -396,11 +390,6 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
 	return raw_spin_is_contended(&lock->rlock);
 }
 
-static __always_inline int spin_can_lock(spinlock_t *lock)
-{
-	return raw_spin_can_lock(&lock->rlock);
-}
-
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
 /*
@@ -77,7 +77,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 #define arch_spin_is_contended(lock)	(((void)(lock), 0))
 
-#define arch_read_can_lock(lock)	(((void)(lock), 1))
-#define arch_write_can_lock(lock)	(((void)(lock), 1))
-
 #endif /* __LINUX_SPINLOCK_UP_H */
@@ -32,8 +32,6 @@
 * include/linux/spinlock_api_smp.h
 */
 #else
-#define raw_read_can_lock(l)	read_can_lock(l)
-#define raw_write_can_lock(l)	write_can_lock(l)
 
 /*
  * Some architectures can relax in favour of the CPU owning the lock.
@@ -68,7 +66,7 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock)		\
 								\
 		if (!(lock)->break_lock)			\
 			(lock)->break_lock = 1;			\
-		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+		while ((lock)->break_lock)			\
 			arch_##op##_relax(&lock->raw_lock);	\
 	}							\
 	(lock)->break_lock = 0;					\
@@ -88,7 +86,7 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
 								\
 		if (!(lock)->break_lock)			\
 			(lock)->break_lock = 1;			\
-		while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+		while ((lock)->break_lock)			\
 			arch_##op##_relax(&lock->raw_lock);	\
 	}							\
 	(lock)->break_lock = 0;					\
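To make the BUILD_LOCK_OPS change concrete, here is a hand-expanded, simplified sketch of the lock routine the macro generates for the read case after this patch. It is illustrative only, not literal kernel source, and assumes the CONFIG_GENERIC_LOCKBREAK variant where break_lock exists.

/* Hand-expanded sketch of BUILD_LOCK_OPS output for "read" after this
 * patch (simplified, illustrative only). */
void __raw_read_lock(rwlock_t *lock)
{
	for (;;) {
		preempt_disable();
		/* The trylock is now the only test of the lock state. */
		if (likely(do_raw_read_trylock(lock)))
			break;
		preempt_enable();

		if (!lock->break_lock)
			lock->break_lock = 1;
		/* Was: while (!raw_read_can_lock(lock) && lock->break_lock) */
		while (lock->break_lock)
			arch_read_relax(&lock->raw_lock);
	}
	lock->break_lock = 0;
}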