mirror of https://gitee.com/openkylin/linux.git
These are the v5.12 updates for the locking subsystem:

 - Core locking primitives updates:
    - Remove mutex_trylock_recursive() from the API - no users left
    - Simplify + constify the futex code a bit

 - Lockdep updates:
    - Teach lockdep about local_lock_t
    - Add CONFIG_DEBUG_IRQFLAGS=y debug config option to check for
      potentially unsafe IRQ mask restoration patterns. (I.e. calling
      raw_local_irq_restore() with IRQs enabled.)
    - Add wait context self-tests
    - Fix graph lock corner case corrupting internal data structures
    - Fix noinstr annotations

 - LKMM updates:
    - Simplify the litmus tests
    - Documentation fixes

 - KCSAN updates:
    - Re-enable KCSAN instrumentation in lib/random32.c

 - Misc fixes:
    - Don't branch-trace static label APIs
    - DocBook fix
    - Remove stale leftover empty file

Signed-off-by: Ingo Molnar <mingo@kernel.org>

-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmAs//sRHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1im+g/9G8taVrfiBQ7hg4PoEo28w8fzu5pGBOWd
rYzUNJO96dW262FbQE6txGDBeGEahnVTz1sGwqKcy1NfZgQBCWj4uZMOluyECrY3
SV8Iccz2+M6CV+pyjM6Agm7OrgEHxlB/oorZy3TD6s2YeuR6nVGfO3vAXbNNeAsk
N8TR5mKY8ELbKXkjrc4KauOOiaqsQmVMuV/l/1DLoydDxATYq4Fczh0lcIdwMtYB
pqzWAKa0Qy2mKcHXe2YMYjddn2JEcDWNGJCsmZTa6m45aaAW1XyICLLxcQ2X8aL+
aj9rxYTBkZl9vAjrICfbJTtYku6fN48JiDoNRQxUShGVmVKAlHxYQ4vZ7dJz0NHz
EdRrd9JIr25ImXNHlX2KCKGc/aUm4TvDtNVXCdxVlZGwnEEF8J5VocWKRKmXmA1W
MkAvPnXnynqRfcMkFaTtTfdMTan41uEixwEnUy++JTuNSMx2ie3VGMC0MgxvTBiH
iKN5iVtZVa1mUN2593Jd1qdZvGQMeIydMj+WaT4xh5hptjLCGLg4yPgYuoO7vNMT
uEfv8oODvTN8BqEixNP1Ef9pzxujuSiPoO4ZO4DNnbJJZVw1TwAZIK5Zz1wR1Zso
Wf1LKPaEOyqz5cFAJ/OxcnxvxMv3fat0vhLNzJlBEFEgKmfRhbsQVUNNL1AcdMJA
+Npbj/v5seo=
=BYju
-----END PGP SIGNATURE-----

Merge tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Core locking primitives updates:
   - Remove mutex_trylock_recursive() from the API - no users left
   - Simplify + constify the futex code a bit

  Lockdep updates:
   - Teach lockdep about local_lock_t
   - Add CONFIG_DEBUG_IRQFLAGS=y debug config option to check for
     potentially unsafe IRQ mask restoration patterns. (I.e. calling
     raw_local_irq_restore() with IRQs enabled.)
   - Add wait context self-tests
   - Fix graph lock corner case corrupting internal data structures
   - Fix noinstr annotations

  LKMM updates:
   - Simplify the litmus tests
   - Documentation fixes

  KCSAN updates:
   - Re-enable KCSAN instrumentation in lib/random32.c

  Misc fixes:
   - Don't branch-trace static label APIs
   - DocBook fix
   - Remove stale leftover empty file"

* tag 'locking-core-2021-02-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  checkpatch: Don't check for mutex_trylock_recursive()
  locking/mutex: Kill mutex_trylock_recursive()
  s390: Use arch_local_irq_{save,restore}() in early boot code
  lockdep: Noinstr annotate warn_bogus_irq_restore()
  locking/lockdep: Avoid unmatched unlock
  locking/rwsem: Remove empty rwsem.h
  locking/rtmutex: Add missing kernel-doc markup
  futex: Remove unneeded gotos
  futex: Change utime parameter to be 'const ... *'
  lockdep: report broken irq restoration
  jump_label: Do not profile branch annotations
  locking: Add Reviewers
  locking/selftests: Add local_lock inversion tests
  locking/lockdep: Exclude local_lock_t from IRQ inversions
  locking/lockdep: Clean up check_redundant() a bit
  locking/lockdep: Add a skip() function to __bfs()
  locking/lockdep: Mark local_lock_t
  locking/selftests: More granular debug_locks_verbose
  lockdep/selftest: Add wait context selftests
  tools/memory-model: Fix typo in klitmus7 compatibility table
  ...
This commit is contained in:
commit 9eef023345
@@ -802,13 +802,14 @@
 			insecure, please do not use on production kernels.
 
 	debug_locks_verbose=
-			[KNL] verbose self-tests
-			Format=<0|1>
+			[KNL] verbose locking self-tests
+			Format: <int>
 			Print debugging info while doing the locking API
 			self-tests.
-			We default to 0 (no extra messages), setting it to
-			1 will print _a lot_ more information - normally
-			only useful to kernel developers.
+			Bitmask for the various LOCKTYPE_ tests. Defaults to 0
+			(no extra messages), setting it to -1 (all bits set)
+			will print _a_lot_ more information - normally only
+			useful to lockdep developers.
 
 	debug_objects	[KNL] Enable object debugging

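With this change debug_locks_verbose= is a bitmask over the LOCKTYPE_ classes used by the locking selftests rather than a plain on/off switch. A hedged usage sketch, using only values visible elsewhere in this diff (0x40 is the new LOCKTYPE_LL bit for the local_lock selftests):

	debug_locks_verbose=0x40	# verbose output for the local_lock selftests only
	debug_locks_verbose=-1		# all bits set: verbose output for every selftest class
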
@@ -10342,6 +10342,8 @@ LOCKING PRIMITIVES
 M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
 M:	Will Deacon <will@kernel.org>
+R:	Waiman Long <longman@redhat.com>
+R:	Boqun Feng <boqun.feng@gmail.com> (LOCKDEP)
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core

@@ -66,13 +66,13 @@ int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
 	unsigned long flags;
 	int rc;
 
-	raw_local_irq_save(flags);
+	flags = arch_local_irq_save();
 	rc = sclp_service_call(cmd, sccb);
 	if (rc)
 		goto out;
 	sclp_early_wait_irq();
 out:
-	raw_local_irq_restore(flags);
+	arch_local_irq_restore(flags);
 	return rc;
 }
 

@@ -76,6 +76,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #else
 # define likely(x)	__builtin_expect(!!(x), 1)
 # define unlikely(x)	__builtin_expect(!!(x), 0)
+# define likely_notrace(x)	likely(x)
+# define unlikely_notrace(x)	unlikely(x)
 #endif
 
 /* Optimization barrier */

@@ -149,6 +149,17 @@ do { \
 # define start_critical_timings() do { } while (0)
 #endif
 
+#ifdef CONFIG_DEBUG_IRQFLAGS
+extern void warn_bogus_irq_restore(void);
+#define raw_check_bogus_irq_restore()			\
+	do {						\
+		if (unlikely(!arch_irqs_disabled()))	\
+			warn_bogus_irq_restore();	\
+	} while (0)
+#else
+#define raw_check_bogus_irq_restore() do { } while (0)
+#endif
+
 /*
  * Wrap the arch provided IRQ routines to provide appropriate checks.
  */

@@ -162,6 +173,7 @@ do { \
 #define raw_local_irq_restore(flags)			\
 	do {						\
 		typecheck(unsigned long, flags);	\
+		raw_check_bogus_irq_restore();		\
 		arch_local_irq_restore(flags);		\
 	} while (0)
 #define raw_local_save_flags(flags)			\

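Taken together, the two irqflags.h hunks make raw_local_irq_restore() warn once when it is reached with interrupts already enabled. A minimal sketch of the pattern CONFIG_DEBUG_IRQFLAGS is meant to flag; only the irqflags calls are real API, the surrounding function and helper are made up for illustration:

	static void example_bad_flow(void)		/* hypothetical */
	{
		unsigned long flags;

		raw_local_irq_save(flags);		/* IRQs off, prior state saved in flags */
		do_work();				/* hypothetical helper */
		raw_local_irq_enable();			/* bug: unconditionally re-enables IRQs */
		raw_local_irq_restore(flags);		/* IRQs already on here: with
							 * CONFIG_DEBUG_IRQFLAGS=y this triggers
							 * warn_bogus_irq_restore() */
	}
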
@@ -261,14 +261,14 @@ static __always_inline void jump_label_init(void)
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
-	if (unlikely(static_key_count(key) > 0))
+	if (unlikely_notrace(static_key_count(key) > 0))
 		return true;
 	return false;
 }
 
 static __always_inline bool static_key_true(struct static_key *key)
 {
-	if (likely(static_key_count(key) > 0))
+	if (likely_notrace(static_key_count(key) > 0))
 		return true;
 	return false;
 }

@@ -460,7 +460,7 @@ extern bool ____wrong_branch_error(void);
 		branch = !arch_static_branch_jump(&(x)->key, true);	\
 	else								\
 		branch = ____wrong_branch_error();			\
-	likely(branch);							\
+	likely_notrace(branch);						\
 })
 
 #define static_branch_unlikely(x)					\

@@ -472,13 +472,13 @@ extern bool ____wrong_branch_error(void);
 		branch = arch_static_branch(&(x)->key, false);		\
 	else								\
 		branch = ____wrong_branch_error();			\
-	unlikely(branch);						\
+	unlikely_notrace(branch);					\
 })
 
 #else /* !CONFIG_JUMP_LABEL */
 
-#define static_branch_likely(x)		likely(static_key_enabled(&(x)->key))
-#define static_branch_unlikely(x)	unlikely(static_key_enabled(&(x)->key))
+#define static_branch_likely(x)		likely_notrace(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x)	unlikely_notrace(static_key_enabled(&(x)->key))
 
 #endif /* CONFIG_JUMP_LABEL */
 

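The effect of the *_notrace switch is that the static-branch wrappers themselves are no longer branch-traced when likely()/unlikely() are redefined for branch profiling; only the caller's own annotations are. A hedged usage sketch (the key and helpers are illustrative, not part of this diff):

	DEFINE_STATIC_KEY_FALSE(use_slow_path_key);		/* hypothetical key */

	static void hot_path(void)
	{
		if (static_branch_unlikely(&use_slow_path_key))	/* now expands via unlikely_notrace() */
			slow_path();				/* hypothetical */
		else
			fast_path();				/* hypothetical */
	}
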
@@ -18,6 +18,7 @@ typedef struct {
 	.dep_map = {					\
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
+		.lock_type = LD_LOCK_PERCPU,		\
 	}
 #else
 # define LL_DEP_MAP_INIT(lockname)

@@ -30,7 +31,9 @@ do { \
 	static struct lock_class_key __key;			\
 								\
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
-	lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
+	lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
+			      LD_WAIT_CONFIG, LD_WAIT_INV,	\
+			      LD_LOCK_PERCPU);			\
 } while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC

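Marking the dep_map as LD_LOCK_PERCPU is what lets lockdep treat local_lock_t as a per-CPU pseudo-lock: wait-context rules still apply, but the IRQ-inversion search skips it (see the usage_skip() hunk further down). A hedged sketch of the kind of per-CPU usage this covers, with illustrative names:

	struct foo_pcpu {				/* hypothetical per-CPU data */
		local_lock_t lock;
		unsigned long count;
	};
	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_bump(void)
	{
		local_lock(&foo_pcpu.lock);		/* on !RT: preemption off + lockdep tracking */
		this_cpu_inc(foo_pcpu.count);
		local_unlock(&foo_pcpu.lock);
	}
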
@@ -185,12 +185,19 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
  * to lockdep:
  */
 
-extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
-	struct lock_class_key *key, int subclass, short inner, short outer);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
 
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+	lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL);
+}
+
 static inline void
 lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key, int subclass, short inner)
+		      struct lock_class_key *key, int subclass, u8 inner)
 {
 	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
 }

@@ -340,6 +347,8 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+		do { (void)(name); (void)(key); } while (0)
 # define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
 		do { (void)(name); (void)(key); } while (0)
 # define lockdep_init_map_wait(lock, name, key, sub, inner) \

@@ -30,6 +30,12 @@ enum lockdep_wait_type {
 	LD_WAIT_MAX,		/* must be last */
 };
 
+enum lockdep_lock_type {
+	LD_LOCK_NORMAL = 0,	/* normal, catch all */
+	LD_LOCK_PERCPU,		/* percpu */
+	LD_LOCK_MAX,
+};
+
 #ifdef CONFIG_LOCKDEP
 
 /*

@@ -119,8 +125,10 @@ struct lock_class {
 	int				name_version;
 	const char			*name;
 
-	short				wait_type_inner;
-	short				wait_type_outer;
+	u8				wait_type_inner;
+	u8				wait_type_outer;
+	u8				lock_type;
+	/* u8				hole; */
 
 #ifdef CONFIG_LOCK_STAT
 	unsigned long			contention_point[LOCKSTAT_POINTS];

|
||||||
struct lock_class_key *key;
|
struct lock_class_key *key;
|
||||||
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
|
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
|
||||||
const char *name;
|
const char *name;
|
||||||
short wait_type_outer; /* can be taken in this context */
|
u8 wait_type_outer; /* can be taken in this context */
|
||||||
short wait_type_inner; /* presents this context */
|
u8 wait_type_inner; /* presents this context */
|
||||||
|
u8 lock_type;
|
||||||
|
/* u8 hole; */
|
||||||
#ifdef CONFIG_LOCK_STAT
|
#ifdef CONFIG_LOCK_STAT
|
||||||
int cpu;
|
int cpu;
|
||||||
unsigned long ip;
|
unsigned long ip;
|
||||||
|
|
|
@@ -199,29 +199,4 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-/*
- * These values are chosen such that FAIL and SUCCESS match the
- * values of the regular mutex_trylock().
- */
-enum mutex_trylock_recursive_enum {
-	MUTEX_TRYLOCK_FAILED = 0,
-	MUTEX_TRYLOCK_SUCCESS = 1,
-	MUTEX_TRYLOCK_RECURSIVE,
-};
-
-/**
- * mutex_trylock_recursive - trylock variant that allows recursive locking
- * @lock: mutex to be locked
- *
- * This function should not be used, _ever_. It is purely for hysterical GEM
- * raisins, and once those are gone this will be removed.
- *
- * Returns:
- *  - MUTEX_TRYLOCK_FAILED    - trylock failed,
- *  - MUTEX_TRYLOCK_SUCCESS   - lock acquired,
- *  - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
- */
-extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock);
-
 #endif /* __LINUX_MUTEX_H */

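With the last in-tree users gone, the whole recursive-trylock special case disappears and checkpatch stops looking for it. Any remaining caller is expected to avoid recursive acquisition entirely and use plain mutex_trylock(); a hedged before/after sketch (ctx and the error path are illustrative):

	/* before (removed API):
	 *	if (mutex_trylock_recursive(&ctx->lock) == MUTEX_TRYLOCK_FAILED)
	 *		return -EBUSY;
	 * after: recursion must not happen at all, so a plain trylock is enough:
	 */
	if (!mutex_trylock(&ctx->lock))
		return -EBUSY;
	/* critical section */
	mutex_unlock(&ctx->lock);
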
@@ -607,11 +607,11 @@ asmlinkage long sys_unshare(unsigned long unshare_flags);
 
 /* kernel/futex.c */
 asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
-			struct __kernel_timespec __user *utime, u32 __user *uaddr2,
-			u32 val3);
+			  const struct __kernel_timespec __user *utime,
+			  u32 __user *uaddr2, u32 val3);
 asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val,
-			struct old_timespec32 __user *utime, u32 __user *uaddr2,
-			u32 val3);
+				 const struct old_timespec32 __user *utime,
+				 u32 __user *uaddr2, u32 val3);
 asmlinkage long sys_get_robust_list(int pid,
 				    struct robust_list_head __user * __user *head_ptr,
 				    size_t __user *len_ptr);

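The constification only touches the kernel-internal prototypes; the timeout was always read-only from the kernel's point of view and nothing changes for userspace callers. For reference, a hedged stand-alone userspace sketch of the call path that actually passes utime (FUTEX_WAIT with a timeout), using the raw syscall since glibc has no futex() wrapper:

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <time.h>

	static long futex_wait(unsigned int *uaddr, unsigned int expected,
			       const struct timespec *timeout)
	{
		/* args: uaddr, op, val, utime, uaddr2, val3 */
		return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, timeout, NULL, 0);
	}
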
@@ -3012,7 +3012,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 		 * Success, we're done! No tricky corner cases.
 		 */
 		if (!ret)
-			goto out_putkey;
+			return ret;
 		/*
 		 * The atomic access to the futex value generated a
 		 * pagefault, so retry the user-access and the wakeup:

|
||||||
* wake_futex_pi has detected invalid state. Tell user
|
* wake_futex_pi has detected invalid state. Tell user
|
||||||
* space.
|
* space.
|
||||||
*/
|
*/
|
||||||
goto out_putkey;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -3050,7 +3050,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
|
||||||
|
|
||||||
default:
|
default:
|
||||||
WARN_ON_ONCE(1);
|
WARN_ON_ONCE(1);
|
||||||
goto out_putkey;
|
return ret;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -3061,7 +3061,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 
 out_unlock:
 	spin_unlock(&hb->lock);
-out_putkey:
 	return ret;
 
 pi_retry:

@@ -3763,8 +3762,8 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 
 
 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
-		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
-		u32, val3)
+		const struct __kernel_timespec __user *, utime,
+		u32 __user *, uaddr2, u32, val3)
 {
 	struct timespec64 ts;
 	ktime_t t, *tp = NULL;

@@ -3959,7 +3958,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
 
 #ifdef CONFIG_COMPAT_32BIT_TIME
 SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
-		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
+		const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
 		u32, val3)
 {
 	struct timespec64 ts;

@@ -12,7 +12,6 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/preempt.h>
-#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 

@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
 static DEFINE_PER_CPU(long, kcsan_skip);
 
 /* For kcsan_prandom_u32_max(). */
-static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+static DEFINE_PER_CPU(u32, kcsan_rand_state);
 
 static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
 						      size_t size,

|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
|
* Returns a pseudo-random number in interval [0, ep_ro). Simple linear
|
||||||
* for more details.
|
* congruential generator, using constants from "Numerical Recipes".
|
||||||
*
|
|
||||||
* The open-coded version here is using only safe primitives for all contexts
|
|
||||||
* where we can have KCSAN instrumentation. In particular, we cannot use
|
|
||||||
* prandom_u32() directly, as its tracepoint could cause recursion.
|
|
||||||
*/
|
*/
|
||||||
static u32 kcsan_prandom_u32_max(u32 ep_ro)
|
static u32 kcsan_prandom_u32_max(u32 ep_ro)
|
||||||
{
|
{
|
||||||
struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
|
u32 state = this_cpu_read(kcsan_rand_state);
|
||||||
const u32 res = prandom_u32_state(state);
|
|
||||||
|
|
||||||
put_cpu_var(kcsan_rand_state);
|
state = 1664525 * state + 1013904223;
|
||||||
return (u32)(((u64) res * ep_ro) >> 32);
|
this_cpu_write(kcsan_rand_state, state);
|
||||||
|
|
||||||
|
return state % ep_ro;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void reset_kcsan_skip(void)
|
static inline void reset_kcsan_skip(void)
|
||||||
|
@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
|
||||||
|
|
||||||
void __init kcsan_init(void)
|
void __init kcsan_init(void)
|
||||||
{
|
{
|
||||||
|
int cpu;
|
||||||
|
|
||||||
BUG_ON(!in_task());
|
BUG_ON(!in_task());
|
||||||
|
|
||||||
kcsan_debugfs_init();
|
kcsan_debugfs_init();
|
||||||
prandom_seed_full_state(&kcsan_rand_state);
|
|
||||||
|
for_each_possible_cpu(cpu)
|
||||||
|
per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We are in the init task, and no other tasks should be running;
|
* We are in the init task, and no other tasks should be running;
|
||||||
|
|
|
@@ -15,6 +15,7 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
 endif
 
+obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
 obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)

@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/irqflags.h>
+
+noinstr void warn_bogus_irq_restore(void)
+{
+	instrumentation_begin();
+	WARN_ONCE(1, "raw_local_irq_restore() called with IRQs enabled\n");
+	instrumentation_end();
+}
+EXPORT_SYMBOL(warn_bogus_irq_restore);

@@ -1290,6 +1290,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	class->name_version = count_matching_names(class);
 	class->wait_type_inner = lock->wait_type_inner;
 	class->wait_type_outer = lock->wait_type_outer;
+	class->lock_type = lock->lock_type;
 	/*
 	 * We use RCU's safe list-add method to make
 	 * parallel walking of the hash-list safe:

|
||||||
static enum bfs_result __bfs(struct lock_list *source_entry,
|
static enum bfs_result __bfs(struct lock_list *source_entry,
|
||||||
void *data,
|
void *data,
|
||||||
bool (*match)(struct lock_list *entry, void *data),
|
bool (*match)(struct lock_list *entry, void *data),
|
||||||
|
bool (*skip)(struct lock_list *entry, void *data),
|
||||||
struct lock_list **target_entry,
|
struct lock_list **target_entry,
|
||||||
int offset)
|
int offset)
|
||||||
{
|
{
|
||||||
|
@ -1731,7 +1733,12 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
|
||||||
/*
|
/*
|
||||||
* Step 3: we haven't visited this and there is a strong
|
* Step 3: we haven't visited this and there is a strong
|
||||||
* dependency path to this, so check with @match.
|
* dependency path to this, so check with @match.
|
||||||
|
* If @skip is provide and returns true, we skip this
|
||||||
|
* lock (and any path this lock is in).
|
||||||
*/
|
*/
|
||||||
|
if (skip && skip(lock, data))
|
||||||
|
continue;
|
||||||
|
|
||||||
if (match(lock, data)) {
|
if (match(lock, data)) {
|
||||||
*target_entry = lock;
|
*target_entry = lock;
|
||||||
return BFS_RMATCH;
|
return BFS_RMATCH;
|
||||||
|
@ -1774,9 +1781,10 @@ static inline enum bfs_result
|
||||||
__bfs_forwards(struct lock_list *src_entry,
|
__bfs_forwards(struct lock_list *src_entry,
|
||||||
void *data,
|
void *data,
|
||||||
bool (*match)(struct lock_list *entry, void *data),
|
bool (*match)(struct lock_list *entry, void *data),
|
||||||
|
bool (*skip)(struct lock_list *entry, void *data),
|
||||||
struct lock_list **target_entry)
|
struct lock_list **target_entry)
|
||||||
{
|
{
|
||||||
return __bfs(src_entry, data, match, target_entry,
|
return __bfs(src_entry, data, match, skip, target_entry,
|
||||||
offsetof(struct lock_class, locks_after));
|
offsetof(struct lock_class, locks_after));
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -1785,9 +1793,10 @@ static inline enum bfs_result
|
||||||
__bfs_backwards(struct lock_list *src_entry,
|
__bfs_backwards(struct lock_list *src_entry,
|
||||||
void *data,
|
void *data,
|
||||||
bool (*match)(struct lock_list *entry, void *data),
|
bool (*match)(struct lock_list *entry, void *data),
|
||||||
|
bool (*skip)(struct lock_list *entry, void *data),
|
||||||
struct lock_list **target_entry)
|
struct lock_list **target_entry)
|
||||||
{
|
{
|
||||||
return __bfs(src_entry, data, match, target_entry,
|
return __bfs(src_entry, data, match, skip, target_entry,
|
||||||
offsetof(struct lock_class, locks_before));
|
offsetof(struct lock_class, locks_before));
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -2018,7 +2027,7 @@ static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
|
||||||
unsigned long count = 0;
|
unsigned long count = 0;
|
||||||
struct lock_list *target_entry;
|
struct lock_list *target_entry;
|
||||||
|
|
||||||
__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
|
__bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
|
||||||
|
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
@ -2043,7 +2052,7 @@ static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
|
||||||
unsigned long count = 0;
|
unsigned long count = 0;
|
||||||
struct lock_list *target_entry;
|
struct lock_list *target_entry;
|
||||||
|
|
||||||
__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
|
__bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
|
||||||
|
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
@ -2071,11 +2080,12 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
|
||||||
static noinline enum bfs_result
|
static noinline enum bfs_result
|
||||||
check_path(struct held_lock *target, struct lock_list *src_entry,
|
check_path(struct held_lock *target, struct lock_list *src_entry,
|
||||||
bool (*match)(struct lock_list *entry, void *data),
|
bool (*match)(struct lock_list *entry, void *data),
|
||||||
|
bool (*skip)(struct lock_list *entry, void *data),
|
||||||
struct lock_list **target_entry)
|
struct lock_list **target_entry)
|
||||||
{
|
{
|
||||||
enum bfs_result ret;
|
enum bfs_result ret;
|
||||||
|
|
||||||
ret = __bfs_forwards(src_entry, target, match, target_entry);
|
ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
|
||||||
|
|
||||||
if (unlikely(bfs_error(ret)))
|
if (unlikely(bfs_error(ret)))
|
||||||
print_bfs_bug(ret);
|
print_bfs_bug(ret);
|
||||||
|
@ -2102,7 +2112,7 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
|
||||||
|
|
||||||
debug_atomic_inc(nr_cyclic_checks);
|
debug_atomic_inc(nr_cyclic_checks);
|
||||||
|
|
||||||
ret = check_path(target, &src_entry, hlock_conflict, &target_entry);
|
ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
|
||||||
|
|
||||||
if (unlikely(ret == BFS_RMATCH)) {
|
if (unlikely(ret == BFS_RMATCH)) {
|
||||||
if (!*trace) {
|
if (!*trace) {
|
||||||
|
@ -2120,46 +2130,6 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_LOCKDEP_SMALL
|
|
||||||
/*
|
|
||||||
* Check that the dependency graph starting at <src> can lead to
|
|
||||||
* <target> or not. If it can, <src> -> <target> dependency is already
|
|
||||||
* in the graph.
|
|
||||||
*
|
|
||||||
* Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if
|
|
||||||
* any error appears in the bfs search.
|
|
||||||
*/
|
|
||||||
static noinline enum bfs_result
|
|
||||||
check_redundant(struct held_lock *src, struct held_lock *target)
|
|
||||||
{
|
|
||||||
enum bfs_result ret;
|
|
||||||
struct lock_list *target_entry;
|
|
||||||
struct lock_list src_entry;
|
|
||||||
|
|
||||||
bfs_init_root(&src_entry, src);
|
|
||||||
/*
|
|
||||||
* Special setup for check_redundant().
|
|
||||||
*
|
|
||||||
* To report redundant, we need to find a strong dependency path that
|
|
||||||
* is equal to or stronger than <src> -> <target>. So if <src> is E,
|
|
||||||
* we need to let __bfs() only search for a path starting at a -(E*)->,
|
|
||||||
* we achieve this by setting the initial node's ->only_xr to true in
|
|
||||||
* that case. And if <prev> is S, we set initial ->only_xr to false
|
|
||||||
* because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
|
|
||||||
*/
|
|
||||||
src_entry.only_xr = src->read == 0;
|
|
||||||
|
|
||||||
debug_atomic_inc(nr_redundant_checks);
|
|
||||||
|
|
||||||
ret = check_path(target, &src_entry, hlock_equal, &target_entry);
|
|
||||||
|
|
||||||
if (ret == BFS_RMATCH)
|
|
||||||
debug_atomic_inc(nr_redundant);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -2230,6 +2200,44 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
|
||||||
return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
|
return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool usage_skip(struct lock_list *entry, void *mask)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* Skip local_lock() for irq inversion detection.
|
||||||
|
*
|
||||||
|
* For !RT, local_lock() is not a real lock, so it won't carry any
|
||||||
|
* dependency.
|
||||||
|
*
|
||||||
|
* For RT, an irq inversion happens when we have lock A and B, and on
|
||||||
|
* some CPU we can have:
|
||||||
|
*
|
||||||
|
* lock(A);
|
||||||
|
* <interrupted>
|
||||||
|
* lock(B);
|
||||||
|
*
|
||||||
|
* where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
|
||||||
|
*
|
||||||
|
* Now we prove local_lock() cannot exist in that dependency. First we
|
||||||
|
* have the observation for any lock chain L1 -> ... -> Ln, for any
|
||||||
|
* 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
|
||||||
|
* wait context check will complain. And since B is not a sleep lock,
|
||||||
|
* therefore B.inner_wait_type >= 2, and since the inner_wait_type of
|
||||||
|
* local_lock() is 3, which is greater than 2, therefore there is no
|
||||||
|
* way the local_lock() exists in the dependency B -> ... -> A.
|
||||||
|
*
|
||||||
|
* As a result, we will skip local_lock(), when we search for irq
|
||||||
|
* inversion bugs.
|
||||||
|
*/
|
||||||
|
if (entry->class->lock_type == LD_LOCK_PERCPU) {
|
||||||
|
if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Find a node in the forwards-direction dependency sub-graph starting
|
* Find a node in the forwards-direction dependency sub-graph starting
|
||||||
* at @root->class that matches @bit.
|
* at @root->class that matches @bit.
|
||||||
|
@ -2245,7 +2253,7 @@ find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
|
||||||
|
|
||||||
debug_atomic_inc(nr_find_usage_forwards_checks);
|
debug_atomic_inc(nr_find_usage_forwards_checks);
|
||||||
|
|
||||||
result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
|
result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
@ -2262,7 +2270,7 @@ find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
|
||||||
|
|
||||||
debug_atomic_inc(nr_find_usage_backwards_checks);
|
debug_atomic_inc(nr_find_usage_backwards_checks);
|
||||||
|
|
||||||
result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
|
result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
@ -2627,7 +2635,7 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
|
||||||
*/
|
*/
|
||||||
bfs_init_rootb(&this, prev);
|
bfs_init_rootb(&this, prev);
|
||||||
|
|
||||||
ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
|
ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
|
||||||
if (bfs_error(ret)) {
|
if (bfs_error(ret)) {
|
||||||
print_bfs_bug(ret);
|
print_bfs_bug(ret);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -2694,8 +2702,68 @@ static inline int check_irq_usage(struct task_struct *curr,
|
||||||
{
|
{
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool usage_skip(struct lock_list *entry, void *mask)
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
#endif /* CONFIG_TRACE_IRQFLAGS */
|
#endif /* CONFIG_TRACE_IRQFLAGS */
|
||||||
|
|
||||||
|
#ifdef CONFIG_LOCKDEP_SMALL
|
||||||
|
/*
|
||||||
|
* Check that the dependency graph starting at <src> can lead to
|
||||||
|
* <target> or not. If it can, <src> -> <target> dependency is already
|
||||||
|
* in the graph.
|
||||||
|
*
|
||||||
|
* Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if
|
||||||
|
* any error appears in the bfs search.
|
||||||
|
*/
|
||||||
|
static noinline enum bfs_result
|
||||||
|
check_redundant(struct held_lock *src, struct held_lock *target)
|
||||||
|
{
|
||||||
|
enum bfs_result ret;
|
||||||
|
struct lock_list *target_entry;
|
||||||
|
struct lock_list src_entry;
|
||||||
|
|
||||||
|
bfs_init_root(&src_entry, src);
|
||||||
|
/*
|
||||||
|
* Special setup for check_redundant().
|
||||||
|
*
|
||||||
|
* To report redundant, we need to find a strong dependency path that
|
||||||
|
* is equal to or stronger than <src> -> <target>. So if <src> is E,
|
||||||
|
* we need to let __bfs() only search for a path starting at a -(E*)->,
|
||||||
|
* we achieve this by setting the initial node's ->only_xr to true in
|
||||||
|
* that case. And if <prev> is S, we set initial ->only_xr to false
|
||||||
|
* because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
|
||||||
|
*/
|
||||||
|
src_entry.only_xr = src->read == 0;
|
||||||
|
|
||||||
|
debug_atomic_inc(nr_redundant_checks);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Note: we skip local_lock() for redundant check, because as the
|
||||||
|
* comment in usage_skip(), A -> local_lock() -> B and A -> B are not
|
||||||
|
* the same.
|
||||||
|
*/
|
||||||
|
ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
|
||||||
|
|
||||||
|
if (ret == BFS_RMATCH)
|
||||||
|
debug_atomic_inc(nr_redundant);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
static inline enum bfs_result
|
||||||
|
check_redundant(struct held_lock *src, struct held_lock *target)
|
||||||
|
{
|
||||||
|
return BFS_RNOMATCH;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
static void inc_chains(int irq_context)
|
static void inc_chains(int irq_context)
|
||||||
{
|
{
|
||||||
if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
|
if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
|
||||||
|
@@ -2916,7 +2984,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
 
-#ifdef CONFIG_LOCKDEP_SMALL
 	/*
 	 * Is the <prev> -> <next> link redundant?
 	 */

|
||||||
return 0;
|
return 0;
|
||||||
else if (ret == BFS_RMATCH)
|
else if (ret == BFS_RMATCH)
|
||||||
return 2;
|
return 2;
|
||||||
#endif
|
|
||||||
|
|
||||||
if (!*trace) {
|
if (!*trace) {
|
||||||
*trace = save_trace();
|
*trace = save_trace();
|
||||||
|
@ -3707,7 +3773,7 @@ static void
|
||||||
print_usage_bug(struct task_struct *curr, struct held_lock *this,
|
print_usage_bug(struct task_struct *curr, struct held_lock *this,
|
||||||
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
|
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
|
||||||
{
|
{
|
||||||
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
|
if (!debug_locks_off() || debug_locks_silent)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
pr_warn("\n");
|
pr_warn("\n");
|
||||||
|
@ -3748,6 +3814,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
|
||||||
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
|
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
|
||||||
{
|
{
|
||||||
if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
|
if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
|
||||||
|
graph_unlock();
|
||||||
print_usage_bug(curr, this, bad_bit, new_bit);
|
print_usage_bug(curr, this, bad_bit, new_bit);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -4503,9 +4570,9 @@ print_lock_invalid_wait_context(struct task_struct *curr,
|
||||||
*/
|
*/
|
||||||
static int check_wait_context(struct task_struct *curr, struct held_lock *next)
|
static int check_wait_context(struct task_struct *curr, struct held_lock *next)
|
||||||
{
|
{
|
||||||
short next_inner = hlock_class(next)->wait_type_inner;
|
u8 next_inner = hlock_class(next)->wait_type_inner;
|
||||||
short next_outer = hlock_class(next)->wait_type_outer;
|
u8 next_outer = hlock_class(next)->wait_type_outer;
|
||||||
short curr_inner;
|
u8 curr_inner;
|
||||||
int depth;
|
int depth;
|
||||||
|
|
||||||
if (!curr->lockdep_depth || !next_inner || next->trylock)
|
if (!curr->lockdep_depth || !next_inner || next->trylock)
|
||||||
|
@ -4528,7 +4595,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
|
||||||
|
|
||||||
for (; depth < curr->lockdep_depth; depth++) {
|
for (; depth < curr->lockdep_depth; depth++) {
|
||||||
struct held_lock *prev = curr->held_locks + depth;
|
struct held_lock *prev = curr->held_locks + depth;
|
||||||
short prev_inner = hlock_class(prev)->wait_type_inner;
|
u8 prev_inner = hlock_class(prev)->wait_type_inner;
|
||||||
|
|
||||||
if (prev_inner) {
|
if (prev_inner) {
|
||||||
/*
|
/*
|
||||||
|
@ -4577,9 +4644,9 @@ static inline int check_wait_context(struct task_struct *curr,
|
||||||
/*
|
/*
|
||||||
* Initialize a lock instance's lock-class mapping info:
|
* Initialize a lock instance's lock-class mapping info:
|
||||||
*/
|
*/
|
||||||
void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
|
void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
|
||||||
struct lock_class_key *key, int subclass,
|
struct lock_class_key *key, int subclass,
|
||||||
short inner, short outer)
|
u8 inner, u8 outer, u8 lock_type)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
@@ -4602,6 +4669,7 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
 
 	lock->wait_type_outer = outer;
 	lock->wait_type_inner = inner;
+	lock->lock_type = lock_type;
 
 	/*
 	 * No key, no joy, we need to hash something.

|
||||||
raw_local_irq_restore(flags);
|
raw_local_irq_restore(flags);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(lockdep_init_map_waits);
|
EXPORT_SYMBOL_GPL(lockdep_init_map_type);
|
||||||
|
|
||||||
struct lock_class_key __lockdep_no_validate__;
|
struct lock_class_key __lockdep_no_validate__;
|
||||||
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
|
EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
|
||||||
|
|
|
@@ -86,16 +86,6 @@ bool mutex_is_locked(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_is_locked);
 
-__must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
-	if (unlikely(__mutex_owner(lock) == current))
-		return MUTEX_TRYLOCK_RECURSIVE;
-
-	return mutex_trylock(lock);
-}
-EXPORT_SYMBOL(mutex_trylock_recursive);
-
 static inline unsigned long __owner_flags(unsigned long owner)
 {
 	return owner & MUTEX_FLAGS;

@@ -1604,8 +1604,11 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
 /**
- * Futex variant, that since futex variants do not use the fast-path, can be
- * simple and will not need to retry.
+ * __rt_mutex_futex_unlock - Futex variant, that since futex variants
+ * do not use the fast-path, can be simple and will not need to retry.
+ *
+ * @lock:	The rt_mutex to be unlocked
+ * @wake_q:	The wake queue head from which to get the next lock waiter
  */
 bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
				    struct wake_q_head *wake_q)

@@ -1662,13 +1665,15 @@ void rt_mutex_destroy(struct rt_mutex *lock)
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
 /**
- * __rt_mutex_init - initialize the rt lock
+ * __rt_mutex_init - initialize the rt_mutex
  *
- * @lock: the rt lock to be initialized
+ * @lock:	The rt_mutex to be initialized
+ * @name:	The lock name used for debugging
+ * @key:	The lock class key used for debugging
  *
- * Initialize the rt lock to unlocked state.
+ * Initialize the rt_mutex to unlocked state.
  *
- * Initializing of a locked rt lock is not allowed
+ * Initializing of a locked rt_mutex is not allowed
  */
 void __rt_mutex_init(struct rt_mutex *lock, const char *name,
		     struct lock_class_key *key)

@@ -1335,6 +1335,7 @@ config LOCKDEP_SMALL
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
+	select DEBUG_IRQFLAGS
 	help
 	  If you say Y here, the lock dependency engine will do
 	  additional runtime checks to debug itself, at the price

@@ -1423,6 +1424,13 @@ config TRACE_IRQFLAGS_NMI
 	depends on TRACE_IRQFLAGS
 	depends on TRACE_IRQFLAGS_NMI_SUPPORT
 
+config DEBUG_IRQFLAGS
+	bool "Debug IRQ flag manipulation"
+	help
+	  Enables checks for potentially unsafe enabling or disabling of
+	  interrupts, such as calling raw_local_irq_restore() when interrupts
+	  are enabled.
+
 config STACKTRACE
 	bool "Stack backtrace support"
 	depends on STACKTRACE_SUPPORT

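Since DEBUG_LOCKDEP now selects it, the check is active on typical lockdep self-debug configurations without further action, and it can also be enabled on its own. A hedged .config fragment:

	CONFIG_DEBUG_IRQFLAGS=y
	# or pulled in implicitly:
	CONFIG_DEBUG_LOCKDEP=y		# selects DEBUG_IRQFLAGS, see the hunk above
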
@@ -27,9 +27,6 @@ KASAN_SANITIZE_string.o := n
 CFLAGS_string.o += -fno-stack-protector
 endif
 
-# Used by KCSAN while enabled, avoid recursion.
-KCSAN_SANITIZE_random32.o := n
-
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 rbtree.o radix-tree.o timerqueue.o xarray.o \
	 idr.o extable.o sha1.o irq_regs.o argv_split.o \

@@ -24,6 +24,7 @@
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
 #include <linux/rtmutex.h>
+#include <linux/local_lock.h>
 
 /*
  * Change this to 1 if you want to see the failure printouts:

|
||||||
#define LOCKTYPE_RWSEM 0x8
|
#define LOCKTYPE_RWSEM 0x8
|
||||||
#define LOCKTYPE_WW 0x10
|
#define LOCKTYPE_WW 0x10
|
||||||
#define LOCKTYPE_RTMUTEX 0x20
|
#define LOCKTYPE_RTMUTEX 0x20
|
||||||
|
#define LOCKTYPE_LL 0x40
|
||||||
|
|
||||||
static struct ww_acquire_ctx t, t2;
|
static struct ww_acquire_ctx t, t2;
|
||||||
static struct ww_mutex o, o2, o3;
|
static struct ww_mutex o, o2, o3;
|
||||||
|
@ -64,6 +66,9 @@ static DEFINE_SPINLOCK(lock_B);
|
||||||
static DEFINE_SPINLOCK(lock_C);
|
static DEFINE_SPINLOCK(lock_C);
|
||||||
static DEFINE_SPINLOCK(lock_D);
|
static DEFINE_SPINLOCK(lock_D);
|
||||||
|
|
||||||
|
static DEFINE_RAW_SPINLOCK(raw_lock_A);
|
||||||
|
static DEFINE_RAW_SPINLOCK(raw_lock_B);
|
||||||
|
|
||||||
static DEFINE_RWLOCK(rwlock_A);
|
static DEFINE_RWLOCK(rwlock_A);
|
||||||
static DEFINE_RWLOCK(rwlock_B);
|
static DEFINE_RWLOCK(rwlock_B);
|
||||||
static DEFINE_RWLOCK(rwlock_C);
|
static DEFINE_RWLOCK(rwlock_C);
|
||||||
|
@ -133,6 +138,8 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* non-inlined runtime initializers, to let separate locks share
|
* non-inlined runtime initializers, to let separate locks share
|
||||||
* the same lock-class:
|
* the same lock-class:
|
||||||
|
@@ -1306,19 +1313,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RAW_SPINLOCK(x)	lockdep_reset_lock(&raw_lock_##x.dep_map)
 # define I_RWLOCK(x)	lockdep_reset_lock(&rwlock_##x.dep_map)
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
 # define I_WW(x)	lockdep_reset_lock(&x.dep_map)
+# define I_LOCAL_LOCK(x)	lockdep_reset_lock(&local_##x.dep_map)
 #ifdef CONFIG_RT_MUTEXES
 # define I_RTMUTEX(x)	lockdep_reset_lock(&rtmutex_##x.dep_map)
 #endif
 #else
 # define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
 # define I_RWLOCK(x)
 # define I_MUTEX(x)
 # define I_RWSEM(x)
 # define I_WW(x)
+# define I_LOCAL_LOCK(x)
 #endif
 
 #ifndef I_RTMUTEX

@@ -1358,9 +1369,16 @@ static void reset_locks(void)
 	I1(A); I1(B); I1(C); I1(D);
 	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
 	I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+	I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
+	I_LOCAL_LOCK(A);
+
 	lockdep_reset();
 
 	I2(A); I2(B); I2(C); I2(D);
 	init_shared_classes();
+	raw_spin_lock_init(&raw_lock_A);
+	raw_spin_lock_init(&raw_lock_B);
+	local_lock_init(&local_A);
+
 	ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
 	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));

@@ -1382,6 +1400,8 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 
 	WARN_ON(irqs_disabled());
 
+	debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
+
 	testcase_fn();
 	/*
 	 * Filter out expected failures:

|
||||||
}
|
}
|
||||||
testcase_total++;
|
testcase_total++;
|
||||||
|
|
||||||
if (debug_locks_verbose)
|
if (debug_locks_verbose & lockclass_mask)
|
||||||
pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
|
pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
|
||||||
lockclass_mask, debug_locks, expected);
|
lockclass_mask, debug_locks, expected);
|
||||||
/*
|
/*
|
||||||
|
@ -2419,6 +2439,311 @@ static void fs_reclaim_tests(void)
|
||||||
pr_cont("\n");
|
pr_cont("\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
|
||||||
|
|
||||||
|
static void hardirq_exit(int *_)
|
||||||
|
{
|
||||||
|
HARDIRQ_EXIT();
|
||||||
|
}
|
||||||
|
|
||||||
|
#define HARDIRQ_CONTEXT(name, ...) \
|
||||||
|
int hardirq_guard_##name __guard(hardirq_exit); \
|
||||||
|
HARDIRQ_ENTER();
|
||||||
|
|
||||||
|
#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
|
||||||
|
int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
|
||||||
|
local_irq_disable(); \
|
||||||
|
__irq_enter(); \
|
||||||
|
WARN_ON(!in_irq());
|
||||||
|
|
||||||
|
static void softirq_exit(int *_)
|
||||||
|
{
|
||||||
|
SOFTIRQ_EXIT();
|
||||||
|
}
|
||||||
|
|
||||||
|
#define SOFTIRQ_CONTEXT(name, ...) \
|
||||||
|
int softirq_guard_##name __guard(softirq_exit); \
|
||||||
|
SOFTIRQ_ENTER();
|
||||||
|
|
||||||
|
static void rcu_exit(int *_)
|
||||||
|
{
|
||||||
|
rcu_read_unlock();
|
||||||
|
}
|
||||||
|
|
||||||
|
#define RCU_CONTEXT(name, ...) \
|
||||||
|
int rcu_guard_##name __guard(rcu_exit); \
|
||||||
|
rcu_read_lock();
|
||||||
|
|
||||||
|
static void rcu_bh_exit(int *_)
|
||||||
|
{
|
||||||
|
rcu_read_unlock_bh();
|
||||||
|
}
|
||||||
|
|
||||||
|
#define RCU_BH_CONTEXT(name, ...) \
|
||||||
|
int rcu_bh_guard_##name __guard(rcu_bh_exit); \
|
||||||
|
rcu_read_lock_bh();
|
||||||
|
|
||||||
|
static void rcu_sched_exit(int *_)
|
||||||
|
{
|
||||||
|
rcu_read_unlock_sched();
|
||||||
|
}
|
||||||
|
|
||||||
|
#define RCU_SCHED_CONTEXT(name, ...) \
|
||||||
|
int rcu_sched_guard_##name __guard(rcu_sched_exit); \
|
||||||
|
rcu_read_lock_sched();
|
||||||
|
|
||||||
|
static void rcu_callback_exit(int *_)
|
||||||
|
{
|
||||||
|
rcu_lock_release(&rcu_callback_map);
|
||||||
|
}
|
||||||
|
|
||||||
|
#define RCU_CALLBACK_CONTEXT(name, ...) \
|
||||||
|
int rcu_callback_guard_##name __guard(rcu_callback_exit); \
|
||||||
|
rcu_lock_acquire(&rcu_callback_map);
|
||||||
|
|
||||||
|
|
||||||
|
static void raw_spinlock_exit(raw_spinlock_t **lock)
|
||||||
|
{
|
||||||
|
raw_spin_unlock(*lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
#define RAW_SPINLOCK_CONTEXT(name, lock) \
|
||||||
|
raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
|
||||||
|
raw_spin_lock(&(lock));
|
||||||
|
|
||||||
|
static void spinlock_exit(spinlock_t **lock)
|
||||||
|
{
|
||||||
|
spin_unlock(*lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
#define SPINLOCK_CONTEXT(name, lock) \
|
||||||
|
spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
|
||||||
|
spin_lock(&(lock));
|
||||||
|
|
||||||
|
static void mutex_exit(struct mutex **lock)
|
||||||
|
{
|
||||||
|
mutex_unlock(*lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
#define MUTEX_CONTEXT(name, lock) \
|
||||||
|
struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
|
||||||
|
mutex_lock(&(lock));
|
||||||
|
|
||||||
|
#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
|
||||||
|
\
|
||||||
|
static void __maybe_unused inner##_in_##outer(void) \
|
||||||
|
{ \
|
||||||
|
outer##_CONTEXT(_, outer_lock); \
|
||||||
|
{ \
|
||||||
|
inner##_CONTEXT(_, inner_lock); \
|
||||||
|
} \
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* wait contexts (considering PREEMPT_RT)
|
||||||
|
*
|
||||||
|
* o: inner is allowed in outer
|
||||||
|
* x: inner is disallowed in outer
|
||||||
|
*
|
||||||
|
* \ inner | RCU | RAW_SPIN | SPIN | MUTEX
|
||||||
|
* outer \ | | | |
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* HARDIRQ | o | o | o | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* NOTTHREADED_IRQ| o | o | x | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* SOFTIRQ | o | o | o | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* RCU | o | o | o | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* RCU_BH | o | o | o | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* RCU_CALLBACK | o | o | o | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* RCU_SCHED | o | o | x | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* RAW_SPIN | o | o | x | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* SPIN | o | o | o | x
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
* MUTEX | o | o | o | o
|
||||||
|
* ---------------+-------+----------+------+-------
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock)		\
GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock)			\
GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock)		\
GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock)			\
GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock)				\
GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock)			\
GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock)			\
GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock)			\
GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock)	\
GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock)		\
GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)

GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)

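/*
 * Hand-expanded sketch of one generated testcase, to make the macro plumbing
 * concrete: GENERATE_2_CONTEXT_TESTCASE(RCU, , MUTEX, mutex_B), produced by
 * the last instantiation above, expands to roughly the following (guard
 * variable names approximated):
 */
static void __maybe_unused MUTEX_in_RCU(void)
{
	int rcu_guard__ __guard(rcu_exit);
	rcu_read_lock();
	{
		struct mutex *mutex_guard__ __guard(mutex_exit) = &(mutex_B);
		mutex_lock(&(mutex_B));
	}
}
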
/* the outer context allows all kinds of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer)			\
	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
	dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX);		\

/*
 * the outer context only allows the preemption introduced by spinlock_t
 * (which is a sleepable lock for PREEMPT_RT)
 */
#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer)		\
	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
	dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);		\
	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\

/* the outer context doesn't allow any kind of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer)		\
	dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK);		\
	dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN);	\
	dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN);		\
	dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX);		\

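/*
 * For example, DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK), used
 * in wait_context_tests() below, hand-expands to these four checks:
 *
 *	dotest(RCU_in_RAW_SPINLOCK, SUCCESS, LOCKTYPE_RWLOCK);
 *	dotest(RAW_SPINLOCK_in_RAW_SPINLOCK, SUCCESS, LOCKTYPE_SPIN);
 *	dotest(SPINLOCK_in_RAW_SPINLOCK, FAILURE, LOCKTYPE_SPIN);
 *	dotest(MUTEX_in_RAW_SPINLOCK, FAILURE, LOCKTYPE_MUTEX);
 */
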
static void wait_context_tests(void)
{
	printk("  --------------------------------------------------------------------------\n");
	printk("  | wait context tests |\n");
	printk("  --------------------------------------------------------------------------\n");
	printk("                                 | rcu  | raw  | spin |mutex |\n");
	printk("  --------------------------------------------------------------------------\n");
	print_testname("in hardirq context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
	pr_cont("\n");

	print_testname("in hardirq context (not threaded)");
	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
	pr_cont("\n");

	print_testname("in softirq context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
	pr_cont("\n");

	print_testname("in RCU context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
	pr_cont("\n");

	print_testname("in RCU-bh context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
	pr_cont("\n");

	print_testname("in RCU callback context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
	pr_cont("\n");

	print_testname("in RCU-sched context");
	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
	pr_cont("\n");

	print_testname("in RAW_SPINLOCK context");
	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
	pr_cont("\n");

	print_testname("in SPINLOCK context");
	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
	pr_cont("\n");

	print_testname("in MUTEX context");
	DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
	pr_cont("\n");
}

static void local_lock_2(void)
{
	local_lock_acquire(&local_A);	/* IRQ-ON */
	local_lock_release(&local_A);

	HARDIRQ_ENTER();
	spin_lock(&lock_A);		/* IN-IRQ */
	spin_unlock(&lock_A);
	HARDIRQ_EXIT()

	HARDIRQ_DISABLE();
	spin_lock(&lock_A);
	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle, false */
	local_lock_release(&local_A);
	spin_unlock(&lock_A);
	HARDIRQ_ENABLE();
}

static void local_lock_3A(void)
{
	local_lock_acquire(&local_A);	/* IRQ-ON */
	spin_lock(&lock_B);		/* IRQ-ON */
	spin_unlock(&lock_B);
	local_lock_release(&local_A);

	HARDIRQ_ENTER();
	spin_lock(&lock_A);		/* IN-IRQ */
	spin_unlock(&lock_A);
	HARDIRQ_EXIT()

	HARDIRQ_DISABLE();
	spin_lock(&lock_A);
	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
	local_lock_release(&local_A);
	spin_unlock(&lock_A);
	HARDIRQ_ENABLE();
}

static void local_lock_3B(void)
{
	local_lock_acquire(&local_A);	/* IRQ-ON */
	spin_lock(&lock_B);		/* IRQ-ON */
	spin_unlock(&lock_B);
	local_lock_release(&local_A);

	HARDIRQ_ENTER();
	spin_lock(&lock_A);		/* IN-IRQ */
	spin_unlock(&lock_A);
	HARDIRQ_EXIT()

	HARDIRQ_DISABLE();
	spin_lock(&lock_A);
	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
	local_lock_release(&local_A);
	spin_unlock(&lock_A);
	HARDIRQ_ENABLE();

	HARDIRQ_DISABLE();
	spin_lock(&lock_A);
	spin_lock(&lock_B);		/* IN-IRQ <-> IRQ-ON cycle, true */
	spin_unlock(&lock_B);
	spin_unlock(&lock_A);
	HARDIRQ_DISABLE();

}

static void local_lock_tests(void)
{
	printk("  --------------------------------------------------------------------------\n");
	printk("  | local_lock tests |\n");
	printk("  ---------------------\n");

	print_testname("local_lock inversion  2");
	dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
	pr_cont("\n");

	print_testname("local_lock inversion 3A");
	dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
	pr_cont("\n");

	print_testname("local_lock inversion 3B");
	dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
	pr_cont("\n");
}

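/*
 * For reference, typical local_lock_t usage in regular kernel code looks like
 * the sketch below (all names invented for the example); the selftests above
 * exercise the same lock class through the lower-level local_lock_acquire()/
 * local_lock_release() annotations instead:
 */
struct demo_pcpu {
	local_lock_t	lock;
	int		counter;
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void __maybe_unused demo_inc(void)
{
	local_lock(&demo_pcpu.lock);	/* protects this CPU's counter */
	this_cpu_inc(demo_pcpu.counter);
	local_unlock(&demo_pcpu.lock);
}
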
@@ -2446,7 +2771,6 @@ void locking_selftest(void)
 	printk("  --------------------------------------------------------------------------\n");
 
 	init_shared_classes();
-	debug_locks_silent = !debug_locks_verbose;
 	lockdep_set_selftest_task(current);
 
 	DO_TESTCASE_6R("A-A deadlock", AA);
@@ -2542,6 +2866,12 @@ void locking_selftest(void)
 
 	fs_reclaim_tests();
 
+	/* Wait context test cases that are specific for RAW_LOCK_NESTING */
+	if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
+		wait_context_tests();
+
+	local_lock_tests();
+
 	if (unexpected_testcase_failures) {
 		printk("-----------------------------------------------------------------\n");
 		debug_locks = 0;

@@ -7062,12 +7062,6 @@ sub process {
 			}
 		}
 
-# check for mutex_trylock_recursive usage
-		if ($line =~ /mutex_trylock_recursive/) {
-			ERROR("LOCKING",
-			      "recursive locking is bad, do not use this ever.\n" . $herecurr);
-		}
-
 # check for lockdep_set_novalidate_class
 		if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
 		    $line =~ /__lockdep_no_validate__\s*\)/ ) {

@@ -33,10 +33,11 @@ Acquire: With respect to a lock, acquiring that lock, for example,
 	acquire loads.
 
-	When an acquire load returns the value stored by a release store
-	to that same variable, then all operations preceding that store
-	happen before any operations following that load acquire.
+	When an acquire load returns the value stored by a release store
+	to that same variable, (in other words, the acquire load "reads
+	from" the release store), then all operations preceding that
+	store "happen before" any operations following that load acquire.
 
-	See also "Relaxed" and "Release".
+	See also "Happens-Before", "Reads-From", "Relaxed", and "Release".
 
 Coherence (co): When one CPU's store to a given variable overwrites
 	either the value from another CPU's store or some later value,
@@ -119,6 +120,11 @@ Fully Ordered: An operation such as smp_mb() that orders all of
 	that orders all of its CPU's prior accesses, itself, and
 	all of its CPU's subsequent accesses.
 
+Happens-Before (hb): A relation between two accesses in which LKMM
+	guarantees the first access precedes the second.  For more
+	detail, please see the "THE HAPPENS-BEFORE RELATION: hb"
+	section of explanation.txt.
+
 Marked Access: An access to a variable that uses an special function or
 	macro such as "r1 = READ_ONCE(x)" or "smp_store_release(&a, 1)".
 

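The reads-from/happens-before wording added above describes the usual
release/acquire pairing; a minimal kernel-style sketch of it (illustrative
code, not part of the patch):

	int data;
	int flag;

	void writer(void)
	{
		WRITE_ONCE(data, 42);
		smp_store_release(&flag, 1);		/* release store */
	}

	void reader(void)
	{
		if (smp_load_acquire(&flag) == 1)	/* reads from the release store */
			BUG_ON(READ_ONCE(data) != 42);	/* write to data happens before this read */
	}
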
@@ -51,7 +51,7 @@ klitmus7 Compatibility Table
 	============ ==========
 	target Linux herdtools7
 	------------ ----------
-	     -- 4.18 7.48 --
+	     -- 4.14 7.48 --
 	4.15 -- 4.19 7.49 --
 	4.20 -- 5.5  7.54 --
 	5.6  --      7.56 --

@@ -7,9 +7,7 @@ C CoRR+poonceonce+Once
  * reads from the same variable are ordered.
  *)
 
-{
-	int x;
-}
+{}
 
 P0(int *x)
 {

@@ -7,9 +7,7 @@ C CoRW+poonceonce+Once
  * a given variable and a later write to that same variable are ordered.
  *)
 
-{
-	int x;
-}
+{}
 
 P0(int *x)
 {

@@ -7,9 +7,7 @@ C CoWR+poonceonce+Once
  * given variable and a later read from that same variable are ordered.
  *)
 
-{
-	int x;
-}
+{}
 
 P0(int *x)
 {

@@ -7,9 +7,7 @@ C CoWW+poonceonce
  * writes to the same variable are ordered.
  *)
 
-{
-	int x;
-}
+{}
 
 P0(int *x)
 {

@@ -10,10 +10,7 @@ C IRIW+fencembonceonces+OnceOnce
  * process? This litmus test exercises LKMM's "propagation" rule.
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x)
 {

@@ -10,10 +10,7 @@ C IRIW+poonceonces+OnceOnce
  * different process?
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x)
 {

@@ -7,12 +7,7 @@ C ISA2+pooncelock+pooncelock+pombonce
  * (in P0() and P1()) is visible to external process P2().
  *)
 
-{
-	spinlock_t mylock;
-	int x;
-	int y;
-	int z;
-}
+{}
 
 P0(int *x, int *y, spinlock_t *mylock)
 {

@@ -9,11 +9,7 @@ C ISA2+poonceonces
  * of the smp_load_acquire() invocations are replaced by READ_ONCE()?
  *)
 
-{
-	int x;
-	int y;
-	int z;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -11,11 +11,7 @@ C ISA2+pooncerelease+poacquirerelease+poacquireonce
  * (AKA non-rf) link, so release-acquire is all that is needed.
  *)
 
-{
-	int x;
-	int y;
-	int z;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -11,10 +11,7 @@ C LB+fencembonceonce+ctrlonceonce
  * another control dependency and order would still be maintained.)
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -8,10 +8,7 @@ C LB+poacquireonce+pooncerelease
  * to the other?
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -7,10 +7,7 @@ C LB+poonceonces
  * be prevented even with no explicit ordering?
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -8,10 +8,7 @@ C MP+fencewmbonceonce+fencermbonceonce
  * is usually better to use smp_store_release() and smp_load_acquire().
  *)
 
-{
-	int buf;
-	int flag;
-}
+{}
 
 P0(int *buf, int *flag) // Producer
 {

@@ -10,9 +10,7 @@ C MP+onceassign+derefonce
  *)
 
 {
-	int *p=y;
-	int x;
-	int y=0;
+	p=y;
 }
 
 P0(int *x, int **p) // Producer

@@ -10,10 +10,7 @@ C MP+polockmbonce+poacquiresilsil
  * executed before the lock was acquired (loosely speaking).
  *)
 
-{
-	spinlock_t lo;
-	int x;
-}
+{}
 
 P0(spinlock_t *lo, int *x) // Producer
 {

@@ -10,10 +10,7 @@ C MP+polockonce+poacquiresilsil
  * speaking).
  *)
 
-{
-	spinlock_t lo;
-	int x;
-}
+{}
 
 P0(spinlock_t *lo, int *x) // Producer
 {

@@ -11,11 +11,7 @@ C MP+polocks
  * to see all prior accesses by those other CPUs.
  *)
 
-{
-	spinlock_t mylock;
-	int buf;
-	int flag;
-}
+{}
 
 P0(int *buf, int *flag, spinlock_t *mylock) // Producer
 {

@@ -7,10 +7,7 @@ C MP+poonceonces
  * no ordering at all?
  *)
 
-{
-	int buf;
-	int flag;
-}
+{}
 
 P0(int *buf, int *flag) // Producer
 {

@@ -8,10 +8,7 @@ C MP+pooncerelease+poacquireonce
  * pattern.
  *)
 
-{
-	int buf;
-	int flag;
-}
+{}
 
 P0(int *buf, int *flag) // Producer
 {

@@ -11,11 +11,7 @@ C MP+porevlocks
  * see all prior accesses by those other CPUs.
  *)
 
-{
-	spinlock_t mylock;
-	int buf;
-	int flag;
-}
+{}
 
 P0(int *buf, int *flag, spinlock_t *mylock) // Consumer
 {

@@ -9,10 +9,7 @@ C R+fencembonceonces
  * cause the resulting test to be allowed.
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -8,10 +8,7 @@ C R+poonceonces
  * store propagation delays.
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -7,10 +7,7 @@ C S+fencewmbonceonce+poacquireonce
  * store against a subsequent store?
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -9,10 +9,7 @@ C S+poonceonces
  * READ_ONCE(), is ordering preserved?
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -9,10 +9,7 @@ C SB+fencembonceonces
  * suffice, but not much else.)
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -8,10 +8,7 @@ C SB+poonceonces
  * variable that the preceding process reads.
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -6,10 +6,7 @@ C SB+rfionceonce-poonceonces
  * This litmus test demonstrates that LKMM is not fully multicopy atomic.
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x, int *y)
 {

@@ -8,10 +8,7 @@ C WRC+poonceonces+Once
  * test has no ordering at all.
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x)
 {

@@ -10,10 +10,7 @@ C WRC+pooncerelease+fencermbonceonce+Once
  * is A-cumulative in LKMM.
  *)
 
-{
-	int x;
-	int y;
-}
+{}
 
 P0(int *x)
 {

@@ -9,12 +9,7 @@ C Z6.0+pooncelock+poonceLock+pombonce
  * by CPUs not holding that lock.
  *)
 
-{
-	spinlock_t mylock;
-	int x;
-	int y;
-	int z;
-}
+{}
 
 P0(int *x, int *y, spinlock_t *mylock)
 {

@@ -8,12 +8,7 @@ C Z6.0+pooncelock+pooncelock+pombonce
  * seen as ordered by a third process not holding that lock.
  *)
 
-{
-	spinlock_t mylock;
-	int x;
-	int y;
-	int z;
-}
+{}
 
 P0(int *x, int *y, spinlock_t *mylock)
 {

@@ -14,11 +14,7 @@ C Z6.0+pooncerelease+poacquirerelease+fencembonceonce
  * involving locking.)
  *)
 
-{
-	int x;
-	int y;
-	int z;
-}
+{}
 
 P0(int *x, int *y)
 {