Merge tag 'core-debugobjects-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull debugobjects update from Thomas Gleixner:

 "A single commit for debug objects which fixes a pile of potential data
  races detected by KCSAN"

* tag 'core-debugobjects-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobjects: Fix various data races
commit 534b0a8b67
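All of the hunks below apply the same recipe: the pool counters obj_pool_free and obj_nr_tofree and the flag obj_freeing are modified under pool_lock but are also read locklessly on fast paths, so the plain accesses become READ_ONCE()/WRITE_ONCE() pairs that mark the races as intentional for KCSAN. Here is a minimal userspace sketch of that pattern, not the debugobjects code itself; the names (my_pool_free, my_lock, refill_locked, maybe_refill) and the threshold 16 are invented for illustration, and the two macros are modelled the way the kernel defines them, as volatile accesses:

```c
#include <pthread.h>

/*
 * Illustrative stand-ins for the kernel macros: volatile accesses that
 * keep the compiler from tearing, fusing or re-reading the value.
 */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static int my_pool_free;	/* shared counter, written under my_lock */
static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

/* Slow path: refill the pool with the lock held. */
static void refill_locked(void)
{
	/* WRITE_ONCE() so concurrent lockless readers see a sane value. */
	WRITE_ONCE(my_pool_free, my_pool_free + 1);
}

void maybe_refill(void)
{
	/* Lockless fast-path check; READ_ONCE() marks the racy read. */
	if (READ_ONCE(my_pool_free) >= 16)
		return;

	pthread_mutex_lock(&my_lock);
	refill_locked();
	pthread_mutex_unlock(&my_lock);
}
```

The actual hunks, all in lib/debugobjects.c, follow.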
```diff
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -132,14 +132,18 @@ static void fill_pool(void)
 	struct debug_obj *obj;
 	unsigned long flags;
 
-	if (likely(obj_pool_free >= debug_objects_pool_min_level))
+	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
 		return;
 
 	/*
 	 * Reuse objs from the global free list; they will be reinitialized
 	 * when allocating.
+	 *
+	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
+	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+	 * sections.
 	 */
-	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		/*
 		 * Recheck with the lock held as the worker thread might have
@@ -148,9 +152,9 @@ static void fill_pool(void)
 		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
-			obj_nr_tofree--;
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 			hlist_add_head(&obj->node, &obj_pool);
-			obj_pool_free++;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -158,7 +162,7 @@ static void fill_pool(void)
 	if (unlikely(!obj_cache))
 		return;
 
-	while (obj_pool_free < debug_objects_pool_min_level) {
+	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 		struct debug_obj *new[ODEBUG_BATCH_SIZE];
 		int cnt;
 
@@ -174,7 +178,7 @@ static void fill_pool(void)
 		while (cnt) {
 			hlist_add_head(&new[--cnt]->node, &obj_pool);
 			debug_objects_allocated++;
-			obj_pool_free++;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
@@ -236,7 +240,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 	obj = __alloc_object(&obj_pool);
 	if (obj) {
 		obj_pool_used++;
-		obj_pool_free--;
+		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 
 		/*
 		 * Looking ahead, allocate one batch of debug objects and
@@ -255,7 +259,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 					       &percpu_pool->free_objs);
 				percpu_pool->obj_free++;
 				obj_pool_used++;
-				obj_pool_free--;
+				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 			}
 		}
 
@@ -309,8 +313,8 @@ static void free_obj_work(struct work_struct *work)
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
 			hlist_add_head(&obj->node, &obj_pool);
-			obj_pool_free++;
-			obj_nr_tofree--;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 		return;
@@ -324,7 +328,7 @@ static void free_obj_work(struct work_struct *work)
 	if (obj_nr_tofree) {
 		hlist_move_list(&obj_to_free, &tofree);
 		debug_objects_freed += obj_nr_tofree;
-		obj_nr_tofree = 0;
+		WRITE_ONCE(obj_nr_tofree, 0);
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
 
@@ -375,10 +379,10 @@ static void __free_object(struct debug_obj *obj)
 	obj_pool_used--;
 
 	if (work) {
-		obj_nr_tofree++;
+		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 		hlist_add_head(&obj->node, &obj_to_free);
 		if (lookahead_count) {
-			obj_nr_tofree += lookahead_count;
+			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
 			obj_pool_used -= lookahead_count;
 			while (lookahead_count) {
 				hlist_add_head(&objs[--lookahead_count]->node,
@@ -396,15 +400,15 @@ static void __free_object(struct debug_obj *obj)
 			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 				obj = __alloc_object(&obj_pool);
 				hlist_add_head(&obj->node, &obj_to_free);
-				obj_pool_free--;
-				obj_nr_tofree++;
+				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 			}
 		}
 	} else {
-		obj_pool_free++;
+		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 		hlist_add_head(&obj->node, &obj_pool);
 		if (lookahead_count) {
-			obj_pool_free += lookahead_count;
+			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
 			obj_pool_used -= lookahead_count;
 			while (lookahead_count) {
 				hlist_add_head(&objs[--lookahead_count]->node,
@@ -423,7 +427,7 @@ static void __free_object(struct debug_obj *obj)
 static void free_object(struct debug_obj *obj)
 {
 	__free_object(obj);
-	if (!obj_freeing && obj_nr_tofree) {
+	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 		WRITE_ONCE(obj_freeing, true);
 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 	}
@@ -982,7 +986,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 		debug_objects_maxchecked = objs_checked;
 
 	/* Schedule work to actually kmem_cache_free() objects */
-	if (!obj_freeing && obj_nr_tofree) {
+	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 		WRITE_ONCE(obj_freeing, true);
 		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 	}
@@ -1008,12 +1012,12 @@ static int debug_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
-	seq_printf(m, "pool_free     :%d\n", obj_pool_free + obj_percpu_free);
+	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
 	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
-	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
+	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;
```
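A note on the design choice, as I read it: the writes all stay inside pool_lock critical sections, so the lock alone already serializes writer against writer. The WRITE_ONCE()/READ_ONCE() annotations exist for the lockless readers, such as the fast-path checks in fill_pool() and free_object() and the unlocked counters printed by debug_stats_show(). Marking both sides keeps the compiler from tearing or fusing the accesses and tells KCSAN the race is intentional, and a stale read is harmless here because the value is only checked opportunistically and rechecked with the lock held, as the "Recheck with the lock held" comment in fill_pool() spells out.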