workqueue: Wrap flush_workqueue() using a macro

Since a flush operation synchronously waits for completion, flushing
system-wide WQs (e.g. system_wq) might introduce the possibility of deadlock
due to an unexpected locking dependency. Tejun Heo commented at [1] that it
makes no sense at all to call flush_workqueue() on the shared WQs, as the
caller has no idea what it will end up waiting for.
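
As an illustration, here is a hypothetical pattern of such a deadlock
(my_lock, my_work_fn and my_teardown are made-up names, not taken from
this patch):

  static DEFINE_MUTEX(my_lock);

  static void my_work_fn(struct work_struct *work)
  {
          mutex_lock(&my_lock);           /* (2) the work item waits for my_lock */
          mutex_unlock(&my_lock);
  }
  static DECLARE_WORK(my_work, my_work_fn);

  static void my_teardown(void)
  {
          mutex_lock(&my_lock);           /* (1) my_lock is held */
          flush_workqueue(system_wq);     /* (3) waits for every work item on
                                           * system_wq, including my_work and
                                           * unrelated ones: deadlock */
          mutex_unlock(&my_lock);
  }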

Although flush_scheduled_work(), which flushes the system_wq WQ, carries the
warning "Think twice before calling this function! It's very easy to get into
trouble if you don't take great care.", syzbot found a circular locking
dependency caused by flushing the system_wq WQ [2].

Therefore, let's change direction: developers should use their own local WQs
if flush_scheduled_work()/flush_workqueue(system_*_wq) is inevitable.

Steps for converting system-wide WQs into local WQs are explained at [3],
and a conversion to stop flushing system-wide WQs is in progress. Now we
want some mechanism to prevent developers who are unaware of this conversion
from starting to flush system-wide WQs again.
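
A minimal sketch of such a conversion (my_wq, my_init and my_sync are
illustrative names, not part of this patch):

  static struct workqueue_struct *my_wq;

  static int __init my_init(void)
  {
          /* Use a driver-local WQ instead of system_wq. */
          my_wq = alloc_workqueue("my_wq", 0, 0);
          return my_wq ? 0 : -ENOMEM;
  }

  static void my_sync(void)
  {
          queue_work(my_wq, &my_work);
          /* Waits only for work items queued on my_wq. */
          flush_workqueue(my_wq);
  }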

Since WARN_ON() would be a complete but awkward approach for teaching
developers about this problem, let's use __compiletime_warning() as an
incomplete but handy approach. For completeness, we will also insert
WARN_ON() into __flush_workqueue() once all in-tree users have stopped
calling flush_scheduled_work().
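
With this change, the warning fires only when the compiler can prove at the
call site that a system-wide WQ is being flushed; a pointer that is only
known at run time stays silent. An illustrative call site:

  static void example(struct workqueue_struct *wq)
  {
          flush_workqueue(system_wq);     /* the comparison folds to constant
                                           * true, so the call to
                                           * __warn_flushing_systemwide_wq()
                                           * survives and the compiler emits
                                           * "Please avoid flushing system-wide
                                           * workqueues." */
          flush_workqueue(wq);            /* not a compile-time constant: the
                                           * warning call is dead code, gets
                                           * optimized away, and no warning is
                                           * emitted */
  }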

Link: https://lore.kernel.org/all/YgnQGZWT%2Fn3VAITX@slm.duckdns.org/ [1]
Link: https://syzkaller.appspot.com/bug?extid=bde0f89deacca7c765b8 [2]
Link: https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp [3]
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Tejun Heo <tj@kernel.org>
---
 2 files changed, 68 insertions(+), 12 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -445,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay);
 extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
 
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 
 extern int schedule_on_each_cpu(work_func_t func);
@@ -563,15 +563,23 @@ static inline bool schedule_work(struct work_struct *work)
 	return queue_work(system_wq, work);
 }
 
+/*
+ * Detect attempt to flush system-wide workqueues at compile time when possible.
+ *
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+	__compiletime_warning("Please avoid flushing system-wide workqueues.");
+
 /**
  * flush_scheduled_work - ensure that any scheduled work has run to completion.
  *
  * Forces execution of the kernel-global workqueue and blocks until its
  * completion.
  *
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
  *
  *	One of the work items currently on the workqueue needs to acquire
  *	a lock held by your code or its caller.
@@ -586,11 +594,51 @@ static inline bool schedule_work(struct work_struct *work)
  * need to know that a particular work item isn't queued and isn't running.
  * In such cases you should use cancel_delayed_work_sync() or
  * cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed after all in-tree
+ * users stopped calling this function.
  */
-static inline void flush_scheduled_work(void)
-{
-	flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit warning while
+ * there are in-tree users.
+ */
+#define flush_scheduled_work()						\
+({									\
+	if (0)								\
+		__warn_flushing_systemwide_wq();			\
+	__flush_workqueue(system_wq);					\
+})
+
+/*
+ * Although there is no longer in-tree caller, for now just emit warning
+ * in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq)						\
+({									\
+	struct workqueue_struct *_wq = (wq);				\
+									\
+	if ((__builtin_constant_p(_wq == system_wq) &&			\
+	     _wq == system_wq) ||					\
+	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
+	     _wq == system_highpri_wq) ||				\
+	    (__builtin_constant_p(_wq == system_long_wq) &&		\
+	     _wq == system_long_wq) ||					\
+	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
+	     _wq == system_unbound_wq) ||				\
+	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
+	     _wq == system_freezable_wq) ||				\
+	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
+	     _wq == system_power_efficient_wq) ||			\
+	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) &&	\
+	     _wq == system_freezable_power_efficient_wq))		\
+		__warn_flushing_systemwide_wq();			\
+	__flush_workqueue(_wq);						\
+})
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2788,13 +2788,13 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 }
 
 /**
- * flush_workqueue - ensure that any scheduled work has run to completion.
+ * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
  *
  * This function sleeps until all work items which were queued on entry
  * have finished execution, but it is not livelocked by new incoming ones.
  */
-void flush_workqueue(struct workqueue_struct *wq)
+void __flush_workqueue(struct workqueue_struct *wq)
 {
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
@@ -2943,7 +2943,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 out_unlock:
 	mutex_unlock(&wq->mutex);
 }
-EXPORT_SYMBOL(flush_workqueue);
+EXPORT_SYMBOL(__flush_workqueue);
 
 /**
  * drain_workqueue - drain a workqueue
@@ -2971,7 +2971,7 @@ void drain_workqueue(struct workqueue_struct *wq)
 		wq->flags |= __WQ_DRAINING;
 	mutex_unlock(&wq->mutex);
 reflush:
-	flush_workqueue(wq);
+	__flush_workqueue(wq);
 
 	mutex_lock(&wq->mutex);
@@ -6111,3 +6111,11 @@ void __init workqueue_init(void)
 	wq_online = true;
 	wq_watchdog_init();
 }
+
+/*
+ * Despite the naming, this is a no-op function which is here only for avoiding
+ * link error. Since compile-time warning may fail to catch, we will need to
+ * emit run-time warning from __flush_workqueue().
+ */
+void __warn_flushing_systemwide_wq(void) { }
+EXPORT_SYMBOL(__warn_flushing_systemwide_wq);