sched/wait: Fix abort_exclusive_wait(), it should pass TASK_NORMAL to wake_up()
Otherwise this logic only works if mode is "compatible" with another
exclusive waiter.

If some wq has both TASK_INTERRUPTIBLE and TASK_UNINTERRUPTIBLE waiters,
abort_exclusive_wait() won't wake an uninterruptible waiter. The main
user is __wait_on_bit_lock(), and currently it is fine, but only because
TASK_KILLABLE includes TASK_UNINTERRUPTIBLE and we do not have
lock_page_interruptible() yet.

Just use TASK_NORMAL and remove the "mode" argument from
abort_exclusive_wait(). Yes, this means that (say) wake_up_interruptible()
can wake up the non-interruptible waiter(s), but I think this is fine.

And in fact I think that abort_exclusive_wait() must die, see the next
change.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20160906140047.GA6157@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
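[Editor's illustration, not part of the commit.] The bug is easiest to see from how wake-up filtering works: a wake-up with mode M only wakes a sleeper whose task state overlaps M. The minimal user-space C sketch below mirrors that check; the TASK_* values and the would_wake() helper are illustrative stand-ins for the state test done by try_to_wake_up(), not kernel code. It shows why an aborting TASK_INTERRUPTIBLE waiter that passes its own mode can never wake a TASK_UNINTERRUPTIBLE waiter, while TASK_NORMAL always can:

	#include <stdio.h>

	/* illustrative values; TASK_NORMAL is the union of both sleep states */
	#define TASK_INTERRUPTIBLE	0x0001
	#define TASK_UNINTERRUPTIBLE	0x0002
	#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

	/* hypothetical stand-in for the "state & mode" check made at wake-up time */
	static int would_wake(unsigned int sleeper_state, unsigned int wake_mode)
	{
		return (sleeper_state & wake_mode) != 0;
	}

	int main(void)
	{
		/* the remaining exclusive waiter sleeps uninterruptibly */
		unsigned int sleeper = TASK_UNINTERRUPTIBLE;

		/* old behaviour: the aborting interruptible waiter passed its own mode */
		printf("wake(TASK_INTERRUPTIBLE): %d\n",
		       would_wake(sleeper, TASK_INTERRUPTIBLE));	/* 0: missed */

		/* new behaviour: abort_exclusive_wait() always wakes with TASK_NORMAL */
		printf("wake(TASK_NORMAL):        %d\n",
		       would_wake(sleeper, TASK_NORMAL));		/* 1: woken */

		return 0;
	}

This is why waking with TASK_NORMAL (and dropping the mode argument entirely, as the patch does) is safe no matter which waiter aborts.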
commit 38a3e1fc1d
parent ab522e33f9
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -281,8 +281,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 		if (___wait_is_interruptible(state) && __int) {		\
 			__ret = __int;					\
 			if (exclusive) {				\
-				abort_exclusive_wait(&wq, &__wait,	\
-						     state, NULL);	\
+				abort_exclusive_wait(&wq, &__wait,	\
+						     NULL);		\
 				goto __out;				\
 			}						\
 			break;						\
@@ -989,7 +989,7 @@ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, void *key);
 long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
 int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -259,7 +259,6 @@ EXPORT_SYMBOL(finish_wait);
  * abort_exclusive_wait - abort exclusive waiting in a queue
  * @q: waitqueue waited on
  * @wait: wait descriptor
- * @mode: runstate of the waiter to be woken
  * @key: key to identify a wait bit queue or %NULL
  *
  * Sets current thread back to running state and removes
@@ -273,8 +272,7 @@ EXPORT_SYMBOL(finish_wait);
  * aborts and is woken up concurrently and no one wakes up
  * the next waiter.
  */
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
-			unsigned int mode, void *key)
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, void *key)
 {
 	unsigned long flags;
 
@@ -283,7 +281,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
 	if (!list_empty(&wait->task_list))
 		list_del_init(&wait->task_list);
 	else if (waitqueue_active(q))
-		__wake_up_locked_key(q, mode, key);
+		__wake_up_locked_key(q, TASK_NORMAL, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(abort_exclusive_wait);
@@ -434,7 +432,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 		ret = action(&q->key, mode);
 		if (!ret)
 			continue;
-		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
+		abort_exclusive_wait(wq, &q->wait, &q->key);
 		return ret;
 	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
 	finish_wait(wq, &q->wait);