io_uring: replace sqd rw_semaphore with mutex

The only user of read-locking of sqd->rw_lock is sq_thread itself, which
is by definition alone, so we don't really need an rw_semaphore; a mutex
will do. Replace it with a mutex, and kill the read-to-write upgrading and
extra task_work handling in io_sq_thread().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2021-03-14 20:57:10 +00:00 committed by Jens Axboe
parent 180f829fe4
commit 09a6f4efaa
1 changed file with 14 additions and 22 deletions

View File

@ -258,7 +258,7 @@ enum {
struct io_sq_data { struct io_sq_data {
refcount_t refs; refcount_t refs;
struct rw_semaphore rw_lock; struct mutex lock;
/* ctx's that are using this sqd */ /* ctx's that are using this sqd */
struct list_head ctx_list; struct list_head ctx_list;
@ -6689,16 +6689,15 @@ static int io_sq_thread(void *data)
set_cpus_allowed_ptr(current, cpu_online_mask); set_cpus_allowed_ptr(current, cpu_online_mask);
current->flags |= PF_NO_SETAFFINITY; current->flags |= PF_NO_SETAFFINITY;
down_read(&sqd->rw_lock); mutex_lock(&sqd->lock);
while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) { while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
int ret; int ret;
bool cap_entries, sqt_spin, needs_sched; bool cap_entries, sqt_spin, needs_sched;
if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) { if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
up_read(&sqd->rw_lock); mutex_unlock(&sqd->lock);
cond_resched(); cond_resched();
down_read(&sqd->rw_lock); mutex_lock(&sqd->lock);
io_run_task_work(); io_run_task_work();
timeout = jiffies + sqd->sq_thread_idle; timeout = jiffies + sqd->sq_thread_idle;
continue; continue;
@ -6745,10 +6744,10 @@ static int io_sq_thread(void *data)
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_ring_set_wakeup_flag(ctx); io_ring_set_wakeup_flag(ctx);
up_read(&sqd->rw_lock); mutex_unlock(&sqd->lock);
schedule(); schedule();
try_to_freeze(); try_to_freeze();
down_read(&sqd->rw_lock); mutex_lock(&sqd->lock);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_ring_clear_wakeup_flag(ctx); io_ring_clear_wakeup_flag(ctx);
} }
@ -6756,20 +6755,13 @@ static int io_sq_thread(void *data)
finish_wait(&sqd->wait, &wait); finish_wait(&sqd->wait, &wait);
timeout = jiffies + sqd->sq_thread_idle; timeout = jiffies + sqd->sq_thread_idle;
} }
up_read(&sqd->rw_lock);
down_write(&sqd->rw_lock);
/*
* someone may have parked and added a cancellation task_work, run
* it first because we don't want it in io_uring_cancel_sqpoll()
*/
io_run_task_work();
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_uring_cancel_sqpoll(ctx); io_uring_cancel_sqpoll(ctx);
sqd->thread = NULL; sqd->thread = NULL;
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
io_ring_set_wakeup_flag(ctx); io_ring_set_wakeup_flag(ctx);
up_write(&sqd->rw_lock); mutex_unlock(&sqd->lock);
io_run_task_work(); io_run_task_work();
complete(&sqd->exited); complete(&sqd->exited);
@ -7071,21 +7063,21 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
} }
static void io_sq_thread_unpark(struct io_sq_data *sqd) static void io_sq_thread_unpark(struct io_sq_data *sqd)
__releases(&sqd->rw_lock) __releases(&sqd->lock)
{ {
WARN_ON_ONCE(sqd->thread == current); WARN_ON_ONCE(sqd->thread == current);
clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
up_write(&sqd->rw_lock); mutex_unlock(&sqd->lock);
} }
static void io_sq_thread_park(struct io_sq_data *sqd) static void io_sq_thread_park(struct io_sq_data *sqd)
__acquires(&sqd->rw_lock) __acquires(&sqd->lock)
{ {
WARN_ON_ONCE(sqd->thread == current); WARN_ON_ONCE(sqd->thread == current);
set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
down_write(&sqd->rw_lock); mutex_lock(&sqd->lock);
/* set again for consistency, in case concurrent parks are happening */ /* set again for consistency, in case concurrent parks are happening */
set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state); set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
if (sqd->thread) if (sqd->thread)
@ -7096,11 +7088,11 @@ static void io_sq_thread_stop(struct io_sq_data *sqd)
{ {
WARN_ON_ONCE(sqd->thread == current); WARN_ON_ONCE(sqd->thread == current);
down_write(&sqd->rw_lock); mutex_lock(&sqd->lock);
set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state); set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
if (sqd->thread) if (sqd->thread)
wake_up_process(sqd->thread); wake_up_process(sqd->thread);
up_write(&sqd->rw_lock); mutex_unlock(&sqd->lock);
wait_for_completion(&sqd->exited); wait_for_completion(&sqd->exited);
} }
@ -7182,7 +7174,7 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
refcount_set(&sqd->refs, 1); refcount_set(&sqd->refs, 1);
INIT_LIST_HEAD(&sqd->ctx_list); INIT_LIST_HEAD(&sqd->ctx_list);
init_rwsem(&sqd->rw_lock); mutex_init(&sqd->lock);
init_waitqueue_head(&sqd->wait); init_waitqueue_head(&sqd->wait);
init_completion(&sqd->exited); init_completion(&sqd->exited);
return sqd; return sqd;