io_uring: fix {SQ,IO}POLL with unsupported opcodes

IORING_SETUP_IOPOLL is defined only for read/write; other opcodes should
be disallowed, otherwise submitting one can trigger an oops like the
trace below. Also refuse open/close with SQPOLL, as the polling thread
wouldn't know which file table to use.

RIP: 0010:io_iopoll_getevents+0x111/0x5a0
Call Trace:
 ? _raw_spin_unlock_irqrestore+0x24/0x40
 ? do_send_sig_info+0x64/0x90
 io_iopoll_reap_events.part.0+0x5e/0xa0
 io_ring_ctx_wait_and_kill+0x132/0x1c0
 io_uring_release+0x20/0x30
 __fput+0xcd/0x230
 ____fput+0xe/0x10
 task_work_run+0x67/0xa0
 do_exit+0x353/0xb10
 ? handle_mm_fault+0xd4/0x200
 ? syscall_trace_enter+0x18c/0x2c0
 do_group_exit+0x43/0xa0
 __x64_sys_exit_group+0x18/0x20
 do_syscall_64+0x60/0x1e0
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
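
For illustration only (not part of the patch), a minimal liburing-based
sketch of the failure mode: it submits an opcode IOPOLL doesn't support
(fallocate) on an IORING_SETUP_IOPOLL ring. With this fix the request is
rejected at prep time and the CQE carries -EINVAL instead of the request
ever reaching the iopoll path. The file name is arbitrary; build with
"gcc repro.c -luring", assuming liburing is installed.

/* Sketch, not from this commit: unsupported opcode on an IOPOLL ring. */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd;

	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL) < 0)
		return 1;
	fd = open("testfile", O_RDWR | O_CREAT, 0644);

	sqe = io_uring_get_sqe(&ring);
	/* fallocate is not a pollable read/write request */
	io_uring_prep_fallocate(sqe, fd, 0, 0, 4096);
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		printf("res = %d (expect -EINVAL with this fix)\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}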

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: allow provide/remove buffers and files update]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov, 2020-06-03 18:03:22 +03:00, committed by Jens Axboe
parent fd2206e4e9
commit 3232dd02af
1 changed file with 18 additions and 0 deletions


@@ -2766,6 +2766,8 @@ static int __io_splice_prep(struct io_kiocb *req,
 
 	if (req->flags & REQ_F_NEED_CLEANUP)
 		return 0;
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
 
 	sp->file_in = NULL;
 	sp->len = READ_ONCE(sqe->len);
@@ -2966,6 +2968,8 @@ static int io_fallocate_prep(struct io_kiocb *req,
 {
 	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
 		return -EINVAL;
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
 
 	req->sync.off = READ_ONCE(sqe->off);
 	req->sync.len = READ_ONCE(sqe->addr);
@@ -2991,6 +2995,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	const char __user *fname;
 	int ret;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
@@ -3024,6 +3030,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	size_t len;
 	int ret;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
@@ -3263,6 +3271,8 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
 #if defined(CONFIG_EPOLL)
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
 
 	req->epoll.epfd = READ_ONCE(sqe->fd);
 	req->epoll.op = READ_ONCE(sqe->len);
@@ -3307,6 +3317,8 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
 	if (sqe->ioprio || sqe->buf_index || sqe->off)
 		return -EINVAL;
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
 
 	req->madvise.addr = READ_ONCE(sqe->addr);
 	req->madvise.len = READ_ONCE(sqe->len);
@@ -3341,6 +3353,8 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	if (sqe->ioprio || sqe->buf_index || sqe->addr)
 		return -EINVAL;
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
 
 	req->fadvise.offset = READ_ONCE(sqe->off);
 	req->fadvise.len = READ_ONCE(sqe->len);
@@ -3374,6 +3388,8 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
 
 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
+	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->buf_index)
 		return -EINVAL;
 	if (req->flags & REQ_F_FIXED_FILE)
@@ -3418,6 +3434,8 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	 */
 	req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 
+	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+		return -EINVAL;
 	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
 	    sqe->rw_flags || sqe->buf_index)
 		return -EINVAL;