io_uring: flip if handling after io_setup_async_rw

As was recently done with send/recv, flip the if after
rw_verify_area() in io_{read,write}() and indent the remaining code.
This removes a compiler-mispredicted jump on the success/fast path.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2020-08-01 13:50:02 +03:00 committed by Jens Axboe
parent 1752f0adea
commit fa15bafb71
1 changed file with 72 additions and 74 deletions

View File

@ -3034,28 +3034,28 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
size_t iov_count;
ssize_t io_size, ret;
ssize_t io_size, ret, ret2;
unsigned long nr_segs;
ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
if (ret < 0)
return ret;
io_size = ret;
req->result = io_size;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
io_size = ret;
req->result = io_size;
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, READ))
goto copy_iov;
iov_count = iov_iter_count(&iter);
nr_segs = iter.nr_segs;
ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
if (!ret) {
unsigned long nr_segs = iter.nr_segs;
ssize_t ret2 = 0;
if (unlikely(ret))
goto out_free;
ret2 = io_iter_do_read(req, &iter);
@ -3066,8 +3066,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
iter.count = iov_count;
iter.nr_segs = nr_segs;
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
inline_vecs, &iter);
ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
&iter);
if (ret)
goto out_free;
/* it's copied and will be cleaned with ->io */
@ -3085,7 +3085,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
kiocb->ki_flags &= ~IOCB_WAITQ;
return -EAGAIN;
}
}
out_free:
if (iovec)
kfree(iovec);
@ -3117,19 +3116,19 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
size_t iov_count;
ssize_t ret, io_size;
ssize_t ret, ret2, io_size;
unsigned long nr_segs;
ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
if (ret < 0)
return ret;
io_size = ret;
req->result = io_size;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
io_size = ret;
req->result = io_size;
/* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, WRITE))
goto copy_iov;
@ -3140,10 +3139,10 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
goto copy_iov;
iov_count = iov_iter_count(&iter);
nr_segs = iter.nr_segs;
ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
if (!ret) {
unsigned long nr_segs = iter.nr_segs;
ssize_t ret2;
if (unlikely(ret))
goto out_free;
/*
* Open-code file_start_write here to grab freeze protection,
@ -3177,15 +3176,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
iter.count = iov_count;
iter.nr_segs = nr_segs;
copy_iov:
ret = io_setup_async_rw(req, io_size, iovec,
inline_vecs, &iter);
ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
&iter);
if (ret)
goto out_free;
/* it's copied and will be cleaned with ->io */
iovec = NULL;
return -EAGAIN;
}
}
out_free:
if (iovec)
kfree(iovec);