mm: support async buffered reads in generic_file_buffered_read()

Use the async page locking infrastructure, if IOCB_WAITQ is set in the
passed in iocb. The caller must expect an -EIOCBQUEUED return value,
which means that IO is started but not done yet. This is similar to how
O_DIRECT signals the same operation. Once the callback is received by
the caller for IO completion, the caller must retry the operation.
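
Roughly, a caller is expected to drive this as sketched below: fill in a
wait_page_queue with a wake callback, set IOCB_WAITQ and ki_waitq on the
kiocb, issue the read, and on -EIOCBQUEUED retry once the callback fires.
This is an illustrative sketch only, not part of the patch; io_uring grows
the real version of this later in the series, and my_request, my_wake_func()
and my_schedule_retry() are made-up names:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/wait.h>

struct my_request {				/* hypothetical per-IO state */
	struct file		*file;
	struct wait_page_queue	wpq;
};

/* hypothetical: punt the retry to task context (e.g. task_work) */
void my_schedule_retry(struct my_request *req);

/* Wake callback: runs once the page we queued on gets unlocked. */
static int my_wake_func(struct wait_queue_entry *wait, unsigned mode,
			int sync, void *key)
{
	struct my_request *req = wait->private;

	list_del_init(&wait->entry);
	my_schedule_retry(req);		/* re-issue the read from task context */
	return 1;
}

static ssize_t my_buffered_read(struct my_request *req, struct kiocb *kiocb,
				struct iov_iter *iter)
{
	struct wait_page_queue *wpq = &req->wpq;

	wpq->wait.func = my_wake_func;
	wpq->wait.private = req;
	wpq->wait.flags = 0;
	INIT_LIST_HEAD(&wpq->wait.entry);

	kiocb->ki_flags |= IOCB_WAITQ;	/* queue on the page lock instead of sleeping */
	kiocb->ki_waitq = wpq;

	/*
	 * -EIOCBQUEUED means the page lock could not be taken without
	 * blocking: IO has been started, and my_wake_func() will fire once
	 * the page is unlocked so the read can be retried.
	 */
	return call_read_iter(req->file, kiocb, iter);
}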

Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe  2020-05-22 09:18:38 -06:00
parent dd3e6d5039  commit 1a0a7853b9
1 file changed with 27 additions and 11 deletions

mm/filemap.c

@@ -1210,6 +1210,14 @@ static int __wait_on_page_locked_async(struct page *page,
 	return ret;
 }
 
+static int wait_on_page_locked_async(struct page *page,
+				     struct wait_page_queue *wait)
+{
+	if (!PageLocked(page))
+		return 0;
+	return __wait_on_page_locked_async(compound_head(page), wait, false);
+}
+
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
@@ -2049,17 +2057,25 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
 					index, last_index - index);
 		}
 		if (!PageUptodate(page)) {
-			if (iocb->ki_flags & IOCB_NOWAIT) {
-				put_page(page);
-				goto would_block;
-			}
-
 			/*
 			 * See comment in do_read_cache_page on why
 			 * wait_on_page_locked is used to avoid unnecessarily
 			 * serialisations and why it's safe.
 			 */
-			error = wait_on_page_locked_killable(page);
+			if (iocb->ki_flags & IOCB_WAITQ) {
+				if (written) {
+					put_page(page);
+					goto out;
+				}
+				error = wait_on_page_locked_async(page,
+								iocb->ki_waitq);
+			} else {
+				if (iocb->ki_flags & IOCB_NOWAIT) {
+					put_page(page);
+					goto would_block;
+				}
+				error = wait_on_page_locked_killable(page);
+			}
 			if (unlikely(error))
 				goto readpage_error;
 			if (PageUptodate(page))
@@ -2147,6 +2163,9 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
 
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		error = lock_page_killable(page);
+		if (iocb->ki_flags & IOCB_WAITQ)
+			error = lock_page_async(page, iocb->ki_waitq);
+		else
+			error = lock_page_killable(page);
 		if (unlikely(error))
 			goto readpage_error;
@@ -2190,9 +2209,6 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
 		}
 
 		if (!PageUptodate(page)) {
-			error = lock_page_killable(page);
+			if (iocb->ki_flags & IOCB_WAITQ)
+				error = lock_page_async(page, iocb->ki_waitq);
+			else
+				error = lock_page_killable(page);
 			if (unlikely(error))
 				goto readpage_error;