mm/filemap: Add readahead_folio()
The pointers stored in the page cache are folios, by definition. This change comes with a behaviour change -- callers of readahead_folio() are no longer required to put the page reference themselves. This matches how readpage works, rather than matching how readpages used to work. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: David Howells <dhowells@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent
f705bf84ea
commit
9bf70167e3
|
@ -987,33 +987,57 @@ void page_cache_async_readahead(struct address_space *mapping,
|
|||
page_cache_async_ra(&ractl, page, req_count);
|
||||
}
|
||||
|
||||
/*
 * Advance the readahead request past the previous batch and return the
 * next folio to read, or NULL when the request is exhausted.
 *
 * The reference the folio holds from being in the page cache is NOT
 * dropped here; callers of this helper decide what to do with it
 * (see readahead_folio(), which puts it, vs readahead_page(), which
 * leaves it elevated for the caller).
 */
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	/* The previous batch cannot have consumed more than was queued. */
	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	/* Retire the previous batch before looking up the next folio. */
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		/* Nothing left; reset so a further call stays a no-op. */
		ractl->_batch_count = 0;
		return NULL;
	}

	/* Look up the folio at the new index in the page cache. */
	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	/* Readahead folios are inserted locked; they must still be. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	/* A large folio accounts for all of its constituent pages. */
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}
/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	/*
	 * NOTE(review): relies on &folio->page evaluating to NULL when
	 * folio is NULL (i.e. page at offset 0 of struct folio) — confirm
	 * against the struct folio definition.
	 */
	return &folio->page;
}
/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked.  The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	/*
	 * Unlike readahead_page(), the page-cache reference is dropped
	 * here, so the caller is only responsible for unlocking the
	 * folio, not for putting it.
	 */
	if (folio)
		folio_put(folio);
	return folio;
}
|
||||
|
||||
static inline unsigned int __readahead_batch(struct readahead_control *rac,
|
||||
|
|
Loading…
Reference in New Issue