mm/filemap: add helper for finding pages

There is a lot of common code in find_get_entries(),
find_get_pages_range() and find_get_pages_range_tag().  Factor out
find_get_entry() which simplifies all three functions.

[willy@infradead.org: remove VM_BUG_ON_PAGE()]
  Link: https://lkml.kernel.org/r/20201124041507.28996-2-willy@infradead.org
Link: https://lkml.kernel.org/r/20201112212641.27837-7-willy@infradead.org

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2021-02-25 17:15:44 -08:00 committed by Linus Torvalds
parent bc5a301120
commit c7bad633e6
1 changed file with 42 additions and 55 deletions

View File

@ -1825,6 +1825,42 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
} }
EXPORT_SYMBOL(pagecache_get_page); EXPORT_SYMBOL(pagecache_get_page);
/*
 * find_get_entry - Return the next page-cache entry, locklessly.
 * @xas:  XArray iteration state; left positioned at the returned entry.
 * @max:  Highest page index to search (inclusive).
 * @mark: Mark the entry must carry, or XA_PRESENT to accept any entry.
 *
 * Common helper factored out of find_get_entries(), find_get_pages_range()
 * and find_get_pages_range_tag().  Returns the next entry at or below @max
 * with a reference taken on real pages; value entries (shadow, swap or DAX)
 * are returned as-is with no reference taken.  Returns NULL when no further
 * entry exists.  Callers hold rcu_read_lock() across the walk (as the call
 * sites converted by this patch do), which is what makes the speculative
 * get + reload revalidation below safe.
 */
static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,
		xa_mark_t mark)
{
	struct page *page;

retry:
	/* XA_PRESENT means "any entry"; otherwise only marked entries match. */
	if (mark == XA_PRESENT)
		page = xas_find(xas, max);
	else
		page = xas_find_marked(xas, max, mark);

	/* The walk hit an XArray retry entry: restart the lookup. */
	if (xas_retry(xas, page))
		goto retry;
	/*
	 * A shadow entry of a recently evicted page, a swap
	 * entry from shmem/tmpfs or a DAX entry. Return it
	 * without attempting to raise page count.
	 */
	if (!page || xa_is_value(page))
		return page;

	/*
	 * The page may be concurrently freed; only a speculative reference
	 * can be taken here.  Failure means the page is already gone —
	 * reset the walk and look again.
	 */
	if (!page_cache_get_speculative(page))
		goto reset;

	/* Has the page moved or been split? */
	if (unlikely(page != xas_reload(xas))) {
		/* Raced with removal/split: drop our reference and retry. */
		put_page(page);
		goto reset;
	}
	return page;
reset:
	xas_reset(xas);
	goto retry;
}
/** /**
* find_get_entries - gang pagecache lookup * find_get_entries - gang pagecache lookup
* @mapping: The address_space to search * @mapping: The address_space to search
@ -1864,42 +1900,21 @@ unsigned find_get_entries(struct address_space *mapping,
return 0; return 0;
rcu_read_lock(); rcu_read_lock();
xas_for_each(&xas, page, ULONG_MAX) { while ((page = find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) {
if (xas_retry(&xas, page))
continue;
/*
* A shadow entry of a recently evicted page, a swap
* entry from shmem/tmpfs or a DAX entry. Return it
* without attempting to raise page count.
*/
if (xa_is_value(page))
goto export;
if (!page_cache_get_speculative(page))
goto retry;
/* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
/* /*
* Terminate early on finding a THP, to allow the caller to * Terminate early on finding a THP, to allow the caller to
* handle it all at once; but continue if this is hugetlbfs. * handle it all at once; but continue if this is hugetlbfs.
*/ */
if (PageTransHuge(page) && !PageHuge(page)) { if (!xa_is_value(page) && PageTransHuge(page) &&
!PageHuge(page)) {
page = find_subpage(page, xas.xa_index); page = find_subpage(page, xas.xa_index);
nr_entries = ret + 1; nr_entries = ret + 1;
} }
export:
indices[ret] = xas.xa_index; indices[ret] = xas.xa_index;
entries[ret] = page; entries[ret] = page;
if (++ret == nr_entries) if (++ret == nr_entries)
break; break;
continue;
put_page:
put_page(page);
retry:
xas_reset(&xas);
} }
rcu_read_unlock(); rcu_read_unlock();
return ret; return ret;
@ -1938,30 +1953,16 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
return 0; return 0;
rcu_read_lock(); rcu_read_lock();
xas_for_each(&xas, page, end) { while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
if (xas_retry(&xas, page))
continue;
/* Skip over shadow, swap and DAX entries */ /* Skip over shadow, swap and DAX entries */
if (xa_is_value(page)) if (xa_is_value(page))
continue; continue;
if (!page_cache_get_speculative(page))
goto retry;
/* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
pages[ret] = find_subpage(page, xas.xa_index); pages[ret] = find_subpage(page, xas.xa_index);
if (++ret == nr_pages) { if (++ret == nr_pages) {
*start = xas.xa_index + 1; *start = xas.xa_index + 1;
goto out; goto out;
} }
continue;
put_page:
put_page(page);
retry:
xas_reset(&xas);
} }
/* /*
@ -2061,9 +2062,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
return 0; return 0;
rcu_read_lock(); rcu_read_lock();
xas_for_each_marked(&xas, page, end, tag) { while ((page = find_get_entry(&xas, end, tag))) {
if (xas_retry(&xas, page))
continue;
/* /*
* Shadow entries should never be tagged, but this iteration * Shadow entries should never be tagged, but this iteration
* is lockless so there is a window for page reclaim to evict * is lockless so there is a window for page reclaim to evict
@ -2072,23 +2071,11 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
if (xa_is_value(page)) if (xa_is_value(page))
continue; continue;
if (!page_cache_get_speculative(page))
goto retry;
/* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas)))
goto put_page;
pages[ret] = page; pages[ret] = page;
if (++ret == nr_pages) { if (++ret == nr_pages) {
*index = page->index + thp_nr_pages(page); *index = page->index + thp_nr_pages(page);
goto out; goto out;
} }
continue;
put_page:
put_page(page);
retry:
xas_reset(&xas);
} }
/* /*