mm/truncate: Inline invalidate_complete_page() into its one caller
invalidate_inode_page() is the only caller of invalidate_complete_page() and inlining it reveals that the first check is unnecessary (because we hold the page locked, and we just retrieved the mapping from the page). Actually, it does make a difference, in that tail pages no longer fail at this check, so it's now possible to remove a tail page from a mapping.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
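For readers skimming the diff below, this is roughly what invalidate_inode_page() looks like once invalidate_complete_page() is folded into it. Only the tail of the function appears in the hunks; the opening page_mapping()/dirty/writeback checks are assumed from the surrounding code of that kernel version, so treat this as a sketch rather than the exact tree contents.

/*
 * Sketch of invalidate_inode_page() after the inlining described above.
 * The early checks (page_mapping(), PageDirty, PageWriteback) sit outside
 * the hunk context shown below and are assumptions; the tail matches the
 * hunk that modifies invalidate_inode_page().
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	/*
	 * The old "page->mapping != mapping" check from
	 * invalidate_complete_page() is dropped: the page is locked and the
	 * mapping was just read from it, so only tail pages could ever fail
	 * it, and those may now be removed from the mapping too.
	 */
	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	return remove_mapping(mapping, page);
}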
parent b9ccad2e5d
commit 1b8ddbeeb9
@@ -302,7 +302,7 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
 	 * found it, but truncated or holepunched or subjected to
 	 * invalidate_complete_page2 before we got the page lock (also
 	 * cases which we are happy to fail).  And we hold a reference,
-	 * so refcount care in invalidate_complete_page's remove_mapping
+	 * so refcount care in invalidate_inode_page's remove_mapping
 	 * prevents drop_caches from setting mapping to NULL beneath us.
 	 *
 	 * The case we do have to guard against is when memory pressure made
@@ -193,27 +193,6 @@ static void truncate_cleanup_folio(struct folio *folio)
 	folio_clear_mappedtodisk(folio);
 }
 
-/*
- * This is for invalidate_mapping_pages().  That function can be called at
- * any time, and is not supposed to throw away dirty pages.  But pages can
- * be marked dirty at any time too, so use remove_mapping which safely
- * discards clean, unused pages.
- *
- * Returns non-zero if the page was successfully invalidated.
- */
-static int
-invalidate_complete_page(struct address_space *mapping, struct page *page)
-{
-
-	if (page->mapping != mapping)
-		return 0;
-
-	if (page_has_private(page) && !try_to_release_page(page, 0))
-		return 0;
-
-	return remove_mapping(mapping, page);
-}
-
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 {
 	if (folio->mapping != mapping)
@@ -309,7 +288,10 @@ int invalidate_inode_page(struct page *page)
 		return 0;
 	if (page_mapped(page))
 		return 0;
-	return invalidate_complete_page(mapping, page);
+	if (page_has_private(page) && !try_to_release_page(page, 0))
+		return 0;
+
+	return remove_mapping(mapping, page);
 }
 
 /**
@@ -584,7 +566,7 @@ void invalidate_mapping_pagevec(struct address_space *mapping,
 }
 
 /*
- * This is like invalidate_complete_page(), except it ignores the page's
+ * This is like invalidate_inode_page(), except it ignores the page's
  * refcount.  We do this because invalidate_inode_pages2() needs stronger
  * invalidation guarantees, and cannot afford to leave pages behind because
  * shrink_page_list() has a temp ref on them, or because they're transiently