mm/filemap: Add folio_end_writeback()

Add an end_page_writeback() wrapper function for users that are not yet
converted to folios.

folio_end_writeback() is less than half the size of end_page_writeback()
at just 105 bytes compared to 228 bytes, due to removing all the
compound_head() calls.  The 30 byte wrapper function makes this a net
saving of 93 bytes.
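
As an illustration (not part of the patch), a minimal sketch of what the two kinds of call site look like after this change; the example_*() helpers are hypothetical, only end_page_writeback(), folio_end_writeback() and page_folio() are the real interfaces touched here:

    /* Hypothetical completion helpers, for illustration only. */

    /* An unconverted caller keeps passing a struct page; the new wrapper
     * in mm/folio-compat.c does the page_folio() conversion for it. */
    static void example_end_write_page(struct page *page)
    {
            end_page_writeback(page);
    }

    /* A converted caller passes the folio directly and skips the repeated
     * compound_head() lookups that made the page-based function larger. */
    static void example_end_write_folio(struct folio *folio)
    {
            folio_end_writeback(folio);
    }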

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Author: Matthew Wilcox (Oracle)
Date:   2021-03-03 15:21:55 -05:00
commit 4268b48077, parent 575ced1c8b
3 changed files with 29 additions and 23 deletions

--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h

@@ -767,7 +767,8 @@ static inline int wait_on_page_locked_killable(struct page *page)
 int put_and_wait_on_page_locked(struct page *page, int state);
 void wait_on_page_writeback(struct page *page);
 int wait_on_page_writeback_killable(struct page *page);
-extern void end_page_writeback(struct page *page);
+void end_page_writeback(struct page *page);
+void folio_end_writeback(struct folio *folio);
 void wait_for_stable_page(struct page *page);
 void __set_page_dirty(struct page *, struct address_space *, int warn);

--- a/mm/filemap.c
+++ b/mm/filemap.c

@@ -1230,11 +1230,11 @@ static void wake_up_page_bit(struct page *page, int bit_nr)
         spin_unlock_irqrestore(&q->lock, flags);
 }
 
-static void wake_up_page(struct page *page, int bit)
+static void folio_wake(struct folio *folio, int bit)
 {
-        if (!PageWaiters(page))
+        if (!folio_test_waiters(folio))
                 return;
-        wake_up_page_bit(page, bit);
+        wake_up_page_bit(&folio->page, bit);
 }
 
 /*
@@ -1571,39 +1571,38 @@ EXPORT_SYMBOL(wait_on_page_private_2_killable);
 EXPORT_SYMBOL(wait_on_page_private_2_killable);
 
 /**
- * end_page_writeback - end writeback against a page
- * @page: the page
+ * folio_end_writeback - End writeback against a folio.
+ * @folio: The folio.
  */
-void end_page_writeback(struct page *page)
+void folio_end_writeback(struct folio *folio)
 {
         /*
-         * TestClearPageReclaim could be used here but it is an atomic
-         * operation and overkill in this particular case. Failing to
-         * shuffle a page marked for immediate reclaim is too mild to
-         * justify taking an atomic operation penalty at the end of
-         * ever page writeback.
+         * folio_test_clear_reclaim() could be used here but it is an
+         * atomic operation and overkill in this particular case. Failing
+         * to shuffle a folio marked for immediate reclaim is too mild
+         * a gain to justify taking an atomic operation penalty at the
+         * end of every folio writeback.
          */
-        if (PageReclaim(page)) {
-                struct folio *folio = page_folio(page);
-                ClearPageReclaim(page);
+        if (folio_test_reclaim(folio)) {
+                folio_clear_reclaim(folio);
                 folio_rotate_reclaimable(folio);
         }
 
         /*
-         * Writeback does not hold a page reference of its own, relying
+         * Writeback does not hold a folio reference of its own, relying
          * on truncation to wait for the clearing of PG_writeback.
-         * But here we must make sure that the page is not freed and
-         * reused before the wake_up_page().
+         * But here we must make sure that the folio is not freed and
+         * reused before the folio_wake().
          */
-        get_page(page);
-        if (!test_clear_page_writeback(page))
+        folio_get(folio);
+        if (!test_clear_page_writeback(&folio->page))
                 BUG();
 
         smp_mb__after_atomic();
-        wake_up_page(page, PG_writeback);
-        put_page(page);
+        folio_wake(folio, PG_writeback);
+        folio_put(folio);
 }
-EXPORT_SYMBOL(end_page_writeback);
+EXPORT_SYMBOL(folio_end_writeback);
 
 /*
  * After completing I/O on a page, call this routine to update the page

--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c

@@ -17,3 +17,9 @@ void unlock_page(struct page *page)
         return folio_unlock(page_folio(page));
 }
 EXPORT_SYMBOL(unlock_page);
+
+void end_page_writeback(struct page *page)
+{
+        return folio_end_writeback(page_folio(page));
+}
+EXPORT_SYMBOL(end_page_writeback);