mm/writeback: Add folio_clear_dirty_for_io()

Transform clear_page_dirty_for_io() into folio_clear_dirty_for_io()
and add a compatibility wrapper.  Also move the declaration to pagemap.h
as this is page cache functionality that doesn't need to be used by the
rest of the kernel.
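
As a rough sketch of what this means for callers (illustrative only; example_prepare_for_writeout() is a made-up helper and is not part of this patch), page-based code keeps working through the compatibility wrapper, while converted code passes the folio directly:

	#include <linux/pagemap.h>

	/* Sketch only, not from this patch. */
	static bool example_prepare_for_writeout(struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* Legacy form, now routed through the wrapper in folio-compat: */
		/*	return clear_page_dirty_for_io(page);			*/

		/* Folio-native form added by this patch: */
		return folio_clear_dirty_for_io(folio);
	}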

Increases the size of the kernel by 79 bytes.  While we remove a few
calls to compound_head(), we add a call to folio_nr_pages() to get the
stats correct for the eventual support of multi-page folios.
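
The accounting point can be pictured with a tiny sketch (not from the patch; example_dirty_stat_delta() is hypothetical): the delta applied to NR_FILE_DIRTY, NR_ZONE_WRITE_PENDING and WB_RECLAIMABLE now scales with the folio instead of always being 1.

	/* Sketch only: the per-folio adjustment used when the dirty flag clears. */
	static long example_dirty_stat_delta(struct folio *folio)
	{
		/* -1 for today's single-page folios; e.g. -16 for a future 64KiB folio. */
		return -folio_nr_pages(folio);
	}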

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit 9350f20a07
parent fdaf532a23
Author: Matthew Wilcox (Oracle)
Date:   2021-02-28 16:21:20 -05:00

4 changed files with 38 additions and 30 deletions

include/linux/mm.h

@@ -2008,7 +2008,6 @@ int redirty_page_for_writepage(struct writeback_control *wbc,
 				struct page *page);
 bool folio_mark_dirty(struct folio *folio);
 bool set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
-int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);

include/linux/pagemap.h

@@ -796,6 +796,8 @@ static inline void cancel_dirty_page(struct page *page)
 {
 	folio_cancel_dirty(page_folio(page));
 }
 
+bool folio_clear_dirty_for_io(struct folio *folio);
+bool clear_page_dirty_for_io(struct page *page);
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);

mm/folio-compat.c

@@ -89,3 +89,9 @@ int __set_page_dirty_nobuffers(struct page *page)
 	return filemap_dirty_folio(page_mapping(page), page_folio(page));
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
+
+bool clear_page_dirty_for_io(struct page *page)
+{
+	return folio_clear_dirty_for_io(page_folio(page));
+}
+EXPORT_SYMBOL(clear_page_dirty_for_io);

mm/page-writeback.c

@@ -2681,25 +2681,25 @@ void __folio_cancel_dirty(struct folio *folio)
 EXPORT_SYMBOL(__folio_cancel_dirty);
 
 /*
- * Clear a page's dirty flag, while caring for dirty memory accounting.
- * Returns true if the page was previously dirty.
- *
- * This is for preparing to put the page under writeout.  We leave the page
- * tagged as dirty in the xarray so that a concurrent write-for-sync
- * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
- * implementation will run either set_page_writeback() or set_page_dirty(),
- * at which stage we bring the page's dirty flag and xarray dirty tag
- * back into sync.
- *
- * This incoherency between the page's dirty flag and xarray tag is
- * unfortunate, but it only exists while the page is locked.
+ * Clear a folio's dirty flag, while caring for dirty memory accounting.
+ * Returns true if the folio was previously dirty.
+ *
+ * This is for preparing to put the folio under writeout.  We leave
+ * the folio tagged as dirty in the xarray so that a concurrent
+ * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
+ * The ->writepage implementation will run either folio_start_writeback()
+ * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
+ * and xarray dirty tag back into sync.
+ *
+ * This incoherency between the folio's dirty flag and xarray tag is
+ * unfortunate, but it only exists while the folio is locked.
  */
-int clear_page_dirty_for_io(struct page *page)
+bool folio_clear_dirty_for_io(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(page);
-	int ret = 0;
+	struct address_space *mapping = folio_mapping(folio);
+	bool ret = false;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 
 	if (mapping && mapping_can_writeback(mapping)) {
 		struct inode *inode = mapping->host;
@@ -2712,48 +2712,49 @@ int clear_page_dirty_for_io(struct page *page)
 		 * We use this sequence to make sure that
 		 *  (a) we account for dirty stats properly
 		 *  (b) we tell the low-level filesystem to
-		 *      mark the whole page dirty if it was
+		 *      mark the whole folio dirty if it was
 		 *      dirty in a pagetable. Only to then
-		 *  (c) clean the page again and return 1 to
+		 *  (c) clean the folio again and return 1 to
 		 *      cause the writeback.
 		 *
 		 * This way we avoid all nasty races with the
 		 * dirty bit in multiple places and clearing
 		 * them concurrently from different threads.
 		 *
-		 * Note! Normally the "set_page_dirty(page)"
+		 * Note! Normally the "folio_mark_dirty(folio)"
 		 * has no effect on the actual dirty bit - since
 		 * that will already usually be set. But we
 		 * need the side effects, and it can help us
 		 * avoid races.
 		 *
-		 * We basically use the page "master dirty bit"
+		 * We basically use the folio "master dirty bit"
		 * as a serialization point for all the different
 		 * threads doing their things.
 		 */
-		if (page_mkclean(page))
-			set_page_dirty(page);
+		if (folio_mkclean(folio))
+			folio_mark_dirty(folio);
 		/*
 		 * We carefully synchronise fault handlers against
-		 * installing a dirty pte and marking the page dirty
+		 * installing a dirty pte and marking the folio dirty
 		 * at this point.  We do this by having them hold the
-		 * page lock while dirtying the page, and pages are
+		 * page lock while dirtying the folio, and folios are
 		 * always locked coming in here, so we get the desired
 		 * exclusion.
 		 */
 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
-		if (TestClearPageDirty(page)) {
-			dec_lruvec_page_state(page, NR_FILE_DIRTY);
-			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-			dec_wb_stat(wb, WB_RECLAIMABLE);
-			ret = 1;
+		if (folio_test_clear_dirty(folio)) {
+			long nr = folio_nr_pages(folio);
+			lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
+			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+			wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
+			ret = true;
 		}
 		unlocked_inode_to_wb_end(inode, &cookie);
 		return ret;
 	}
-	return TestClearPageDirty(page);
+	return folio_test_clear_dirty(folio);
 }
-EXPORT_SYMBOL(clear_page_dirty_for_io);
+EXPORT_SYMBOL(folio_clear_dirty_for_io);
 
 static void wb_inode_writeback_start(struct bdi_writeback *wb)
 {
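
A minimal sketch of the intended calling sequence (example_writeout_folio() is hypothetical and only mirrors the comment above; it assumes the usual writeback includes and a folio that arrives locked):

	/* Sketch only, not from this patch. */
	static int example_writeout_folio(struct folio *folio)
	{
		/* The folio is locked by the caller, as folio_clear_dirty_for_io() requires. */
		if (!folio_clear_dirty_for_io(folio))
			return 0;		/* already clean, nothing to write */

		folio_start_writeback(folio);	/* brings dirty flag and xarray tag back into sync */
		/* ... hand the folio to the backing store here ... */
		folio_end_writeback(folio);
		return 0;
	}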