mm/writeback: Add folio_start_writeback()

Rename set_page_writeback() to folio_start_writeback() to match
folio_end_writeback().  Do not bother with wrappers that return void;
callers are perfectly capable of ignoring return values.

Add wrappers for set_page_writeback(), set_page_writeback_keepwrite() and
test_set_page_writeback() for compatibility with existing filesystems.
The main advantage of this patch is getting the writeback statistics
right for multi-page folios, though it also eliminates a couple of
calls to compound_head().
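
For a filesystem that already works in terms of folios, a converted
call site might look like the sketch below (the helper is hypothetical
and only for illustration; it is not part of this patch).  As with
test_set_page_writeback(), the return value reports whether writeback
was already set and may simply be ignored:

        /* Hypothetical caller, for illustration only. */
        static void example_begin_writeback(struct folio *folio)
        {
                /*
                 * Mark the folio as under writeback; the writeback
                 * statistics are adjusted by folio_nr_pages(folio).
                 * The returned previous writeback state is ignored.
                 */
                folio_start_writeback(folio);
        }

Unconverted filesystems keep calling set_page_writeback(), which is now
a compat wrapper around folio_start_writeback(page_folio(page)).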

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit f143f1ea5a (parent 269ccca389)
Author: Matthew Wilcox (Oracle)
Date:   2021-04-24 12:00:48 -04:00
3 changed files with 38 additions and 30 deletions

--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h

@@ -657,21 +657,22 @@ static __always_inline void SetPageUptodate(struct page *page)
 
 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
 
-int __test_set_page_writeback(struct page *page, bool keep_write);
+bool __folio_start_writeback(struct folio *folio, bool keep_write);
+bool set_page_writeback(struct page *page);
 
-#define test_set_page_writeback(page) \
-        __test_set_page_writeback(page, false)
-#define test_set_page_writeback_keepwrite(page) \
-        __test_set_page_writeback(page, true)
+#define folio_start_writeback(folio) \
+        __folio_start_writeback(folio, false)
+#define folio_start_writeback_keepwrite(folio) \
+        __folio_start_writeback(folio, true)
 
-static inline void set_page_writeback(struct page *page)
-{
-        test_set_page_writeback(page);
-}
-
 static inline void set_page_writeback_keepwrite(struct page *page)
 {
-        test_set_page_writeback_keepwrite(page);
+        folio_start_writeback_keepwrite(page_folio(page));
+}
+
+static inline bool test_set_page_writeback(struct page *page)
+{
+        return set_page_writeback(page);
 }
 
 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c

@@ -71,3 +71,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 }
 EXPORT_SYMBOL(migrate_page_copy);
 #endif
+
+bool set_page_writeback(struct page *page)
+{
+        return folio_start_writeback(page_folio(page));
+}
+EXPORT_SYMBOL(set_page_writeback);

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c

@@ -2811,21 +2811,23 @@ bool __folio_end_writeback(struct folio *folio)
         return ret;
 }
 
-int __test_set_page_writeback(struct page *page, bool keep_write)
+bool __folio_start_writeback(struct folio *folio, bool keep_write)
 {
-        struct address_space *mapping = page_mapping(page);
-        int ret, access_ret;
+        long nr = folio_nr_pages(folio);
+        struct address_space *mapping = folio_mapping(folio);
+        bool ret;
+        int access_ret;
 
-        lock_page_memcg(page);
+        folio_memcg_lock(folio);
         if (mapping && mapping_use_writeback_tags(mapping)) {
-                XA_STATE(xas, &mapping->i_pages, page_index(page));
+                XA_STATE(xas, &mapping->i_pages, folio_index(folio));
                 struct inode *inode = mapping->host;
                 struct backing_dev_info *bdi = inode_to_bdi(inode);
                 unsigned long flags;
 
                 xas_lock_irqsave(&xas, flags);
                 xas_load(&xas);
-                ret = TestSetPageWriteback(page);
+                ret = folio_test_set_writeback(folio);
                 if (!ret) {
                         bool on_wblist;
 
@@ -2836,43 +2838,42 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
                         if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
                                 struct bdi_writeback *wb = inode_to_wb(inode);
 
-                                inc_wb_stat(wb, WB_WRITEBACK);
+                                wb_stat_mod(wb, WB_WRITEBACK, nr);
                                 if (!on_wblist)
                                         wb_inode_writeback_start(wb);
                         }
 
                         /*
-                         * We can come through here when swapping anonymous
-                         * pages, so we don't necessarily have an inode to track
-                         * for sync.
+                         * We can come through here when swapping
+                         * anonymous folios, so we don't necessarily
+                         * have an inode to track for sync.
                          */
                         if (mapping->host && !on_wblist)
                                 sb_mark_inode_writeback(mapping->host);
                 }
-                if (!PageDirty(page))
+                if (!folio_test_dirty(folio))
                         xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
                 if (!keep_write)
                         xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
                 xas_unlock_irqrestore(&xas, flags);
         } else {
-                ret = TestSetPageWriteback(page);
+                ret = folio_test_set_writeback(folio);
         }
         if (!ret) {
-                inc_lruvec_page_state(page, NR_WRITEBACK);
-                inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+                lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
+                zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
         }
-        unlock_page_memcg(page);
-        access_ret = arch_make_page_accessible(page);
+        folio_memcg_unlock(folio);
+        access_ret = arch_make_folio_accessible(folio);
         /*
          * If writeback has been triggered on a page that cannot be made
          * accessible, it is too late to recover here.
          */
-        VM_BUG_ON_PAGE(access_ret != 0, page);
+        VM_BUG_ON_FOLIO(access_ret != 0, folio);
 
         return ret;
 }
-EXPORT_SYMBOL(__test_set_page_writeback);
+EXPORT_SYMBOL(__folio_start_writeback);
 
 /**
  * folio_wait_writeback - Wait for a folio to finish writeback.
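
The statistics improvement comes from the "nr = folio_nr_pages(folio)"
accounting above: a multi-page folio now adjusts NR_WRITEBACK,
NR_ZONE_WRITE_PENDING and the per-wb WB_WRITEBACK counters by its full
page count instead of by one.  A rough sketch of the difference,
assuming a hypothetical order-2 (four-page) folio:

        long nr = folio_nr_pages(folio);                /* nr == 4 */

        /* Before: one page accounted, regardless of folio size. */
        /* inc_lruvec_page_state(page, NR_WRITEBACK);      +1 */

        /* After: the whole folio is accounted. */
        lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr); /* +4 */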