mm/writeback: Add filemap_dirty_folio()

Reimplement __set_page_dirty_nobuffers() as a wrapper around
filemap_dirty_folio().  Eventually folio_mark_dirty() will pass the
folio's mapping to the address space's ->dirty_folio() operation, so
add the parameter to filemap_dirty_folio() now.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Commit: 85d4d2ebc8
Parent: b9b0ff61ee
@@ -393,6 +393,7 @@ void writeback_set_ratelimit(void);

 void tag_pages_for_writeback(struct address_space *mapping,
 			     pgoff_t start, pgoff_t end);
+bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
 void account_page_redirty(struct page *page);

 void sb_mark_inode_writeback(struct inode *inode);
@@ -83,3 +83,9 @@ bool set_page_dirty(struct page *page)
 	return folio_mark_dirty(page_folio(page));
 }
 EXPORT_SYMBOL(set_page_dirty);
+
/*
 * Compatibility wrapper: mark @page dirty without touching buffer_heads.
 * Forwards to filemap_dirty_folio() on the page's folio; returns the
 * bool result widened to int (1 = newly dirtied, 0 = already dirty).
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	return filemap_dirty_folio(page_mapping(page), page_folio(page));
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
@@ -2505,41 +2505,43 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
 }

/*
|
/**
|
||||||
* For address_spaces which do not use buffers. Just tag the page as dirty in
|
* filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
|
||||||
* the xarray.
|
* @mapping: Address space this folio belongs to.
|
||||||
|
* @folio: Folio to be marked as dirty.
|
||||||
*
|
*
|
||||||
* This is also used when a single buffer is being dirtied: we want to set the
|
* Filesystems which do not use buffer heads should call this function
|
||||||
* page dirty in that case, but not all the buffers. This is a "bottom-up"
|
* from their set_page_dirty address space operation. It ignores the
|
||||||
* dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
|
* contents of folio_get_private(), so if the filesystem marks individual
|
||||||
|
* blocks as dirty, the filesystem should handle that itself.
|
||||||
*
|
*
|
||||||
* The caller must ensure this doesn't race with truncation. Most will simply
|
* This is also sometimes used by filesystems which use buffer_heads when
|
||||||
* hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
|
* a single buffer is being dirtied: we want to set the folio dirty in
|
||||||
* the pte lock held, which also locks out truncation.
|
* that case, but not all the buffers. This is a "bottom-up" dirtying,
|
||||||
|
* whereas __set_page_dirty_buffers() is a "top-down" dirtying.
|
||||||
|
*
|
||||||
|
* The caller must ensure this doesn't race with truncation. Most will
|
||||||
|
* simply hold the folio lock, but e.g. zap_pte_range() calls with the
|
||||||
|
* folio mapped and the pte lock held, which also locks out truncation.
|
||||||
*/
|
*/
|
||||||
int __set_page_dirty_nobuffers(struct page *page)
|
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
|
||||||
{
|
{
|
||||||
lock_page_memcg(page);
|
folio_memcg_lock(folio);
|
||||||
if (!TestSetPageDirty(page)) {
|
if (folio_test_set_dirty(folio)) {
|
||||||
struct address_space *mapping = page_mapping(page);
|
folio_memcg_unlock(folio);
|
||||||
|
return false;
|
||||||
if (!mapping) {
|
|
||||||
unlock_page_memcg(page);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
__set_page_dirty(page, mapping, !PagePrivate(page));
|
|
||||||
unlock_page_memcg(page);
|
|
||||||
|
|
||||||
if (mapping->host) {
|
|
||||||
/* !PageAnon && !swapper_space */
|
|
||||||
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
|
|
||||||
}
|
|
||||||
return 1;
|
|
||||||
}
|
}
|
||||||
unlock_page_memcg(page);
|
|
||||||
return 0;
|
__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
|
||||||
|
folio_memcg_unlock(folio);
|
||||||
|
|
||||||
|
if (mapping->host) {
|
||||||
|
/* !PageAnon && !swapper_space */
|
||||||
|
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
|
EXPORT_SYMBOL(filemap_dirty_folio);
/*
 * Call this whenever redirtying a page, to de-account the dirty counters