fs: Change try_to_free_buffers() to take a folio
All but two of the callers already have a folio; pass a folio into
try_to_free_buffers(). This removes the last user of cancel_dirty_page()
so remove that wrapper function too.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
commit 68189fef88
parent 731222557a

fs/buffer.c | 42
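For context, a minimal sketch (not part of this commit) of what the conversion looks like for a caller that still only has a struct page: it wraps the page with page_folio() before calling the now folio-based API, as grow_dev_page() and clean_buffers() do in the hunks below. example_free_page_buffers() is a hypothetical caller used purely for illustration.

#include <linux/mm.h>
#include <linux/buffer_head.h>

/* Hypothetical caller, for illustration only: it still holds a struct page. */
static bool example_free_page_buffers(struct page *page)
{
	/* Before this commit: return try_to_free_buffers(page); */
	struct folio *folio = page_folio(page);

	/* The API is now folio-based and returns bool instead of int. */
	return try_to_free_buffers(folio);
}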
@@ -955,7 +955,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 						size);
 			goto done;
 		}
-		if (!try_to_free_buffers(page))
+		if (!try_to_free_buffers(page_folio(page)))
 			goto failed;
 	}

@@ -3155,20 +3155,20 @@ int sync_dirty_buffer(struct buffer_head *bh)
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
- * try_to_free_buffers() checks if all the buffers on this particular page
+ * try_to_free_buffers() checks if all the buffers on this particular folio
  * are unused, and releases them if so.
  *
  * Exclusion against try_to_free_buffers may be obtained by either
- * locking the page or by holding its mapping's private_lock.
+ * locking the folio or by holding its mapping's private_lock.
  *
- * If the page is dirty but all the buffers are clean then we need to
- * be sure to mark the page clean as well. This is because the page
+ * If the folio is dirty but all the buffers are clean then we need to
+ * be sure to mark the folio clean as well. This is because the folio
  * may be against a block device, and a later reattachment of buffers
- * to a dirty page will set *all* buffers dirty. Which would corrupt
+ * to a dirty folio will set *all* buffers dirty. Which would corrupt
  * filesystem data on the same device.
  *
- * The same applies to regular filesystem pages: if all the buffers are
- * clean then we set the page clean and proceed. To do that, we require
+ * The same applies to regular filesystem folios: if all the buffers are
+ * clean then we set the folio clean and proceed. To do that, we require
  * total exclusion from block_dirty_folio(). That is obtained with
  * private_lock.
  *

@@ -3207,40 +3207,40 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 	return 0;
 }
 
-int try_to_free_buffers(struct page *page)
+bool try_to_free_buffers(struct folio *folio)
 {
-	struct address_space * const mapping = page->mapping;
+	struct address_space * const mapping = folio->mapping;
 	struct buffer_head *buffers_to_free = NULL;
-	int ret = 0;
+	bool ret = 0;
 
-	BUG_ON(!PageLocked(page));
-	if (PageWriteback(page))
-		return 0;
+	BUG_ON(!folio_test_locked(folio));
+	if (folio_test_writeback(folio))
+		return false;
 
 	if (mapping == NULL) {		/* can this still happen? */
-		ret = drop_buffers(page, &buffers_to_free);
+		ret = drop_buffers(&folio->page, &buffers_to_free);
 		goto out;
 	}
 
 	spin_lock(&mapping->private_lock);
-	ret = drop_buffers(page, &buffers_to_free);
+	ret = drop_buffers(&folio->page, &buffers_to_free);
 
 	/*
 	 * If the filesystem writes its buffers by hand (eg ext3)
-	 * then we can have clean buffers against a dirty page. We
-	 * clean the page here; otherwise the VM will never notice
+	 * then we can have clean buffers against a dirty folio. We
+	 * clean the folio here; otherwise the VM will never notice
 	 * that the filesystem did any IO at all.
 	 *
 	 * Also, during truncate, discard_buffer will have marked all
-	 * the page's buffers clean. We discover that here and clean
-	 * the page also.
+	 * the folio's buffers clean. We discover that here and clean
+	 * the folio also.
 	 *
 	 * private_lock must be held over this entire operation in order
 	 * to synchronise against block_dirty_folio and prevent the
 	 * dirty bit from being lost.
 	 */
 	if (ret)
-		cancel_dirty_page(page);
+		folio_cancel_dirty(folio);
 	spin_unlock(&mapping->private_lock);
 out:
 	if (buffers_to_free) {

@@ -3255,7 +3255,7 @@ static bool ext4_release_folio(struct folio *folio, gfp_t wait)
 	if (journal)
 		return jbd2_journal_try_to_free_buffers(journal, folio);
 	else
-		return try_to_free_buffers(&folio->page);
+		return try_to_free_buffers(folio);
 }
 
 static bool ext4_inode_datasync_dirty(struct inode *inode)

@@ -757,7 +757,7 @@ bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
 	} while (bh != head);
 	gfs2_log_unlock(sdp);
 
-	return try_to_free_buffers(&folio->page);
+	return try_to_free_buffers(folio);
 
 cannot_release:
 	gfs2_log_unlock(sdp);

@@ -124,7 +124,7 @@ static bool hfs_release_folio(struct folio *folio, gfp_t mask)
 		} while (--i && nidx < tree->node_count);
 		spin_unlock(&tree->hash_lock);
 	}
-	return res ? try_to_free_buffers(&folio->page) : false;
+	return res ? try_to_free_buffers(folio) : false;
 }
 
 static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)

@@ -121,7 +121,7 @@ static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
 		} while (--i && nidx < tree->node_count);
 		spin_unlock(&tree->hash_lock);
 	}
-	return res ? try_to_free_buffers(&folio->page) : false;
+	return res ? try_to_free_buffers(folio) : false;
 }
 
 static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)

@@ -82,7 +82,7 @@ static void release_buffer_page(struct buffer_head *bh)
 
 	folio_get(folio);
 	__brelse(bh);
-	try_to_free_buffers(&folio->page);
+	try_to_free_buffers(folio);
 	folio_unlock(folio);
 	folio_put(folio);
 	return;

@@ -2175,7 +2175,7 @@ bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio)
 			goto busy;
 	} while ((bh = bh->b_this_page) != head);
 
-	ret = try_to_free_buffers(&folio->page);
+	ret = try_to_free_buffers(folio);
 busy:
 	return ret;
 }

@@ -2482,7 +2482,7 @@ int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
 	} while (bh != head);
 
 	if (!partial_page) {
-		if (may_free && try_to_free_buffers(&folio->page))
+		if (may_free && try_to_free_buffers(folio))
 			J_ASSERT(!folio_buffers(folio));
 	}
 	return 0;

@@ -431,7 +431,7 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
 	 * disk before we reach the platter.
 	 */
 	if (buffer_heads_over_limit && PageUptodate(page))
-		try_to_free_buffers(page);
+		try_to_free_buffers(page_folio(page));
 }
 
 /*

@@ -502,7 +502,7 @@ static bool ocfs2_release_folio(struct folio *folio, gfp_t wait)
 {
 	if (!folio_buffers(folio))
 		return false;
-	return try_to_free_buffers(&folio->page);
+	return try_to_free_buffers(folio);
 }
 
 static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,

@@ -3234,7 +3234,7 @@ static bool reiserfs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
 		bh = bh->b_this_page;
 	} while (bh != head);
 	if (ret)
-		ret = try_to_free_buffers(&folio->page);
+		ret = try_to_free_buffers(folio);
 	spin_unlock(&j->j_dirty_buffers_lock);
 	return ret;
 }

@@ -606,7 +606,7 @@ static void release_buffer_page(struct buffer_head *bh)
 		folio_get(folio);
 		put_bh(bh);
 		if (!folio->mapping)
-			try_to_free_buffers(&folio->page);
+			try_to_free_buffers(folio);
 		folio_unlock(folio);
 		folio_put(folio);
 	} else {

@@ -158,7 +158,7 @@ void mark_buffer_write_io_error(struct buffer_head *bh);
 void touch_buffer(struct buffer_head *bh);
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset);
-int try_to_free_buffers(struct page *);
+bool try_to_free_buffers(struct folio *);
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
 		bool retry);
 void create_empty_buffers(struct page *, unsigned long,

@@ -402,7 +402,7 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
 #else /* CONFIG_BLOCK */
 
 static inline void buffer_init(void) {}
-static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline bool try_to_free_buffers(struct folio *folio) { return true; }
 static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }

@@ -1067,10 +1067,6 @@ static inline void folio_cancel_dirty(struct folio *folio)
 	if (folio_test_dirty(folio))
 		__folio_cancel_dirty(folio);
 }
-static inline void cancel_dirty_page(struct page *page)
-{
-	folio_cancel_dirty(page_folio(page));
-}
 bool folio_clear_dirty_for_io(struct folio *folio);
 bool clear_page_dirty_for_io(struct page *page);
 void folio_invalidate(struct folio *folio, size_t offset, size_t length);

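Since the cancel_dirty_page() wrapper is removed in the hunk above, a caller outside this diff that still only has a struct page would open-code the wrapper's one-line body. A minimal sketch, assuming such a caller exists; example_cancel_dirty() is hypothetical and not part of this commit:

#include <linux/pagemap.h>

/* Hypothetical stand-in for the removed cancel_dirty_page() wrapper. */
static inline void example_cancel_dirty(struct page *page)
{
	folio_cancel_dirty(page_folio(page));
}
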
@@ -3957,6 +3957,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
 
 	if (mapping && mapping->a_ops->release_folio)
 		return mapping->a_ops->release_folio(folio, gfp);
-	return try_to_free_buffers(&folio->page);
+	return try_to_free_buffers(folio);
 }
 EXPORT_SYMBOL(filemap_release_folio);

@@ -1013,7 +1013,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	if (!page->mapping) {
 		VM_BUG_ON_PAGE(PageAnon(page), page);
 		if (page_has_private(page)) {
-			try_to_free_buffers(page);
+			try_to_free_buffers(folio);
 			goto out_unlock_both;
 		}
 	} else if (page_mapped(page)) {

@@ -1181,7 +1181,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping)
 		 * folio->mapping == NULL while being dirty with clean buffers.
 		 */
 		if (folio_test_private(folio)) {
-			if (try_to_free_buffers(&folio->page)) {
+			if (try_to_free_buffers(folio)) {
 				folio_clear_dirty(folio);
 				pr_info("%s: orphaned folio\n", __func__);
 				return PAGE_CLEAN;