Filesystem folio changes for 5.18

Merge tag 'folio-5.18b' of git://git.infradead.org/users/willy/pagecache

Pull filesystem folio updates from Matthew Wilcox:
 "Primarily this series converts some of the address_space operations to
  take a folio instead of a page.

  Notably:

   - a_ops->is_partially_uptodate() takes a folio instead of a page and
     changes the type of the 'from' and 'count' arguments to make it
     obvious they're bytes.

   - a_ops->invalidatepage() becomes ->invalidate_folio() and has a
     similar type change.

   - a_ops->launder_page() becomes ->launder_folio()

   - a_ops->set_page_dirty() becomes ->dirty_folio() and adds the
     address_space as an argument.

  There are a couple of other misc changes up front that weren't worth
  separating into their own pull request"

* tag 'folio-5.18b' of git://git.infradead.org/users/willy/pagecache: (53 commits)
  fs: Remove aops ->set_page_dirty
  fb_defio: Use noop_dirty_folio()
  fs: Convert __set_page_dirty_no_writeback to noop_dirty_folio
  fs: Convert __set_page_dirty_buffers to block_dirty_folio
  nilfs: Convert nilfs_set_page_dirty() to nilfs_dirty_folio()
  mm: Convert swap_set_page_dirty() to swap_dirty_folio()
  ubifs: Convert ubifs_set_page_dirty to ubifs_dirty_folio
  f2fs: Convert f2fs_set_node_page_dirty to f2fs_dirty_node_folio
  f2fs: Convert f2fs_set_data_page_dirty to f2fs_dirty_data_folio
  f2fs: Convert f2fs_set_meta_page_dirty to f2fs_dirty_meta_folio
  afs: Convert afs_dir_set_page_dirty() to afs_dir_dirty_folio()
  btrfs: Convert extent_range_redirty_for_io() to use folios
  fs: Convert trivial uses of __set_page_dirty_nobuffers to filemap_dirty_folio
  btrfs: Convert from set_page_dirty to dirty_folio
  fscache: Convert fscache_set_page_dirty() to fscache_dirty_folio()
  fs: Add aops->dirty_folio
  fs: Remove aops->launder_page
  orangefs: Convert launder_page to launder_folio
  nfs: Convert from launder_page to launder_folio
  fuse: Convert from launder_page to launder_folio
  ...
commit 6b1f86f8e9 (Linus Torvalds, 2022-03-22 18:26:56 -07:00)
94 changed files with 848 additions and 874 deletions
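At a glance, the conversion replaces these operations (prototypes copied from
the locking.rst hunk below; old set first, new set second):

	/* Old, page-based operations */
	int (*set_page_dirty)(struct page *page);
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);
	int (*launder_page)(struct page *);
	int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);

	/* New, folio-based operations; byte arguments become size_t */
	bool (*dirty_folio)(struct address_space *, struct folio *folio);
	void (*invalidate_folio)(struct folio *, size_t start, size_t len);
	int (*launder_folio)(struct folio *);
	bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);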

@@ -345,8 +345,9 @@ The following facilities are provided to manage this:
To support this, the following functions are provided::
int fscache_set_page_dirty(struct page *page,
struct fscache_cookie *cookie);
bool fscache_dirty_folio(struct address_space *mapping,
struct folio *folio,
struct fscache_cookie *cookie);
void fscache_unpin_writeback(struct writeback_control *wbc,
struct fscache_cookie *cookie);
void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
@@ -354,7 +355,7 @@ To support this, the following functions are provided::
const void *aux);
The *set* function is intended to be called from the filesystem's
``set_page_dirty`` address space operation. If ``I_PINNING_FSCACHE_WB`` is not
``dirty_folio`` address space operation. If ``I_PINNING_FSCACHE_WB`` is not
set, it sets that flag and increments the use count on the cookie (the caller
must already have called ``fscache_use_cookie()``).
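To illustrate the calling convention, a minimal sketch of a network
filesystem's ->dirty_folio built on fscache_dirty_folio(), modeled on the
v9fs, afs and cifs conversions later in this series; the myfs_* names,
MYFS_I() and myfs_inode_cookie() are hypothetical:

	static bool myfs_dirty_folio(struct address_space *mapping,
				     struct folio *folio)
	{
		struct myfs_inode *mi = MYFS_I(mapping->host);	/* hypothetical */

		/* Pins the cookie for writeback and marks the folio dirty. */
		return fscache_dirty_folio(mapping, folio, myfs_inode_cookie(mi));
	}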

@@ -239,7 +239,7 @@ prototypes::
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
int (*writepages)(struct address_space *, struct writeback_control *);
int (*set_page_dirty)(struct page *page);
bool (*dirty_folio)(struct address_space *, struct folio *folio);
void (*readahead)(struct readahead_control *);
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
@@ -250,21 +250,21 @@ prototypes::
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
bool (*isolate_page) (struct page *, isolate_mode_t);
int (*migratepage)(struct address_space *, struct page *, struct page *);
void (*putback_page) (struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
int (*error_remove_page)(struct address_space *, struct page *);
int (*swap_activate)(struct file *);
int (*swap_deactivate)(struct file *);
locking rules:
All except set_page_dirty and freepage may block
All except dirty_folio and freepage may block
====================== ======================== ========= ===============
ops PageLocked(page) i_rwsem invalidate_lock
@@ -272,20 +272,20 @@ ops PageLocked(page) i_rwsem invalidate_lock
writepage: yes, unlocks (see below)
readpage: yes, unlocks shared
writepages:
set_page_dirty no
dirty_folio maybe
readahead: yes, unlocks shared
readpages: no shared
write_begin: locks the page exclusive
write_end: yes, unlocks exclusive
bmap:
invalidatepage: yes exclusive
invalidate_folio: yes exclusive
releasepage: yes
freepage: yes
direct_IO:
isolate_page: yes
migratepage: yes (both)
putback_page: yes
launder_page: yes
launder_folio: yes
is_partially_uptodate: yes
error_remove_page: yes
swap_activate: no
@@ -361,22 +361,22 @@ If nr_to_write is NULL, all dirty pages must be written.
writepages should _only_ write pages which are present on
mapping->io_pages.
->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback. It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.
->dirty_folio() is called from various places in the kernel when
the target folio is marked as needing writeback. The folio cannot be
truncated because either the caller holds the folio lock, or the caller
has found the folio while holding the page table lock which will block
truncation.
->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away. Please,
keep it that way and don't breed new callers.
->invalidatepage() is called when the filesystem must attempt to drop
->invalidate_folio() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead. The filesystem must exclusively acquire
invalidate_lock before invalidating page cache in truncate / hole punch path
(and thus calling into ->invalidatepage) to block races between page cache
invalidation and page cache filling functions (fault, read, ...).
returns zero on success. The filesystem must exclusively acquire
invalidate_lock before invalidating page cache in truncate / hole punch
path (and thus calling into ->invalidate_folio) to block races between page
cache invalidation and page cache filling functions (fault, read, ...).
->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it. It returns zero to
@@ -386,9 +386,9 @@ the kernel assumes that the fs has no private interest in the buffers.
->freepage() is called when the kernel is done dropping the page
from the page cache.
->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
->launder_folio() may be called prior to releasing a folio if
it is still found to be dirty. It returns zero if the folio was successfully
cleaned, or an error value if not. Note that in order to prevent the folio
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.
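To make that contract concrete, a hedged sketch of a ->launder_folio
implementation following the rules above; myfs_write_folio_locked() is a
hypothetical writeback helper, and the folio stays locked throughout:

	static int myfs_launder_folio(struct folio *folio)
	{
		int ret = 0;

		/* The caller holds the folio lock; keep it across writeback. */
		if (folio_clear_dirty_for_io(folio))
			ret = myfs_write_folio_locked(folio);	/* hypothetical */
		return ret;	/* 0 if cleaned, -errno otherwise */
	}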

@@ -658,7 +658,7 @@ pages, however the address_space has finer control of write sizes.
The read process essentially only requires 'readpage'. The write
process is more complicated and uses write_begin/write_end or
set_page_dirty to write data into the address_space, and writepage and
dirty_folio to write data into the address_space, and writepage and
writepages to writeback data to storage.
Adding and removing pages to/from an address_space is protected by the
@@ -724,7 +724,7 @@ cache in your filesystem. The following members are defined:
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
int (*writepages)(struct address_space *, struct writeback_control *);
int (*set_page_dirty)(struct page *page);
bool (*dirty_folio)(struct address_space *, struct folio *);
void (*readahead)(struct readahead_control *);
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
@@ -735,7 +735,7 @@ cache in your filesystem. The following members are defined:
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
@@ -745,10 +745,10 @@ cache in your filesystem. The following members are defined:
int (*migratepage) (struct page *, struct page *);
/* put migration-failed page back to right list */
void (*putback_page) (struct page *);
int (*launder_page) (struct page *);
int (*launder_folio) (struct folio *);
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct page *, bool *, bool *);
int (*error_remove_page) (struct mapping *mapping, struct page *page);
int (*swap_activate)(struct file *);
@@ -793,13 +793,13 @@ cache in your filesystem. The following members are defined:
This will choose pages from the address space that are tagged as
DIRTY and will pass them to ->writepage.
``set_page_dirty``
called by the VM to set a page dirty. This is particularly
needed if an address space attaches private data to a page, and
that data needs to be updated when a page is dirtied. This is
``dirty_folio``
called by the VM to mark a folio as dirty. This is particularly
needed if an address space attaches private data to a folio, and
that data needs to be updated when a folio is dirtied. This is
called, for example, when a memory mapped page gets modified.
If defined, it should set the PageDirty flag, and the
PAGECACHE_TAG_DIRTY tag in the radix tree.
If defined, it should set the folio dirty flag, and the
PAGECACHE_TAG_DIRTY search mark in i_pages.
``readahead``
Called by the VM to read pages associated with the address_space
@@ -872,15 +872,15 @@ cache in your filesystem. The following members are defined:
to find out where the blocks in the file are and uses those
addresses directly.
``invalidatepage``
If a page has PagePrivate set, then invalidatepage will be
called when part or all of the page is to be removed from the
``invalidate_folio``
If a folio has private data, then invalidate_folio will be
called when part or all of the folio is to be removed from the
address space. This generally corresponds to either a
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
will be PAGE_SIZE). Any private data associated with the page
will be folio_size()). Any private data associated with the page
should be updated to reflect this truncation. If offset is 0
and length is PAGE_SIZE, then the private data should be
and length is folio_size(), then the private data should be
released, because the page must be able to be completely
discarded. This may be done by calling the ->releasepage
function, but in this case the release MUST succeed.
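Pulling those rules together, a sketch of a minimal ->invalidate_folio for a
filesystem that keeps a kmalloc'd blob in folio->private; myfs_trim_private()
is hypothetical:

	static void myfs_invalidate_folio(struct folio *folio, size_t offset,
					  size_t length)
	{
		if (!folio_get_private(folio))
			return;

		if (offset == 0 && length == folio_size(folio))
			/* Whole folio invalidated: private data must be released. */
			kfree(folio_detach_private(folio));
		else
			/* Partial truncation: trim the private state. */
			myfs_trim_private(folio, offset, length);	/* hypothetical */
	}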
@@ -934,16 +934,16 @@ cache in your filesystem. The following members are defined:
``putback_page``
Called by the VM when isolated page's migration fails.
``launder_page``
Called before freeing a page - it writes back the dirty page.
To prevent redirtying the page, it is kept locked during the
``launder_folio``
Called before freeing a folio - it writes back the dirty folio.
To prevent redirtying the folio, it is kept locked during the
whole operation.
``is_partially_uptodate``
Called by the VM when reading a file through the pagecache when
the underlying blocksize != pagesize. If the required block is
up to date then the read can complete without needing the IO to
bring the whole page up to date.
the underlying blocksize is smaller than the size of the folio.
If the required block is up to date then the read can complete
without needing I/O to bring the whole page up to date.
``is_dirty_writeback``
Called by the VM when attempting to reclaim a page. The VM uses

@@ -428,7 +428,8 @@ static int blkdev_writepages(struct address_space *mapping,
}
const struct address_space_operations def_blk_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = blkdev_readpage,
.readahead = blkdev_readahead,
.writepage = blkdev_writepage,

@@ -346,8 +346,7 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
}
static const struct address_space_operations dev_dax_aops = {
.set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
.dirty_folio = noop_dirty_folio,
};
static int dax_open(struct inode *inode, struct file *filp)

@@ -151,15 +151,8 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
.page_mkwrite = fb_deferred_io_mkwrite,
};
static int fb_deferred_io_set_page_dirty(struct page *page)
{
if (!PageDirty(page))
SetPageDirty(page);
return 0;
}
static const struct address_space_operations fb_deferred_io_aops = {
.set_page_dirty = fb_deferred_io_set_page_dirty,
.dirty_folio = noop_dirty_folio,
};
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
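This is behaviour-preserving for fb_defio: the deleted helper only set the
dirty flag and never queued writeback. A sketch of what noop_dirty_folio()
amounts to here, an assumption based on the old code above (the real helper
lives in mm):

	static bool noop_dirty_folio_sketch(struct address_space *mapping,
					    struct folio *folio)
	{
		/* Mark dirty, report whether it was newly dirtied; no writeback. */
		if (folio_test_dirty(folio))
			return false;
		return !folio_test_set_dirty(folio);
	}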

@@ -158,18 +158,9 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
return 1;
}
/**
* v9fs_invalidate_page - Invalidate a page completely or partially
* @page: The page to be invalidated
* @offset: offset of the invalidated region
* @length: length of the invalidated region
*/
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct folio *folio = page_folio(page);
folio_wait_fscache(folio);
}
@@ -249,16 +240,8 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
return retval;
}
/**
* v9fs_launder_page - Writeback a dirty page
* @page: The page to be cleaned up
*
* Returns 0 on success.
*/
static int v9fs_launder_page(struct page *page)
static int v9fs_launder_folio(struct folio *folio)
{
struct folio *folio = page_folio(page);
int retval;
if (folio_clear_dirty_for_io(folio)) {
@@ -376,25 +359,25 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
* Mark a page as having been made dirty and thus needing writeback. We also
* need to pin the cache object to write back to.
*/
static int v9fs_set_page_dirty(struct page *page)
static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
struct v9fs_inode *v9inode = V9FS_I(page->mapping->host);
struct v9fs_inode *v9inode = V9FS_I(mapping->host);
return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode));
return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
}
#else
#define v9fs_set_page_dirty __set_page_dirty_nobuffers
#define v9fs_dirty_folio filemap_dirty_folio
#endif
const struct address_space_operations v9fs_addr_operations = {
.readpage = v9fs_vfs_readpage,
.readahead = v9fs_vfs_readahead,
.set_page_dirty = v9fs_set_page_dirty,
.dirty_folio = v9fs_dirty_folio,
.writepage = v9fs_vfs_writepage,
.write_begin = v9fs_write_begin,
.write_end = v9fs_write_end,
.releasepage = v9fs_release_page,
.invalidatepage = v9fs_invalidate_page,
.launder_page = v9fs_launder_page,
.invalidate_folio = v9fs_invalidate_folio,
.launder_folio = v9fs_launder_folio,
.direct_IO = v9fs_direct_IO,
};

@@ -73,7 +73,8 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations adfs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = adfs_readpage,
.writepage = adfs_writepage,
.write_begin = adfs_write_begin,

@@ -453,7 +453,8 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations affs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = affs_readpage,
.writepage = affs_writepage,
.write_begin = affs_write_begin,
@@ -834,7 +835,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
}
const struct address_space_operations affs_aops_ofs = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = affs_readpage_ofs,
//.writepage = affs_writepage_ofs,
.write_begin = affs_write_begin_ofs,

@@ -42,10 +42,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags);
static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags);
static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
size_t length);
static int afs_dir_set_page_dirty(struct page *page)
static bool afs_dir_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
BUG(); /* This should never happen. */
}
@@ -73,9 +74,9 @@ const struct inode_operations afs_dir_inode_operations = {
};
const struct address_space_operations afs_dir_aops = {
.set_page_dirty = afs_dir_set_page_dirty,
.dirty_folio = afs_dir_dirty_folio,
.releasepage = afs_dir_releasepage,
.invalidatepage = afs_dir_invalidatepage,
.invalidate_folio = afs_dir_invalidate_folio,
};
const struct dentry_operations afs_fs_dentry_operations = {
@@ -2019,13 +2020,12 @@ static int afs_dir_releasepage(struct page *subpage, gfp_t gfp_flags)
/*
* Invalidate part or all of a folio.
*/
static void afs_dir_invalidatepage(struct page *subpage, unsigned int offset,
unsigned int length)
static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct folio *folio = page_folio(subpage);
struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
_enter("{%lu},%u,%u", folio_index(folio), offset, length);
_enter("{%lu},%zu,%zu", folio->index, offset, length);
BUG_ON(!folio_test_locked(folio));

@@ -21,8 +21,8 @@
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_readpage(struct file *file, struct page *page);
static int afs_symlink_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static void afs_invalidate_folio(struct folio *folio, size_t offset,
size_t length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static void afs_readahead(struct readahead_control *ractl);
@@ -54,10 +54,10 @@ const struct inode_operations afs_file_inode_operations = {
const struct address_space_operations afs_file_aops = {
.readpage = afs_readpage,
.readahead = afs_readahead,
.set_page_dirty = afs_set_page_dirty,
.launder_page = afs_launder_page,
.dirty_folio = afs_dirty_folio,
.launder_folio = afs_launder_folio,
.releasepage = afs_releasepage,
.invalidatepage = afs_invalidatepage,
.invalidate_folio = afs_invalidate_folio,
.write_begin = afs_write_begin,
.write_end = afs_write_end,
.writepage = afs_writepage,
@@ -67,7 +67,7 @@ const struct address_space_operations afs_file_aops = {
const struct address_space_operations afs_symlink_aops = {
.readpage = afs_symlink_readpage,
.releasepage = afs_releasepage,
.invalidatepage = afs_invalidatepage,
.invalidate_folio = afs_invalidate_folio,
};
static const struct vm_operations_struct afs_vm_ops = {
@@ -427,8 +427,8 @@ int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
* Adjust the dirty region of the page on truncation or full invalidation,
* getting rid of the markers altogether if the region is entirely invalidated.
*/
static void afs_invalidate_dirty(struct folio *folio, unsigned int offset,
unsigned int length)
static void afs_invalidate_dirty(struct folio *folio, size_t offset,
size_t length)
{
struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
unsigned long priv;
@@ -485,16 +485,14 @@ static void afs_invalidate_dirty(struct folio *folio, unsigned int offset,
* - release a page and clean up its private data if offset is 0 (indicating
* the entire page)
*/
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void afs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct folio *folio = page_folio(page);
_enter("{%lu},%zu,%zu", folio->index, offset, length);
_enter("{%lu},%u,%u", folio_index(folio), offset, length);
BUG_ON(!folio_test_locked(folio));
BUG_ON(!PageLocked(page));
if (PagePrivate(page))
if (folio_get_private(folio))
afs_invalidate_dirty(folio, offset, length);
folio_wait_fscache(folio);

@@ -1521,9 +1521,9 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
* write.c
*/
#ifdef CONFIG_AFS_FSCACHE
extern int afs_set_page_dirty(struct page *);
bool afs_dirty_folio(struct address_space *, struct folio *);
#else
#define afs_set_page_dirty __set_page_dirty_nobuffers
#define afs_dirty_folio filemap_dirty_folio
#endif
extern int afs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -1537,7 +1537,7 @@ extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
extern void afs_prune_wb_keys(struct afs_vnode *);
extern int afs_launder_page(struct page *);
int afs_launder_folio(struct folio *);
/*
* xattr.c

@@ -22,9 +22,10 @@ static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len
* Mark a page as having been made dirty and thus needing writeback. We also
* need to pin the cache object to write back to.
*/
int afs_set_page_dirty(struct page *page)
bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
return fscache_dirty_folio(mapping, folio,
afs_vnode_cache(AFS_FS_I(mapping->host)));
}
static void afs_folio_start_fscache(bool caching, struct folio *folio)
{
@@ -979,9 +980,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
/*
* Clean up a page during invalidation.
*/
int afs_launder_page(struct page *subpage)
int afs_launder_folio(struct folio *folio)
{
struct folio *folio = page_folio(subpage);
struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
struct iov_iter iter;
struct bio_vec bv[1];
@@ -989,7 +989,7 @@ int afs_launder_page(struct page *subpage)
unsigned int f, t;
int ret = 0;
_enter("{%lx}", folio_index(folio));
_enter("{%lx}", folio->index);
priv = (unsigned long)folio_get_private(folio);
if (folio_clear_dirty_for_io(folio)) {

@@ -478,7 +478,7 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
#endif
static const struct address_space_operations aio_ctx_aops = {
.set_page_dirty = __set_page_dirty_no_writeback,
.dirty_folio = noop_dirty_folio,
#if IS_ENABLED(CONFIG_MIGRATION)
.migratepage = aio_migratepage,
#endif

@@ -188,7 +188,8 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations bfs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = bfs_readpage,
.writepage = bfs_writepage,
.write_begin = bfs_write_begin,

@@ -3945,5 +3945,8 @@ static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
#define PageOrdered(page) PagePrivate2(page)
#define SetPageOrdered(page) SetPagePrivate2(page)
#define ClearPageOrdered(page) ClearPagePrivate2(page)
#define folio_test_ordered(folio) folio_test_private_2(folio)
#define folio_set_ordered(folio) folio_set_private_2(folio)
#define folio_clear_ordered(folio) folio_clear_private_2(folio)
#endif

@@ -1013,41 +1013,40 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
return try_release_extent_buffer(page);
}
static void btree_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void btree_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct extent_io_tree *tree;
tree = &BTRFS_I(page->mapping->host)->io_tree;
extent_invalidatepage(tree, page, offset);
btree_releasepage(page, GFP_NOFS);
if (PagePrivate(page)) {
btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
"page private not zero on page %llu",
(unsigned long long)page_offset(page));
detach_page_private(page);
tree = &BTRFS_I(folio->mapping->host)->io_tree;
extent_invalidate_folio(tree, folio, offset);
btree_releasepage(&folio->page, GFP_NOFS);
if (folio_get_private(folio)) {
btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
"folio private not zero on folio %llu",
(unsigned long long)folio_pos(folio));
folio_detach_private(folio);
}
}
static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
static bool btree_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
struct btrfs_subpage *subpage;
struct extent_buffer *eb;
int cur_bit = 0;
u64 page_start = page_offset(page);
u64 page_start = folio_pos(folio);
if (fs_info->sectorsize == PAGE_SIZE) {
BUG_ON(!PagePrivate(page));
eb = (struct extent_buffer *)page->private;
eb = folio_get_private(folio);
BUG_ON(!eb);
BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
BUG_ON(!atomic_read(&eb->refs));
btrfs_assert_tree_write_locked(eb);
return __set_page_dirty_nobuffers(page);
return filemap_dirty_folio(mapping, folio);
}
ASSERT(PagePrivate(page) && page->private);
subpage = (struct btrfs_subpage *)page->private;
subpage = folio_get_private(folio);
ASSERT(subpage->dirty_bitmap);
while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
@@ -1073,18 +1072,20 @@ static int btree_set_page_dirty(struct page *page)
cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
}
#endif
return __set_page_dirty_nobuffers(page);
return filemap_dirty_folio(mapping, folio);
}
#else
#define btree_dirty_folio filemap_dirty_folio
#endif
static const struct address_space_operations btree_aops = {
.writepages = btree_writepages,
.releasepage = btree_releasepage,
.invalidatepage = btree_invalidatepage,
.invalidate_folio = btree_invalidate_folio,
#ifdef CONFIG_MIGRATION
.migratepage = btree_migratepage,
#endif
.set_page_dirty = btree_set_page_dirty,
.dirty_folio = btree_dirty_folio,
};
struct extent_buffer *btrfs_find_create_tree_block(

@@ -244,8 +244,8 @@ void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, u32 bits);
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset);
int extent_invalidate_folio(struct extent_io_tree *tree,
struct folio *folio, size_t offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
u64 *end, u64 max_bytes,
struct extent_state **cached_state);

@@ -1507,17 +1507,17 @@ void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
{
struct address_space *mapping = inode->i_mapping;
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;
struct folio *folio;
while (index <= end_index) {
page = find_get_page(inode->i_mapping, index);
BUG_ON(!page); /* Pages should be in the extent_io_tree */
__set_page_dirty_nobuffers(page);
account_page_redirty(page);
put_page(page);
index++;
folio = filemap_get_folio(mapping, index);
filemap_dirty_folio(mapping, folio);
folio_account_redirty(folio);
index += folio_nr_pages(folio);
folio_put(folio);
}
}
@@ -4054,6 +4054,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct extent_page_data *epd)
{
struct folio *folio = page_folio(page);
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
const u64 page_start = page_offset(page);
@@ -4074,8 +4075,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
pg_offset = offset_in_page(i_size);
if (page->index > end_index ||
(page->index == end_index && !pg_offset)) {
page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
folio_invalidate(folio, 0, folio_size(folio));
folio_unlock(folio);
return 0;
}
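The open-coded a_ops->invalidatepage() dispatch above becomes a call to the
new folio_invalidate() helper; presumably that helper is a thin wrapper along
these lines (an assumption, shown for clarity):

	static void folio_invalidate_sketch(struct folio *folio, size_t offset,
					    size_t length)
	{
		const struct address_space_operations *aops = folio->mapping->a_ops;

		if (aops->invalidate_folio)
			aops->invalidate_folio(folio, offset, length);
	}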
@@ -5225,17 +5226,17 @@ void extent_readahead(struct readahead_control *rac)
}
/*
* basic invalidatepage code, this waits on any locked or writeback
* ranges corresponding to the page, and then deletes any extent state
* basic invalidate_folio code, this waits on any locked or writeback
* ranges corresponding to the folio, and then deletes any extent state
* records from the tree
*/
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset)
int extent_invalidate_folio(struct extent_io_tree *tree,
struct folio *folio, size_t offset)
{
struct extent_state *cached_state = NULL;
u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1;
size_t blocksize = page->mapping->host->i_sb->s_blocksize;
u64 start = folio_pos(folio);
u64 end = start + folio_size(folio) - 1;
size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
/* This function is only called for the btree inode */
ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
@@ -5245,7 +5246,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
return 0;
lock_extent_bits(tree, start, end, &cached_state);
wait_on_page_writeback(page);
folio_wait_writeback(folio);
/*
* Currently for btree io tree, only EXTENT_LOCKED is utilized,

@@ -5080,16 +5080,17 @@ static int btrfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentr
}
/*
* While truncating the inode pages during eviction, we get the VFS calling
* btrfs_invalidatepage() against each page of the inode. This is slow because
* the calls to btrfs_invalidatepage() result in a huge amount of calls to
* lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
* extent_state structures over and over, wasting lots of time.
* While truncating the inode pages during eviction, we get the VFS
* calling btrfs_invalidate_folio() against each folio of the inode. This
* is slow because the calls to btrfs_invalidate_folio() result in a
* huge amount of calls to lock_extent_bits() and clear_extent_bit(),
* which keep merging and splitting extent_state structures over and over,
* wasting lots of time.
*
* Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
* those expensive operations on a per page basis and do only the ordered io
* finishing, while we release here the extent_map and extent_state structures,
* without the excessive merging and splitting.
* Therefore if the inode is being evicted, let btrfs_invalidate_folio()
* skip all those expensive operations on a per folio basis and do only
* the ordered io finishing, while we release here the extent_map and
* extent_state structures, without the excessive merging and splitting.
*/
static void evict_inode_truncate_pages(struct inode *inode)
{
@@ -5155,7 +5156,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
* If still has DELALLOC flag, the extent didn't reach disk,
* and its reserved space won't be freed by delayed_ref.
* So we need to free its reserved space here.
* (Refer to comment in btrfs_invalidatepage, case 2)
* (Refer to comment in btrfs_invalidate_folio, case 2)
*
* Note, end is the bytenr of last byte, so we need + 1 here.
*/
@@ -8178,8 +8179,8 @@ static void btrfs_readahead(struct readahead_control *rac)
}
/*
* For releasepage() and invalidatepage() we have a race window where
* end_page_writeback() is called but the subpage spinlock is not yet released.
* For releasepage() and invalidate_folio() we have a race window where
* folio_end_writeback() is called but the subpage spinlock is not yet released.
* If we continue to release/invalidate the page, we could cause use-after-free
* for subpage spinlock. So this function is to spin and wait for subpage
* spinlock.
@@ -8255,48 +8256,48 @@ static int btrfs_migratepage(struct address_space *mapping,
}
#endif
static void btrfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
struct btrfs_inode *inode = BTRFS_I(folio->mapping->host);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *tree = &inode->io_tree;
struct extent_state *cached_state = NULL;
u64 page_start = page_offset(page);
u64 page_end = page_start + PAGE_SIZE - 1;
u64 page_start = folio_pos(folio);
u64 page_end = page_start + folio_size(folio) - 1;
u64 cur;
int inode_evicting = inode->vfs_inode.i_state & I_FREEING;
/*
* We have page locked so no new ordered extent can be created on this
* page, nor bio can be submitted for this page.
* We have folio locked so no new ordered extent can be created on this
* page, nor bio can be submitted for this folio.
*
* But already submitted bio can still be finished on this page.
* Furthermore, endio function won't skip page which has Ordered
* But already submitted bio can still be finished on this folio.
* Furthermore, endio function won't skip folio which has Ordered
* (Private2) already cleared, so it's possible for endio and
* invalidatepage to do the same ordered extent accounting twice
* on one page.
* invalidate_folio to do the same ordered extent accounting twice
* on one folio.
*
* So here we wait for any submitted bios to finish, so that we won't
* do double ordered extent accounting on the same page.
* do double ordered extent accounting on the same folio.
*/
wait_on_page_writeback(page);
wait_subpage_spinlock(page);
folio_wait_writeback(folio);
wait_subpage_spinlock(&folio->page);
/*
* For subpage case, we have call sites like
* btrfs_punch_hole_lock_range() which passes range not aligned to
* sectorsize.
* If the range doesn't cover the full page, we don't need to and
* shouldn't clear page extent mapped, as page->private can still
* If the range doesn't cover the full folio, we don't need to and
* shouldn't clear page extent mapped, as folio->private can still
* record subpage dirty bits for other part of the range.
*
* For cases that can invalidate the full even the range doesn't
* cover the full page, like invalidating the last page, we're
* For cases that invalidate the full folio even the range doesn't
* cover the full folio, like invalidating the last folio, we're
* still safe to wait for ordered extent to finish.
*/
if (!(offset == 0 && length == PAGE_SIZE)) {
btrfs_releasepage(page, GFP_NOFS);
btrfs_releasepage(&folio->page, GFP_NOFS);
return;
}
@@ -8337,7 +8338,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
page_end);
ASSERT(range_end + 1 - cur < U32_MAX);
range_len = range_end + 1 - cur;
if (!btrfs_page_test_ordered(fs_info, page, cur, range_len)) {
if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) {
/*
* If Ordered (Private2) is cleared, it means endio has
* already been executed for the range.
@@ -8347,7 +8348,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
delete_states = false;
goto next;
}
btrfs_page_clear_ordered(fs_info, page, cur, range_len);
btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len);
/*
* IO on this page will never be started, so we need to account
@@ -8417,11 +8418,11 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
* should not have Ordered (Private2) anymore, or the above iteration
* did something wrong.
*/
ASSERT(!PageOrdered(page));
btrfs_page_clear_checked(fs_info, page, page_offset(page), PAGE_SIZE);
ASSERT(!folio_test_ordered(folio));
btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
if (!inode_evicting)
__btrfs_releasepage(page, GFP_NOFS);
clear_page_extent_mapped(page);
__btrfs_releasepage(&folio->page, GFP_NOFS);
clear_page_extent_mapped(&folio->page);
}
/*
@@ -10056,11 +10057,6 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
min_size, actual_len, alloc_hint, trans);
}
static int btrfs_set_page_dirty(struct page *page)
{
return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct user_namespace *mnt_userns,
struct inode *inode, int mask)
{
@@ -11359,12 +11355,12 @@ static const struct address_space_operations btrfs_aops = {
.writepages = btrfs_writepages,
.readahead = btrfs_readahead,
.direct_IO = noop_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.invalidate_folio = btrfs_invalidate_folio,
.releasepage = btrfs_releasepage,
#ifdef CONFIG_MIGRATION
.migratepage = btrfs_migratepage,
#endif
.set_page_dirty = btrfs_set_page_dirty,
.dirty_folio = filemap_dirty_folio,
.error_remove_page = generic_error_remove_page,
.swap_activate = btrfs_swap_activate,
.swap_deactivate = btrfs_swap_deactivate,

@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
* FIXME: may need to call ->reservepage here as well. That's rather up to the
* address_space though.
*/
int __set_page_dirty_buffers(struct page *page)
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
int newly_dirty;
struct address_space *mapping = page_mapping(page);
if (unlikely(!mapping))
return !TestSetPageDirty(page);
struct buffer_head *head;
bool newly_dirty;
spin_lock(&mapping->private_lock);
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
head = folio_buffers(folio);
if (head) {
struct buffer_head *bh = head;
do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
* Lock out page's memcg migration to keep PageDirty
* synchronized with per-memcg dirty page counters.
*/
lock_page_memcg(page);
newly_dirty = !TestSetPageDirty(page);
folio_memcg_lock(folio);
newly_dirty = !folio_test_set_dirty(folio);
spin_unlock(&mapping->private_lock);
if (newly_dirty)
__set_page_dirty(page, mapping, 1);
__folio_mark_dirty(folio, mapping, 1);
unlock_page_memcg(page);
folio_memcg_unlock(folio);
if (newly_dirty)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);
EXPORT_SYMBOL(block_dirty_folio);
/*
* Write out and wait upon a list of buffers.
@@ -1484,41 +1481,40 @@ static void discard_buffer(struct buffer_head * bh)
}
/**
* block_invalidatepage - invalidate part or all of a buffer-backed page
*
* @page: the page which is affected
* block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
* @folio: The folio which is affected.
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
* block_invalidatepage() is called when all or part of the page has become
* block_invalidate_folio() is called when all or part of the folio has been
* invalidated by a truncate operation.
*
* block_invalidatepage() does not have to release all buffers, but it must
* block_invalidate_folio() does not have to release all buffers, but it must
* ensure that no dirty buffer is left outside @offset and that no I/O
* is underway against any of the blocks which are outside the truncation
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
void block_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
struct buffer_head *head, *bh, *next;
unsigned int curr_off = 0;
unsigned int stop = length + offset;
size_t curr_off = 0;
size_t stop = length + offset;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
goto out;
BUG_ON(!folio_test_locked(folio));
/*
* Check for overflow
*/
BUG_ON(stop > PAGE_SIZE || stop < length);
BUG_ON(stop > folio_size(folio) || stop < length);
head = folio_buffers(folio);
if (!head)
return;
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
size_t next_off = curr_off + bh->b_size;
next = bh->b_this_page;
/*
@@ -1537,21 +1533,21 @@ void block_invalidatepage(struct page *page, unsigned int offset,
} while (bh != head);
/*
* We release buffers only if the entire page is being invalidated.
* We release buffers only if the entire folio is being invalidated.
* The get_block cached value has been unconditionally invalidated,
* so real IO is not possible anymore.
*/
if (length == PAGE_SIZE)
try_to_release_page(page, 0);
if (length == folio_size(folio))
filemap_release_folio(folio, 0);
out:
return;
}
EXPORT_SYMBOL(block_invalidatepage);
EXPORT_SYMBOL(block_invalidate_folio);
/*
* We attach and possibly dirty the buffers atomically wrt
* __set_page_dirty_buffers() via private_lock. try_to_free_buffers
* block_dirty_folio() via private_lock. try_to_free_buffers
* is already excluded via the page lock.
*/
void create_empty_buffers(struct page *page,
@@ -1726,12 +1722,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
(1 << BH_Dirty)|(1 << BH_Uptodate));
/*
* Be very careful. We have no exclusion from __set_page_dirty_buffers
* Be very careful. We have no exclusion from block_dirty_folio
* here, and the (potentially unmapped) buffers may become dirty at
* any time. If a buffer becomes dirty here after we've inspected it
* then we just miss that fact, and the page stays dirty.
*
* Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
* Buffers outside i_size may be dirtied by block_dirty_folio;
* handle that here by just cleaning them.
*/
@@ -2208,29 +2204,27 @@ int generic_write_end(struct file *file, struct address_space *mapping,
EXPORT_SYMBOL(generic_write_end);
/*
* block_is_partially_uptodate checks whether buffers within a page are
* block_is_partially_uptodate checks whether buffers within a folio are
* uptodate or not.
*
* Returns true if all buffers which correspond to a file portion
* we want to read are uptodate.
* Returns true if all buffers which correspond to the specified part
* of the folio are uptodate.
*/
int block_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count)
bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
unsigned block_start, block_end, blocksize;
unsigned to;
struct buffer_head *bh, *head;
int ret = 1;
bool ret = true;
if (!page_has_buffers(page))
return 0;
head = page_buffers(page);
head = folio_buffers(folio);
if (!head)
return false;
blocksize = head->b_size;
to = min_t(unsigned, PAGE_SIZE - from, count);
to = min_t(unsigned, folio_size(folio) - from, count);
to = from + to;
if (from < blocksize && to > PAGE_SIZE - blocksize)
return 0;
if (from < blocksize && to > folio_size(folio) - blocksize)
return false;
bh = head;
block_start = 0;
@@ -2238,7 +2232,7 @@ int block_is_partially_uptodate(struct page *page, unsigned long from,
block_end = block_start + blocksize;
if (block_end > from && block_start < to) {
if (!buffer_uptodate(bh)) {
ret = 0;
ret = false;
break;
}
if (block_end >= to)
@@ -3185,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
*
* The same applies to regular filesystem pages: if all the buffers are
* clean then we set the page clean and proceed. To do that, we require
* total exclusion from __set_page_dirty_buffers(). That is obtained with
* total exclusion from block_dirty_folio(). That is obtained with
* private_lock.
*
* try_to_free_buffers() is non-blocking.
@@ -3252,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
* the page also.
*
* private_lock must be held over this entire operation in order
* to synchronise against __set_page_dirty_buffers and prevent the
* to synchronise against block_dirty_folio and prevent the
* dirty bit from being lost.
*/
if (ret)

@@ -76,18 +76,17 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page)
* Dirty a page. Optimistically adjust accounting, on the assumption
* that we won't race with invalidate. If we do, readjust.
*/
static int ceph_set_page_dirty(struct page *page)
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
if (PageDirty(page)) {
dout("%p set_page_dirty %p idx %lu -- already dirty\n",
mapping->host, page, page->index);
BUG_ON(!PagePrivate(page));
return 0;
if (folio_test_dirty(folio)) {
dout("%p dirty_folio %p idx %lu -- already dirty\n",
mapping->host, folio, folio->index);
BUG_ON(!folio_get_private(folio));
return false;
}
inode = mapping->host;
@@ -111,56 +110,56 @@ static int ceph_set_page_dirty(struct page *page)
if (ci->i_wrbuffer_ref == 0)
ihold(inode);
++ci->i_wrbuffer_ref;
dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
mapping->host, page, page->index,
mapping->host, folio, folio->index,
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps);
spin_unlock(&ci->i_ceph_lock);
/*
* Reference snap context in page->private. Also set
* PagePrivate so that we get invalidatepage callback.
* Reference snap context in folio->private. Also set
* PagePrivate so that we get invalidate_folio callback.
*/
BUG_ON(PagePrivate(page));
attach_page_private(page, snapc);
BUG_ON(folio_get_private(folio));
folio_attach_private(folio, snapc);
return ceph_fscache_set_page_dirty(page);
return ceph_fscache_dirty_folio(mapping, folio);
}
/*
* If we are truncating the full page (i.e. offset == 0), adjust the
* dirty page counters appropriately. Only called if there is private
* data on the page.
* If we are truncating the full folio (i.e. offset == 0), adjust the
* dirty folio counters appropriately. Only called if there is private
* data on the folio.
*/
static void ceph_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct inode *inode;
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
inode = page->mapping->host;
inode = folio->mapping->host;
ci = ceph_inode(inode);
if (offset != 0 || length != thp_size(page)) {
dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
inode, page, page->index, offset, length);
if (offset != 0 || length != folio_size(folio)) {
dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
inode, folio->index, offset, length);
return;
}
WARN_ON(!PageLocked(page));
if (PagePrivate(page)) {
dout("%p invalidatepage %p idx %lu full dirty page\n",
inode, page, page->index);
WARN_ON(!folio_test_locked(folio));
if (folio_get_private(folio)) {
dout("%p invalidate_folio idx %lu full dirty page\n",
inode, folio->index);
snapc = detach_page_private(page);
snapc = folio_detach_private(folio);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc);
}
wait_on_page_fscache(page);
folio_wait_fscache(folio);
}
static int ceph_releasepage(struct page *page, gfp_t gfp)
@@ -516,6 +515,7 @@ static u64 get_writepages_data_length(struct inode *inode,
*/
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
struct folio *folio = page_folio(page);
struct inode *inode = page->mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -550,8 +550,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
/* is this a partial page at end of file? */
if (page_off >= ceph_wbc.i_size) {
dout("%p page eof %llu\n", page, ceph_wbc.i_size);
page->mapping->a_ops->invalidatepage(page, 0, thp_size(page));
dout("folio at %lu beyond eof %llu\n", folio->index,
ceph_wbc.i_size);
folio_invalidate(folio, 0, folio_size(folio));
return 0;
}
@@ -874,14 +875,16 @@ static int ceph_writepages_start(struct address_space *mapping,
continue;
}
if (page_offset(page) >= ceph_wbc.i_size) {
dout("%p page eof %llu\n",
page, ceph_wbc.i_size);
struct folio *folio = page_folio(page);
dout("folio at %lu beyond eof %llu\n",
folio->index, ceph_wbc.i_size);
if ((ceph_wbc.size_stable ||
page_offset(page) >= i_size_read(inode)) &&
clear_page_dirty_for_io(page))
mapping->a_ops->invalidatepage(page,
0, thp_size(page));
unlock_page(page);
folio_pos(folio) >= i_size_read(inode)) &&
folio_clear_dirty_for_io(folio))
folio_invalidate(folio, 0,
folio_size(folio));
folio_unlock(folio);
continue;
}
if (strip_unit_end && (page->index > strip_unit_end)) {
@@ -1376,8 +1379,8 @@ const struct address_space_operations ceph_aops = {
.writepages = ceph_writepages_start,
.write_begin = ceph_write_begin,
.write_end = ceph_write_end,
.set_page_dirty = ceph_set_page_dirty,
.invalidatepage = ceph_invalidatepage,
.dirty_folio = ceph_dirty_folio,
.invalidate_folio = ceph_invalidate_folio,
.releasepage = ceph_releasepage,
.direct_IO = noop_direct_IO,
};

@@ -54,12 +54,12 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode,
fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode)));
}
static inline int ceph_fscache_set_page_dirty(struct page *page)
static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
struct inode *inode = page->mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_inode_info *ci = ceph_inode(mapping->host);
return fscache_set_page_dirty(page, ceph_fscache_cookie(ci));
return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
}
static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
@@ -133,9 +133,10 @@ static inline void ceph_fscache_unpin_writeback(struct inode *inode,
{
}
static inline int ceph_fscache_set_page_dirty(struct page *page)
static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
return __set_page_dirty_nobuffers(page);
return filemap_dirty_folio(mapping, folio);
}
static inline bool ceph_is_cache_enabled(struct inode *inode)

@@ -4764,17 +4764,17 @@ static int cifs_release_page(struct page *page, gfp_t gfp)
return true;
}
static void cifs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
static void cifs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
wait_on_page_fscache(page);
folio_wait_fscache(folio);
}
static int cifs_launder_page(struct page *page)
static int cifs_launder_folio(struct folio *folio)
{
int rc = 0;
loff_t range_start = page_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
loff_t range_start = folio_pos(folio);
loff_t range_end = range_start + folio_size(folio);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
@@ -4782,12 +4782,12 @@ static int cifs_launder_page(struct page *page)
.range_end = range_end,
};
cifs_dbg(FYI, "Launder page: %p\n", page);
cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
if (clear_page_dirty_for_io(page))
rc = cifs_writepage_locked(page, &wbc);
if (folio_clear_dirty_for_io(folio))
rc = cifs_writepage_locked(&folio->page, &wbc);
wait_on_page_fscache(page);
folio_wait_fscache(folio);
return rc;
}
@@ -4949,12 +4949,13 @@ static void cifs_swap_deactivate(struct file *file)
* need to pin the cache object to write back to.
*/
#ifdef CONFIG_CIFS_FSCACHE
static int cifs_set_page_dirty(struct page *page)
static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
{
return fscache_set_page_dirty(page, cifs_inode_cookie(page->mapping->host));
return fscache_dirty_folio(mapping, folio,
cifs_inode_cookie(mapping->host));
}
#else
#define cifs_set_page_dirty __set_page_dirty_nobuffers
#define cifs_dirty_folio filemap_dirty_folio
#endif
const struct address_space_operations cifs_addr_ops = {
@@ -4964,11 +4965,11 @@ const struct address_space_operations cifs_addr_ops = {
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.set_page_dirty = cifs_set_page_dirty,
.dirty_folio = cifs_dirty_folio,
.releasepage = cifs_release_page,
.direct_IO = cifs_direct_io,
.invalidatepage = cifs_invalidate_page,
.launder_page = cifs_launder_page,
.invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio,
/*
* TODO: investigate and if useful we could add an cifs_migratePage
* helper (under an CONFIG_MIGRATION) in the future, and also
@@ -4989,8 +4990,8 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.set_page_dirty = cifs_set_page_dirty,
.dirty_folio = cifs_dirty_folio,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
.launder_page = cifs_launder_page,
.invalidate_folio = cifs_invalidate_folio,
.launder_folio = cifs_launder_folio,
};

@@ -540,12 +540,13 @@ const struct address_space_operations ecryptfs_aops = {
* XXX: This is pretty broken for multiple reasons: ecryptfs does not
* actually use buffer_heads, and ecryptfs will crash without
* CONFIG_BLOCK. But it matches the behavior before the default for
* address_space_operations without the ->set_page_dirty method was
* address_space_operations without the ->dirty_folio method was
* cleaned up, so this is the best we can do without maintainer
* feedback.
*/
#ifdef CONFIG_BLOCK
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
#endif
.writepage = ecryptfs_writepage,
.readpage = ecryptfs_readpage,

@@ -537,25 +537,24 @@ static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
* decompression requests in progress, wait with rescheduling for a bit here.
* We could introduce an extra locking instead but it seems unnecessary.
*/
static void erofs_managed_cache_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
static void erofs_managed_cache_invalidate_folio(struct folio *folio,
size_t offset, size_t length)
{
const unsigned int stop = length + offset;
const size_t stop = length + offset;
DBG_BUGON(!PageLocked(page));
DBG_BUGON(!folio_test_locked(folio));
/* Check for potential overflow in debug mode */
DBG_BUGON(stop > PAGE_SIZE || stop < length);
DBG_BUGON(stop > folio_size(folio) || stop < length);
if (offset == 0 && stop == PAGE_SIZE)
while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
if (offset == 0 && stop == folio_size(folio))
while (!erofs_managed_cache_releasepage(&folio->page, GFP_NOFS))
cond_resched();
}
static const struct address_space_operations managed_cache_aops = {
.releasepage = erofs_managed_cache_releasepage,
.invalidatepage = erofs_managed_cache_invalidatepage,
.invalidate_folio = erofs_managed_cache_invalidate_folio,
};
static int erofs_init_managed_cache(struct super_block *sb)

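Note how the erofs conversion replaces its PAGE_SIZE tests with folio_size(): in ->invalidate_folio(), offset and length are byte counts within a folio that may span several pages. A minimal sketch of the contract (hypothetical foofs; assumes its folio private data was kmalloc'd):

/* Sketch: "the whole folio is going away" is now tested as
 * offset == 0 && length == folio_size(folio),
 * not as a comparison against PAGE_SIZE. */
static void foofs_invalidate_folio(struct folio *folio, size_t offset,
                                   size_t length)
{
        /* Drop private state only on a full invalidation. */
        if (offset == 0 && length == folio_size(folio))
                kfree(folio_detach_private(folio));
}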

@ -490,7 +490,8 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
}
static const struct address_space_operations exfat_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = exfat_readpage,
.readahead = exfat_readahead,
.writepage = exfat_writepage,


@ -967,7 +967,8 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
}
const struct address_space_operations ext2_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = ext2_readpage,
.readahead = ext2_readahead,
.writepage = ext2_writepage,
@ -982,7 +983,8 @@ const struct address_space_operations ext2_aops = {
};
const struct address_space_operations ext2_nobh_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = ext2_readpage,
.readahead = ext2_readahead,
.writepage = ext2_nobh_writepage,
@ -998,8 +1000,7 @@ const struct address_space_operations ext2_nobh_aops = {
static const struct address_space_operations ext2_dax_aops = {
.writepages = ext2_dax_writepages,
.direct_IO = noop_direct_IO,
.set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
.dirty_folio = noop_dirty_folio,
};
/*

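ext2_dax_aops above drops ->invalidatepage without gaining an ->invalidate_folio, because noop_invalidatepage() is deleted outright in the libfs.c hunk later in this diff: with no page cache behind DAX there is nothing to invalidate. A sketch of a minimal post-series DAX aops (foofs_dax_writepages is a hypothetical stand-in):

static const struct address_space_operations foofs_dax_aops = {
        .writepages     = foofs_dax_writepages, /* hypothetical */
        .direct_IO      = noop_direct_IO,
        .dirty_folio    = noop_dirty_folio,
        /* no ->invalidate_folio: DAX has no page cache to invalidate */
};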

@ -137,8 +137,6 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
int pextents);
@ -186,7 +184,7 @@ void ext4_evict_inode(struct inode *inode)
* journal. So although mm thinks everything is clean and
* ready for reaping the inode might still have some pages to
* write in the running transaction or waiting to be
* checkpointed. Thus calling jbd2_journal_invalidatepage()
* checkpointed. Thus calling jbd2_journal_invalidate_folio()
* (via truncate_inode_pages()) to discard these buffers can
* cause data loss. Also even if we did not discard these
* buffers, we would have no way to find them after the inode
@ -1571,16 +1569,18 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
struct folio *folio = page_folio(page);
BUG_ON(!PageLocked(page));
BUG_ON(PageWriteback(page));
BUG_ON(!folio_test_locked(folio));
BUG_ON(folio_test_writeback(folio));
if (invalidate) {
if (page_mapped(page))
clear_page_dirty_for_io(page);
block_invalidatepage(page, 0, PAGE_SIZE);
ClearPageUptodate(page);
if (folio_mapped(folio))
folio_clear_dirty_for_io(folio);
block_invalidate_folio(folio, 0,
folio_size(folio));
folio_clear_uptodate(folio);
}
unlock_page(page);
folio_unlock(folio);
}
pagevec_release(&pvec);
}
@ -1971,6 +1971,7 @@ static int __ext4_journalled_writepage(struct page *page,
static int ext4_writepage(struct page *page,
struct writeback_control *wbc)
{
struct folio *folio = page_folio(page);
int ret = 0;
loff_t size;
unsigned int len;
@ -1980,8 +1981,8 @@ static int ext4_writepage(struct page *page,
bool keep_towrite = false;
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
folio_invalidate(folio, 0, folio_size(folio));
folio_unlock(folio);
return -EIO;
}
@ -3207,40 +3208,39 @@ static void ext4_readahead(struct readahead_control *rac)
ext4_mpage_readpages(inode, rac, NULL);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void ext4_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
trace_ext4_invalidatepage(page, offset, length);
trace_ext4_invalidate_folio(folio, offset, length);
/* No journalling happens on data buffers when this function is used */
WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
block_invalidatepage(page, offset, length);
block_invalidate_folio(folio, offset, length);
}
static int __ext4_journalled_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
static int __ext4_journalled_invalidate_folio(struct folio *folio,
size_t offset, size_t length)
{
journal_t *journal = EXT4_JOURNAL(page->mapping->host);
journal_t *journal = EXT4_JOURNAL(folio->mapping->host);
trace_ext4_journalled_invalidatepage(page, offset, length);
trace_ext4_journalled_invalidate_folio(folio, offset, length);
/*
* If it's a full truncate we just forget about the pending dirtying
*/
if (offset == 0 && length == PAGE_SIZE)
ClearPageChecked(page);
if (offset == 0 && length == folio_size(folio))
folio_clear_checked(folio);
return jbd2_journal_invalidatepage(journal, page, offset, length);
return jbd2_journal_invalidate_folio(journal, folio, offset, length);
}
/* Wrapper for aops... */
static void ext4_journalled_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
static void ext4_journalled_invalidate_folio(struct folio *folio,
size_t offset,
size_t length)
{
WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
WARN_ON(__ext4_journalled_invalidate_folio(folio, offset, length) < 0);
}
static int ext4_releasepage(struct page *page, gfp_t wait)
@ -3573,31 +3573,32 @@ const struct iomap_ops ext4_iomap_report_ops = {
};
/*
* Whenever the page is being dirtied, corresponding buffers should already be
* attached to the transaction (we take care of this in ext4_page_mkwrite() and
* ext4_write_begin()). However we cannot move buffers to dirty transaction
* lists here because ->set_page_dirty is called under VFS locks and the page
* Whenever the folio is being dirtied, corresponding buffers should already
* be attached to the transaction (we take care of this in ext4_page_mkwrite()
* and ext4_write_begin()). However we cannot move buffers to dirty transaction
* lists here because ->dirty_folio is called under VFS locks and the folio
* is not necessarily locked.
*
* We cannot just dirty the page and leave attached buffers clean, because the
* We cannot just dirty the folio and leave attached buffers clean, because the
* buffers' dirty state is "definitive". We cannot just set the buffers dirty
* or jbddirty because all the journalling code will explode.
*
* So what we do is to mark the page "pending dirty" and next time writepage
* So what we do is to mark the folio "pending dirty" and next time writepage
* is called, propagate that into the buffers appropriately.
*/
static int ext4_journalled_set_page_dirty(struct page *page)
static bool ext4_journalled_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
WARN_ON_ONCE(!page_has_buffers(page));
SetPageChecked(page);
return __set_page_dirty_nobuffers(page);
WARN_ON_ONCE(!page_has_buffers(&folio->page));
folio_set_checked(folio);
return filemap_dirty_folio(mapping, folio);
}
static int ext4_set_page_dirty(struct page *page)
static bool ext4_dirty_folio(struct address_space *mapping, struct folio *folio)
{
WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
WARN_ON_ONCE(!page_has_buffers(page));
return __set_page_dirty_buffers(page);
WARN_ON_ONCE(!folio_test_locked(folio) && !folio_test_dirty(folio));
WARN_ON_ONCE(!folio_buffers(folio));
return block_dirty_folio(mapping, folio);
}
static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
@ -3614,9 +3615,9 @@ static const struct address_space_operations ext4_aops = {
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_write_end,
.set_page_dirty = ext4_set_page_dirty,
.dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.invalidate_folio = ext4_invalidate_folio,
.releasepage = ext4_releasepage,
.direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
@ -3632,9 +3633,9 @@ static const struct address_space_operations ext4_journalled_aops = {
.writepages = ext4_writepages,
.write_begin = ext4_write_begin,
.write_end = ext4_journalled_write_end,
.set_page_dirty = ext4_journalled_set_page_dirty,
.dirty_folio = ext4_journalled_dirty_folio,
.bmap = ext4_bmap,
.invalidatepage = ext4_journalled_invalidatepage,
.invalidate_folio = ext4_journalled_invalidate_folio,
.releasepage = ext4_releasepage,
.direct_IO = noop_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
@ -3649,9 +3650,9 @@ static const struct address_space_operations ext4_da_aops = {
.writepages = ext4_writepages,
.write_begin = ext4_da_write_begin,
.write_end = ext4_da_write_end,
.set_page_dirty = ext4_set_page_dirty,
.dirty_folio = ext4_dirty_folio,
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.invalidate_folio = ext4_invalidate_folio,
.releasepage = ext4_releasepage,
.direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
@ -3663,9 +3664,8 @@ static const struct address_space_operations ext4_da_aops = {
static const struct address_space_operations ext4_dax_aops = {
.writepages = ext4_dax_writepages,
.direct_IO = noop_direct_IO,
.set_page_dirty = __set_page_dirty_no_writeback,
.dirty_folio = noop_dirty_folio,
.bmap = ext4_bmap,
.invalidatepage = noop_invalidatepage,
.swap_activate = ext4_iomap_swap_activate,
};
@ -5238,13 +5238,12 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
* In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
* buffers that are attached to a page straddling i_size and are undergoing
* In data=journal mode ext4_journalled_invalidate_folio() may fail to invalidate
* buffers that are attached to a folio straddling i_size and are undergoing
* commit. In that case we have to wait for commit to finish and try again.
*/
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
struct page *page;
unsigned offset;
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = 0;
@ -5252,25 +5251,25 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
offset = inode->i_size & (PAGE_SIZE - 1);
/*
* If the page is fully truncated, we don't need to wait for any commit
* (and we even should not as __ext4_journalled_invalidatepage() may
* strip all buffers from the page but keep the page dirty which can then
* confuse e.g. concurrent ext4_writepage() seeing dirty page without
* If the folio is fully truncated, we don't need to wait for any commit
* (and we even should not as __ext4_journalled_invalidate_folio() may
* strip all buffers from the folio but keep the folio dirty which can then
* confuse e.g. concurrent ext4_writepage() seeing dirty folio without
* buffers). Also we don't need to wait for any commit if all buffers in
* the page remain valid. This is most beneficial for the common case of
* the folio remain valid. This is most beneficial for the common case of
* blocksize == PAGESIZE.
*/
if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
return;
while (1) {
page = find_lock_page(inode->i_mapping,
struct folio *folio = filemap_lock_folio(inode->i_mapping,
inode->i_size >> PAGE_SHIFT);
if (!page)
if (!folio)
return;
ret = __ext4_journalled_invalidatepage(page, offset,
PAGE_SIZE - offset);
unlock_page(page);
put_page(page);
ret = __ext4_journalled_invalidate_folio(folio, offset,
folio_size(folio) - offset);
folio_unlock(folio);
folio_put(folio);
if (ret != -EBUSY)
return;
commit_tid = 0;

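ext4_writepage() above no longer reaches into a_ops->invalidatepage directly; it calls folio_invalidate(). That helper is defined in mm, outside this excerpt, so the following is only its assumed shape: a thin dispatcher that invokes the mapping's ->invalidate_folio() when one is set.

/* Assumed shape of folio_invalidate() (the real definition lives in
 * mm and is not part of this diff). */
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
        const struct address_space_operations *aops = folio->mapping->a_ops;

        if (aops->invalidate_folio)
                aops->invalidate_folio(folio, offset, length);
}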

@ -447,26 +447,27 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
return nwritten;
}
static int f2fs_set_meta_page_dirty(struct page *page)
static bool f2fs_dirty_meta_folio(struct address_space *mapping,
struct folio *folio)
{
trace_f2fs_set_page_dirty(page, META);
trace_f2fs_set_page_dirty(&folio->page, META);
if (!PageUptodate(page))
SetPageUptodate(page);
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
set_page_private_reference(page);
return 1;
if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_META);
set_page_private_reference(&folio->page);
return true;
}
return 0;
return false;
}
const struct address_space_operations f2fs_meta_aops = {
.writepage = f2fs_write_meta_page,
.writepages = f2fs_write_meta_pages,
.set_page_dirty = f2fs_set_meta_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.dirty_folio = f2fs_dirty_meta_folio,
.invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,
@ -1027,7 +1028,7 @@ static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}
void f2fs_update_dirty_page(struct inode *inode, struct page *page)
void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
@ -1042,7 +1043,7 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
inode_inc_dirty_pages(inode);
spin_unlock(&sbi->inode_lock[type]);
set_page_private_reference(page);
set_page_private_reference(&folio->page);
}
void f2fs_remove_dirty_inode(struct inode *inode)


@ -1747,7 +1747,7 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
const struct address_space_operations f2fs_compress_aops = {
.releasepage = f2fs_release_page,
.invalidatepage = f2fs_invalidate_page,
.invalidate_folio = f2fs_invalidate_folio,
};
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)


@ -3489,17 +3489,16 @@ static int f2fs_write_end(struct file *file,
return copied;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
(offset % PAGE_SIZE || length != PAGE_SIZE))
(offset || length != folio_size(folio)))
return;
if (PageDirty(page)) {
if (folio_test_dirty(folio)) {
if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
@ -3510,17 +3509,16 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
}
}
clear_page_private_gcing(page);
clear_page_private_gcing(&folio->page);
if (test_opt(sbi, COMPRESS_CACHE) &&
inode->i_ino == F2FS_COMPRESS_INO(sbi))
clear_page_private_data(page);
clear_page_private_data(&folio->page);
if (page_private_atomic(page))
return f2fs_drop_inmem_page(inode, page);
if (page_private_atomic(&folio->page))
return f2fs_drop_inmem_page(inode, &folio->page);
detach_page_private(page);
set_page_private(page, 0);
folio_detach_private(folio);
}
int f2fs_release_page(struct page *page, gfp_t wait)
@ -3547,35 +3545,35 @@ int f2fs_release_page(struct page *page, gfp_t wait)
return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
static bool f2fs_dirty_data_folio(struct address_space *mapping,
struct folio *folio)
{
struct inode *inode = page_file_mapping(page)->host;
struct inode *inode = mapping->host;
trace_f2fs_set_page_dirty(page, DATA);
trace_f2fs_set_page_dirty(&folio->page, DATA);
if (!PageUptodate(page))
SetPageUptodate(page);
if (PageSwapCache(page))
return __set_page_dirty_nobuffers(page);
if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
BUG_ON(folio_test_swapcache(folio));
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!page_private_atomic(page)) {
f2fs_register_inmem_page(inode, page);
return 1;
if (!page_private_atomic(&folio->page)) {
f2fs_register_inmem_page(inode, &folio->page);
return true;
}
/*
* Previously, this page has been registered, we just
* return here.
*/
return 0;
return false;
}
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
f2fs_update_dirty_page(inode, page);
return 1;
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
f2fs_update_dirty_folio(inode, folio);
return true;
}
return 0;
return true;
}
@ -3937,8 +3935,8 @@ const struct address_space_operations f2fs_dblock_aops = {
.writepages = f2fs_write_data_pages,
.write_begin = f2fs_write_begin,
.write_end = f2fs_write_end,
.set_page_dirty = f2fs_set_data_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.dirty_folio = f2fs_dirty_data_folio,
.invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page,
.direct_IO = noop_direct_IO,
.bmap = f2fs_bmap,


@ -3705,7 +3705,7 @@ void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
@ -3769,8 +3769,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
enum iostat_type io_type,
int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int f2fs_release_page(struct page *page, gfp_t wait);
#ifdef CONFIG_MIGRATION
int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,


@ -2137,23 +2137,24 @@ static int f2fs_write_node_pages(struct address_space *mapping,
return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
static bool f2fs_dirty_node_folio(struct address_space *mapping,
struct folio *folio)
{
trace_f2fs_set_page_dirty(page, NODE);
trace_f2fs_set_page_dirty(&folio->page, NODE);
if (!PageUptodate(page))
SetPageUptodate(page);
if (!folio_test_uptodate(folio))
folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
if (IS_INODE(page))
f2fs_inode_chksum_set(F2FS_P_SB(page), page);
if (IS_INODE(&folio->page))
f2fs_inode_chksum_set(F2FS_P_SB(&folio->page), &folio->page);
#endif
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
set_page_private_reference(page);
return 1;
if (!folio_test_dirty(folio)) {
filemap_dirty_folio(mapping, folio);
inc_page_count(F2FS_P_SB(&folio->page), F2FS_DIRTY_NODES);
set_page_private_reference(&folio->page);
return true;
}
return 0;
return false;
}
/*
@ -2162,8 +2163,8 @@ static int f2fs_set_node_page_dirty(struct page *page)
const struct address_space_operations f2fs_node_aops = {
.writepage = f2fs_write_node_page,
.writepages = f2fs_write_node_pages,
.set_page_dirty = f2fs_set_node_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.dirty_folio = f2fs_dirty_node_folio,
.invalidate_folio = f2fs_invalidate_folio,
.releasepage = f2fs_release_page,
#ifdef CONFIG_MIGRATION
.migratepage = f2fs_migrate_page,

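The three f2fs conversions above (meta, data and node) share one shape: mark the folio uptodate, bail out if it is already dirty, otherwise dirty it through filemap_dirty_folio() and update the per-type accounting. A condensed sketch of that shared pattern (hypothetical foofs; foofs_account_dirty() stands in for the inc_page_count()/set_page_private_reference() bookkeeping):

static bool foofs_dirty_folio(struct address_space *mapping,
                              struct folio *folio)
{
        if (!folio_test_uptodate(folio))
                folio_mark_uptodate(folio);
        if (folio_test_dirty(folio))
                return false;           /* nothing newly dirtied */

        filemap_dirty_folio(mapping, folio);
        foofs_account_dirty(mapping->host, folio);      /* assumed helper */
        return true;
}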

@ -342,7 +342,8 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
}
static const struct address_space_operations fat_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = fat_readpage,
.readahead = fat_readahead,
.writepage = fat_writepage,


@ -159,27 +159,29 @@ int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
EXPORT_SYMBOL(__fscache_begin_write_operation);
/**
* fscache_set_page_dirty - Mark page dirty and pin a cache object for writeback
* @page: The page being dirtied
* fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
* @mapping: The mapping the folio belongs to.
* @folio: The folio being dirtied.
* @cookie: The cookie referring to the cache object
*
* Set the dirty flag on a page and pin an in-use cache object in memory when
* dirtying a page so that writeback can later write to it. This is intended
* to be called from the filesystem's ->set_page_dirty() method.
* Set the dirty flag on a folio and pin an in-use cache object in memory
* so that writeback can later write to it. This is intended
* to be called from the filesystem's ->dirty_folio() method.
*
* Returns 1 if PG_dirty was set on the page, 0 otherwise.
* Return: true if the dirty flag was set on the folio, false otherwise.
*/
int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie)
bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
struct fscache_cookie *cookie)
{
struct inode *inode = page->mapping->host;
struct inode *inode = mapping->host;
bool need_use = false;
_enter("");
if (!__set_page_dirty_nobuffers(page))
return 0;
if (!filemap_dirty_folio(mapping, folio))
return false;
if (!fscache_cookie_valid(cookie))
return 1;
return true;
if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
spin_lock(&inode->i_lock);
@ -192,9 +194,9 @@ int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie)
if (need_use)
fscache_use_cookie(cookie, true);
}
return 1;
return true;
}
EXPORT_SYMBOL(fscache_set_page_dirty);
EXPORT_SYMBOL(fscache_dirty_folio);
struct fscache_write_request {
struct netfs_cache_resources cache_resources;

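Per the kernel-doc above, a netfs wires fscache_dirty_folio() in as its ->dirty_folio() with a one-line wrapper that supplies the cookie, as the cifs hunk earlier in this diff does. A hedged sketch (foofs names are illustrative; foofs_inode_cookie() is an assumed per-fs accessor mirroring cifs_inode_cookie()):

static bool foofs_dirty_folio(struct address_space *mapping,
                              struct folio *folio)
{
        /* foofs_inode_cookie() is hypothetical: fetch the inode's
         * fscache cookie, as cifs_inode_cookie() does for cifs. */
        return fscache_dirty_folio(mapping, folio,
                                   foofs_inode_cookie(mapping->host));
}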

@ -1326,8 +1326,7 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
static const struct address_space_operations fuse_dax_file_aops = {
.writepages = fuse_dax_writepages,
.direct_IO = noop_direct_IO,
.set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
.dirty_folio = noop_dirty_folio,
};
static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)


@ -1773,7 +1773,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
/*
* Only call invalidate_inode_pages2() after removing
* FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
* FUSE_NOWRITE, otherwise fuse_launder_folio() would deadlock.
*/
if ((is_truncate || !is_wb) &&
S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {


@ -2348,17 +2348,17 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
return copied;
}
static int fuse_launder_page(struct page *page)
static int fuse_launder_folio(struct folio *folio)
{
int err = 0;
if (clear_page_dirty_for_io(page)) {
struct inode *inode = page->mapping->host;
if (folio_clear_dirty_for_io(folio)) {
struct inode *inode = folio->mapping->host;
/* Serialize with pending writeback for the same page */
fuse_wait_on_page_writeback(inode, page->index);
err = fuse_writepage_locked(page);
fuse_wait_on_page_writeback(inode, folio->index);
err = fuse_writepage_locked(&folio->page);
if (!err)
fuse_wait_on_page_writeback(inode, page->index);
fuse_wait_on_page_writeback(inode, folio->index);
}
return err;
}
@ -3179,8 +3179,8 @@ static const struct address_space_operations fuse_file_aops = {
.readahead = fuse_readahead,
.writepage = fuse_writepage,
.writepages = fuse_writepages,
.launder_page = fuse_launder_page,
.set_page_dirty = __set_page_dirty_nobuffers,
.launder_folio = fuse_launder_folio,
.dirty_folio = filemap_dirty_folio,
.bmap = fuse_bmap,
.direct_IO = fuse_direct_IO,
.write_begin = fuse_write_begin,

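fuse_launder_folio() above is representative of the ->launder_folio() conversions in this series (nfs and orangefs follow below): the folio arrives locked, the dirty bit is cleared for I/O, and the folio is written back synchronously. A stripped-down sketch of that pattern (foofs_writeback_folio() is a hypothetical synchronous write helper):

static int foofs_launder_folio(struct folio *folio)
{
        int err = 0;

        /* The folio is locked; write it only if we cleared the
         * dirty bit ourselves. */
        if (folio_clear_dirty_for_io(folio))
                err = foofs_writeback_folio(folio);     /* assumed helper */
        return err;
}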

@ -606,18 +606,12 @@ void adjust_fs_space(struct inode *inode)
gfs2_trans_end(sdp);
}
/**
* jdata_set_page_dirty - Page dirtying function
* @page: The page to dirty
*
* Returns: 1 if it dirtied the page, or 0 otherwise
*/
static int jdata_set_page_dirty(struct page *page)
static bool jdata_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
if (current->journal_info)
SetPageChecked(page);
return __set_page_dirty_buffers(page);
folio_set_checked(folio);
return block_dirty_folio(mapping, folio);
}
/**
@ -672,22 +666,23 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
unlock_buffer(bh);
}
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
unsigned int stop = offset + length;
int partial_page = (offset || length < PAGE_SIZE);
struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
size_t stop = offset + length;
int partial_page = (offset || length < folio_size(folio));
struct buffer_head *bh, *head;
unsigned long pos = 0;
BUG_ON(!PageLocked(page));
BUG_ON(!folio_test_locked(folio));
if (!partial_page)
ClearPageChecked(page);
if (!page_has_buffers(page))
folio_clear_checked(folio);
head = folio_buffers(folio);
if (!head)
goto out;
bh = head = page_buffers(page);
bh = head;
do {
if (pos + bh->b_size > stop)
return;
@ -699,7 +694,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset,
} while (bh != head);
out:
if (!partial_page)
try_to_release_page(page, 0);
filemap_release_folio(folio, 0);
}
/**
@ -779,9 +774,9 @@ static const struct address_space_operations gfs2_aops = {
.writepages = gfs2_writepages,
.readpage = gfs2_readpage,
.readahead = gfs2_readahead,
.set_page_dirty = __set_page_dirty_nobuffers,
.dirty_folio = filemap_dirty_folio,
.releasepage = iomap_releasepage,
.invalidatepage = iomap_invalidatepage,
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
.direct_IO = noop_direct_IO,
.migratepage = iomap_migrate_page,
@ -794,9 +789,9 @@ static const struct address_space_operations gfs2_jdata_aops = {
.writepages = gfs2_jdata_writepages,
.readpage = gfs2_readpage,
.readahead = gfs2_readahead,
.set_page_dirty = jdata_set_page_dirty,
.dirty_folio = jdata_dirty_folio,
.bmap = gfs2_bmap,
.invalidatepage = gfs2_invalidatepage,
.invalidate_folio = gfs2_invalidate_folio,
.releasepage = gfs2_releasepage,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,


@ -89,13 +89,15 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
}
const struct address_space_operations gfs2_meta_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
};
const struct address_space_operations gfs2_rgrp_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
};


@ -159,7 +159,8 @@ static int hfs_writepages(struct address_space *mapping,
}
const struct address_space_operations hfs_btree_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfs_readpage,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,
@ -169,7 +170,8 @@ const struct address_space_operations hfs_btree_aops = {
};
const struct address_space_operations hfs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfs_readpage,
.writepage = hfs_writepage,
.write_begin = hfs_write_begin,


@ -156,7 +156,8 @@ static int hfsplus_writepages(struct address_space *mapping,
}
const struct address_space_operations hfsplus_btree_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,
@ -166,7 +167,8 @@ const struct address_space_operations hfsplus_btree_aops = {
};
const struct address_space_operations hfsplus_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hfsplus_readpage,
.writepage = hfsplus_writepage,
.write_begin = hfsplus_write_begin,


@ -14,6 +14,7 @@
#include <linux/statfs.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include "hostfs.h"
@ -504,7 +505,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping,
static const struct address_space_operations hostfs_aops = {
.writepage = hostfs_writepage,
.readpage = hostfs_readpage,
.set_page_dirty = __set_page_dirty_nobuffers,
.dirty_folio = filemap_dirty_folio,
.write_begin = hostfs_write_begin,
.write_end = hostfs_write_end,
};


@ -245,7 +245,8 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
const struct address_space_operations hpfs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = hpfs_readpage,
.writepage = hpfs_writepage,
.readahead = hpfs_readahead,


@ -1144,7 +1144,7 @@ static void hugetlbfs_destroy_inode(struct inode *inode)
static const struct address_space_operations hugetlbfs_aops = {
.write_begin = hugetlbfs_write_begin,
.write_end = hugetlbfs_write_end,
.set_page_dirty = __set_page_dirty_no_writeback,
.dirty_folio = noop_dirty_folio,
.migratepage = hugetlbfs_migrate_page,
.error_remove_page = hugetlbfs_error_remove_page,
};


@ -425,37 +425,33 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
EXPORT_SYMBOL_GPL(iomap_readahead);
/*
* iomap_is_partially_uptodate checks whether blocks within a page are
* iomap_is_partially_uptodate checks whether blocks within a folio are
* uptodate or not.
*
* Returns true if all blocks which correspond to a file portion
* we want to read within the page are uptodate.
* Returns true if all blocks which correspond to the specified part
* of the folio are uptodate.
*/
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count)
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
struct folio *folio = page_folio(page);
struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = page->mapping->host;
unsigned len, first, last;
unsigned i;
struct inode *inode = folio->mapping->host;
size_t len;
unsigned first, last, i;
/* Limit range to one page */
len = min_t(unsigned, PAGE_SIZE - from, count);
if (!iop)
return false;
/* Limit range to this folio */
len = min(folio_size(folio) - from, count);
/* First and last blocks in range within page */
first = from >> inode->i_blkbits;
last = (from + len - 1) >> inode->i_blkbits;
if (iop) {
for (i = first; i <= last; i++)
if (!test_bit(i, iop->uptodate))
return 0;
return 1;
}
return 0;
for (i = first; i <= last; i++)
if (!test_bit(i, iop->uptodate))
return false;
return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
@ -481,7 +477,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
trace_iomap_invalidatepage(folio->mapping->host, offset, len);
trace_iomap_invalidate_folio(folio->mapping->host,
folio_pos(folio) + offset, len);
/*
* If we're invalidating the entire folio, clear the dirty state
@ -500,13 +497,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
void iomap_invalidatepage(struct page *page, unsigned int offset,
unsigned int len)
{
iomap_invalidate_folio(page_folio(page), offset, len);
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);
#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,

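The block-index arithmetic in iomap_is_partially_uptodate() above is easiest to check with concrete numbers:

/*
 * Worked example (illustrative assumptions, not from this diff):
 * a 16 KiB folio on a filesystem with 4 KiB blocks (i_blkbits == 12),
 * queried with from == 5000 and count == 9000:
 *
 *      len   = min(16384 - 5000, 9000)    = 9000
 *      first = 5000 >> 12                 = 1
 *      last  = (5000 + 9000 - 1) >> 12    = 13999 >> 12 = 3
 *
 * so the range is uptodate iff bits 1..3 are set in iop->uptodate.
 * The old page-based version capped from/count at PAGE_SIZE; taking
 * size_t byte offsets lets one call cover a whole multi-page folio.
 */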

@ -81,7 +81,7 @@ DEFINE_EVENT(iomap_range_class, name, \
TP_ARGS(inode, off, len))
DEFINE_RANGE_EVENT(iomap_writepage);
DEFINE_RANGE_EVENT(iomap_releasepage);
DEFINE_RANGE_EVENT(iomap_invalidatepage);
DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
#define IOMAP_TYPE_STRINGS \


@ -86,7 +86,7 @@ EXPORT_SYMBOL(jbd2_journal_start_commit);
EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
EXPORT_SYMBOL(jbd2_journal_wipe);
EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
EXPORT_SYMBOL(jbd2_journal_invalidatepage);
EXPORT_SYMBOL(jbd2_journal_invalidate_folio);
EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);


@ -2217,14 +2217,14 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
}
/*
* jbd2_journal_invalidatepage
* jbd2_journal_invalidate_folio
*
* This code is tricky. It has a number of cases to deal with.
*
* There are two invariants which this code relies on:
*
* i_size must be updated on disk before we start calling invalidatepage on the
* data.
* i_size must be updated on disk before we start calling invalidate_folio
* on the data.
*
* This is done in ext3 by defining an ext3_setattr method which
* updates i_size before truncate gets going. By maintaining this
@ -2426,9 +2426,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
}
/**
* jbd2_journal_invalidatepage()
* jbd2_journal_invalidate_folio()
* @journal: journal to use for flush...
* @page: page to flush
* @folio: folio to flush
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
@ -2437,30 +2437,29 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
* the page is straddling i_size. Caller then has to wait for current commit
* and try again.
*/
int jbd2_journal_invalidatepage(journal_t *journal,
struct page *page,
unsigned int offset,
unsigned int length)
int jbd2_journal_invalidate_folio(journal_t *journal, struct folio *folio,
size_t offset, size_t length)
{
struct buffer_head *head, *bh, *next;
unsigned int stop = offset + length;
unsigned int curr_off = 0;
int partial_page = (offset || length < PAGE_SIZE);
int partial_page = (offset || length < folio_size(folio));
int may_free = 1;
int ret = 0;
if (!PageLocked(page))
if (!folio_test_locked(folio))
BUG();
if (!page_has_buffers(page))
head = folio_buffers(folio);
if (!head)
return 0;
BUG_ON(stop > PAGE_SIZE || stop < length);
BUG_ON(stop > folio_size(folio) || stop < length);
/* We will potentially be playing with lists other than just the
* data lists (especially for journaled data mode), so be
* cautious in our locking. */
head = bh = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
next = bh->b_this_page;
@ -2483,8 +2482,8 @@ int jbd2_journal_invalidatepage(journal_t *journal,
} while (bh != head);
if (!partial_page) {
if (may_free && try_to_free_buffers(page))
J_ASSERT(!page_has_buffers(page));
if (may_free && try_to_free_buffers(&folio->page))
J_ASSERT(!folio_buffers(folio));
}
return 0;
}


@ -357,7 +357,8 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
const struct address_space_operations jfs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = jfs_readpage,
.readahead = jfs_readahead,
.writepage = jfs_writepage,


@ -552,22 +552,22 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
return ret;
}
static void metapage_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void metapage_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
BUG_ON(offset || length < PAGE_SIZE);
BUG_ON(offset || length < folio_size(folio));
BUG_ON(PageWriteback(page));
BUG_ON(folio_test_writeback(folio));
metapage_releasepage(page, 0);
metapage_releasepage(&folio->page, 0);
}
const struct address_space_operations jfs_metapage_aops = {
.readpage = metapage_readpage,
.writepage = metapage_writepage,
.releasepage = metapage_releasepage,
.invalidatepage = metapage_invalidatepage,
.set_page_dirty = __set_page_dirty_nobuffers,
.invalidate_folio = metapage_invalidate_folio,
.dirty_folio = filemap_dirty_folio,
};
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,


@ -631,7 +631,7 @@ const struct address_space_operations ram_aops = {
.readpage = simple_readpage,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
.set_page_dirty = __set_page_dirty_no_writeback,
.dirty_folio = noop_dirty_folio,
};
EXPORT_SYMBOL(ram_aops);
@ -1198,17 +1198,6 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
EXPORT_SYMBOL(noop_fsync);
void noop_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
/*
* There is no page cache to invalidate in the dax case, however
* we need this callback defined to prevent falling back to
* block_invalidatepage() in do_invalidatepage().
*/
}
EXPORT_SYMBOL_GPL(noop_invalidatepage);
ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
/*
@ -1231,7 +1220,7 @@ EXPORT_SYMBOL(kfree_link);
struct inode *alloc_anon_inode(struct super_block *s)
{
static const struct address_space_operations anon_aops = {
.set_page_dirty = __set_page_dirty_no_writeback,
.dirty_folio = noop_dirty_folio,
};
struct inode *inode = new_inode_pseudo(s);

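noop_invalidatepage() could be removed above because the fallback it existed to suppress is gone: invalidation now calls ->invalidate_folio() only when the aop is present, which is why the buffer-head filesystems in this diff all gain an explicit .invalidate_folio = block_invalidate_folio line. Its dirtying counterpart survives as noop_dirty_folio(); its assumed implementation (defined in mm, outside this excerpt) simply sets the dirty flag without queueing any writeback:

/* Assumed shape of noop_dirty_folio() (not part of this diff):
 * mark the folio dirty for e.g. DAX or ramfs, without putting it
 * on any writeback list. */
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        if (!folio_test_dirty(folio))
                return !folio_test_set_dirty(folio);
        return false;
}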

@ -442,7 +442,8 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
}
static const struct address_space_operations minix_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = minix_readpage,
.writepage = minix_writepage,
.write_begin = minix_write_begin,


@ -479,7 +479,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
if (!buffer_mapped(bh)) {
/*
* unmapped dirty buffers are created by
* __set_page_dirty_buffers -> mmapped data
* block_dirty_folio -> mmapped data
*/
if (buffer_dirty(bh))
goto confused;


@ -406,17 +406,17 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
* - Called if either PG_private or PG_fscache is set on the page
* - Caller holds page lock
*/
static void nfs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
static void nfs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
page, offset, length);
dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
folio->index, offset, length);
if (offset != 0 || length < PAGE_SIZE)
if (offset != 0 || length < folio_size(folio))
return;
/* Cancel any unstarted writes on this page */
nfs_wb_page_cancel(page_file_mapping(page)->host, page);
wait_on_page_fscache(page);
nfs_wb_folio_cancel(folio->mapping->host, folio);
folio_wait_fscache(folio);
}
/*
@ -472,15 +472,15 @@ static void nfs_check_dirty_writeback(struct page *page,
* - Caller holds page lock
* - Return 0 if successful, -error otherwise
*/
static int nfs_launder_page(struct page *page)
static int nfs_launder_folio(struct folio *folio)
{
struct inode *inode = page_file_mapping(page)->host;
struct inode *inode = folio->mapping->host;
dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
inode->i_ino, (long long)page_offset(page));
dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
inode->i_ino, folio_pos(folio));
wait_on_page_fscache(page);
return nfs_wb_page(inode, page);
folio_wait_fscache(folio);
return nfs_wb_page(inode, &folio->page);
}
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
@ -515,18 +515,18 @@ static void nfs_swap_deactivate(struct file *file)
const struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.readpages = nfs_readpages,
.set_page_dirty = __set_page_dirty_nobuffers,
.dirty_folio = filemap_dirty_folio,
.writepage = nfs_writepage,
.writepages = nfs_writepages,
.write_begin = nfs_write_begin,
.write_end = nfs_write_end,
.invalidatepage = nfs_invalidate_page,
.invalidate_folio = nfs_invalidate_folio,
.releasepage = nfs_release_page,
.direct_IO = nfs_direct_IO,
#ifdef CONFIG_MIGRATION
.migratepage = nfs_migrate_page,
#endif
.launder_page = nfs_launder_page,
.launder_folio = nfs_launder_folio,
.is_dirty_writeback = nfs_check_dirty_writeback,
.error_remove_page = generic_error_remove_page,
.swap_activate = nfs_swap_activate,


@ -2057,21 +2057,21 @@ int nfs_wb_all(struct inode *inode)
}
EXPORT_SYMBOL_GPL(nfs_wb_all);
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
{
struct nfs_page *req;
int ret = 0;
wait_on_page_writeback(page);
folio_wait_writeback(folio);
/* blocking call to cancel all requests and join to a single (head)
* request */
req = nfs_lock_and_join_requests(page);
req = nfs_lock_and_join_requests(&folio->page);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
} else if (req) {
/* all requests from this page have been cancelled by
/* all requests from this folio have been cancelled by
* nfs_lock_and_join_requests, so just remove the head
* request from the inode / page_private pointer and
* release it */


@ -199,23 +199,22 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
return 0;
}
static int nilfs_set_page_dirty(struct page *page)
static bool nilfs_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
struct inode *inode = page->mapping->host;
int ret = __set_page_dirty_nobuffers(page);
struct inode *inode = mapping->host;
struct buffer_head *head;
unsigned int nr_dirty = 0;
bool ret = filemap_dirty_folio(mapping, folio);
if (page_has_buffers(page)) {
unsigned int nr_dirty = 0;
struct buffer_head *bh, *head;
/*
* The page may not be locked, eg if called from try_to_unmap_one()
*/
spin_lock(&mapping->private_lock);
head = folio_buffers(folio);
if (head) {
struct buffer_head *bh = head;
/*
* This page is locked by callers, and no other thread
* concurrently marks its buffers dirty since they are
* only dirtied through routines in fs/buffer.c in
* which call sites of mark_buffer_dirty are protected
* by page lock.
*/
bh = head = page_buffers(page);
do {
/* Do not mark hole blocks dirty */
if (buffer_dirty(bh) || !buffer_mapped(bh))
@ -224,14 +223,13 @@ static int nilfs_set_page_dirty(struct page *page)
set_buffer_dirty(bh);
nr_dirty++;
} while (bh = bh->b_this_page, bh != head);
if (nr_dirty)
nilfs_set_file_dirty(inode, nr_dirty);
} else if (ret) {
unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
nilfs_set_file_dirty(inode, nr_dirty);
nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits);
}
spin_unlock(&mapping->private_lock);
if (nr_dirty)
nilfs_set_file_dirty(inode, nr_dirty);
return ret;
}
@ -299,12 +297,12 @@ const struct address_space_operations nilfs_aops = {
.writepage = nilfs_writepage,
.readpage = nilfs_readpage,
.writepages = nilfs_writepages,
.set_page_dirty = nilfs_set_page_dirty,
.dirty_folio = nilfs_dirty_folio,
.readahead = nilfs_readahead,
.write_begin = nilfs_write_begin,
.write_end = nilfs_write_end,
/* .releasepage = nilfs_releasepage, */
.invalidatepage = block_invalidatepage,
.invalidate_folio = block_invalidate_folio,
.direct_IO = nilfs_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
};

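In nilfs_dirty_folio() above, a folio that has no buffer heads yet is charged for every block it covers: nr_dirty = 1 << (folio_shift(folio) - inode->i_blkbits). With illustrative numbers:

/*
 * Worked example (illustrative assumptions): a 4 KiB folio
 * (folio_shift() == 12) on a nilfs volume with 1 KiB blocks
 * (i_blkbits == 10):
 *
 *      nr_dirty = 1 << (12 - 10) = 4
 *
 * i.e. all four blocks backing the folio are counted as dirty.
 * The old code hard-coded PAGE_SHIFT here; folio_shift() keeps the
 * count correct for folios larger than a single page.
 */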

@ -434,7 +434,8 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
static const struct address_space_operations def_mdt_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.writepage = nilfs_mdt_write_page,
};


@ -593,12 +593,12 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
iblock = initialized_size >> blocksize_bits;
/*
* Be very careful. We have no exclusion from __set_page_dirty_buffers
* Be very careful. We have no exclusion from block_dirty_folio
* here, and the (potentially unmapped) buffers may become dirty at
* any time. If a buffer becomes dirty here after we've inspected it
* then we just miss that fact, and the page stays dirty.
*
* Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
* Buffers outside i_size may be dirtied by block_dirty_folio;
* handle that here by just cleaning them.
*/
@ -653,7 +653,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
// Update initialized size in the attribute and
// in the inode.
// Again, for each page do:
// __set_page_dirty_buffers();
// block_dirty_folio();
// put_page()
// We don't need to wait on the writes.
// Update iblock.
@ -1350,12 +1350,13 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
/* Is the page fully outside i_size? (truncate in progress) */
if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT)) {
struct folio *folio = page_folio(page);
/*
* The page may have dirty, unmapped buffers. Make them
* freeable here, so the page does not leak.
*/
block_invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
block_invalidate_folio(folio, 0, folio_size(folio));
folio_unlock(folio);
ntfs_debug("Write outside i_size - truncated?");
return 0;
}
@ -1653,7 +1654,7 @@ const struct address_space_operations ntfs_normal_aops = {
.readpage = ntfs_readpage,
#ifdef NTFS_RW
.writepage = ntfs_writepage,
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
#endif /* NTFS_RW */
.bmap = ntfs_bmap,
.migratepage = buffer_migrate_page,
@ -1668,7 +1669,7 @@ const struct address_space_operations ntfs_compressed_aops = {
.readpage = ntfs_readpage,
#ifdef NTFS_RW
.writepage = ntfs_writepage,
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
#endif /* NTFS_RW */
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
@ -1683,9 +1684,7 @@ const struct address_space_operations ntfs_mst_aops = {
.readpage = ntfs_readpage, /* Fill page with data. */
#ifdef NTFS_RW
.writepage = ntfs_writepage, /* Write dirty page to disk. */
.set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
without touching the buffers
belonging to the page. */
.dirty_folio = filemap_dirty_folio,
#endif /* NTFS_RW */
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
@ -1747,7 +1746,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
set_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
spin_unlock(&mapping->private_lock);
__set_page_dirty_nobuffers(page);
block_dirty_folio(mapping, page_folio(page));
if (unlikely(buffers_to_free)) {
do {
bh = buffers_to_free->b_this_page;


@ -1950,7 +1950,7 @@ const struct address_space_operations ntfs_aops = {
.write_end = ntfs_write_end,
.direct_IO = ntfs_direct_IO,
.bmap = ntfs_bmap,
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
};
const struct address_space_operations ntfs_aops_cmpr = {


@ -2453,7 +2453,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
const struct address_space_operations ocfs2_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.readpage = ocfs2_readpage,
.readahead = ocfs2_readahead,
.writepage = ocfs2_writepage,
@ -2461,7 +2461,7 @@ const struct address_space_operations ocfs2_aops = {
.write_end = ocfs2_write_end,
.bmap = ocfs2_bmap,
.direct_IO = ocfs2_direct_IO,
.invalidatepage = block_invalidatepage,
.invalidate_folio = block_invalidate_folio,
.releasepage = ocfs2_releasepage,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,

View File

@ -372,7 +372,8 @@ const struct inode_operations omfs_file_inops = {
};
const struct address_space_operations omfs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = omfs_readpage,
.readahead = omfs_readahead,
.writepage = omfs_writepage,


@ -46,7 +46,7 @@ static int orangefs_writepage_locked(struct page *page,
else
wlen = PAGE_SIZE;
}
/* Should've been handled in orangefs_invalidatepage. */
/* Should've been handled in orangefs_invalidate_folio. */
WARN_ON(off == len || off + wlen > len);
bv.bv_page = page;
@ -243,7 +243,7 @@ static int orangefs_writepages(struct address_space *mapping,
return ret;
}
static int orangefs_launder_page(struct page *);
static int orangefs_launder_folio(struct folio *);
static void orangefs_readahead(struct readahead_control *rac)
{
@ -290,14 +290,15 @@ static void orangefs_readahead(struct readahead_control *rac)
static int orangefs_readpage(struct file *file, struct page *page)
{
struct folio *folio = page_folio(page);
struct inode *inode = page->mapping->host;
struct iov_iter iter;
struct bio_vec bv;
ssize_t ret;
loff_t off; /* offset into this page */
if (PageDirty(page))
orangefs_launder_page(page);
if (folio_test_dirty(folio))
orangefs_launder_folio(folio);
off = page_offset(page);
bv.bv_page = page;
@ -330,6 +331,7 @@ static int orangefs_write_begin(struct file *file,
void **fsdata)
{
struct orangefs_write_range *wr;
struct folio *folio;
struct page *page;
pgoff_t index;
int ret;
@ -341,27 +343,28 @@ static int orangefs_write_begin(struct file *file,
return -ENOMEM;
*pagep = page;
folio = page_folio(page);
if (PageDirty(page) && !PagePrivate(page)) {
if (folio_test_dirty(folio) && !folio_test_private(folio)) {
/*
* Should be impossible. If it happens, launder the page
* since we don't know what's dirty. This will WARN in
* orangefs_writepage_locked.
*/
ret = orangefs_launder_page(page);
ret = orangefs_launder_folio(folio);
if (ret)
return ret;
}
if (PagePrivate(page)) {
if (folio_test_private(folio)) {
struct orangefs_write_range *wr;
wr = (struct orangefs_write_range *)page_private(page);
wr = folio_get_private(folio);
if (wr->pos + wr->len == pos &&
uid_eq(wr->uid, current_fsuid()) &&
gid_eq(wr->gid, current_fsgid())) {
wr->len += len;
goto okay;
} else {
ret = orangefs_launder_page(page);
ret = orangefs_launder_folio(folio);
if (ret)
return ret;
}
@ -375,7 +378,7 @@ static int orangefs_write_begin(struct file *file,
wr->len = len;
wr->uid = current_fsuid();
wr->gid = current_fsgid();
attach_page_private(page, wr);
folio_attach_private(folio, wr);
okay:
return 0;
}
@ -415,47 +418,45 @@ static int orangefs_write_end(struct file *file, struct address_space *mapping,
return copied;
}
static void orangefs_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
static void orangefs_invalidate_folio(struct folio *folio,
size_t offset, size_t length)
{
struct orangefs_write_range *wr;
wr = (struct orangefs_write_range *)page_private(page);
struct orangefs_write_range *wr = folio_get_private(folio);
if (offset == 0 && length == PAGE_SIZE) {
kfree(detach_page_private(page));
kfree(folio_detach_private(folio));
return;
/* write range entirely within invalidate range (or equal) */
} else if (page_offset(page) + offset <= wr->pos &&
wr->pos + wr->len <= page_offset(page) + offset + length) {
kfree(detach_page_private(page));
} else if (folio_pos(folio) + offset <= wr->pos &&
wr->pos + wr->len <= folio_pos(folio) + offset + length) {
kfree(folio_detach_private(folio));
/* XXX is this right? only caller in fs */
cancel_dirty_page(page);
folio_cancel_dirty(folio);
return;
/* invalidate range chops off end of write range */
} else if (wr->pos < page_offset(page) + offset &&
wr->pos + wr->len <= page_offset(page) + offset + length &&
page_offset(page) + offset < wr->pos + wr->len) {
} else if (wr->pos < folio_pos(folio) + offset &&
wr->pos + wr->len <= folio_pos(folio) + offset + length &&
folio_pos(folio) + offset < wr->pos + wr->len) {
size_t x;
x = wr->pos + wr->len - (page_offset(page) + offset);
x = wr->pos + wr->len - (folio_pos(folio) + offset);
WARN_ON(x > wr->len);
wr->len -= x;
wr->uid = current_fsuid();
wr->gid = current_fsgid();
/* invalidate range chops off beginning of write range */
} else if (page_offset(page) + offset <= wr->pos &&
page_offset(page) + offset + length < wr->pos + wr->len &&
wr->pos < page_offset(page) + offset + length) {
} else if (folio_pos(folio) + offset <= wr->pos &&
folio_pos(folio) + offset + length < wr->pos + wr->len &&
wr->pos < folio_pos(folio) + offset + length) {
size_t x;
x = page_offset(page) + offset + length - wr->pos;
x = folio_pos(folio) + offset + length - wr->pos;
WARN_ON(x > wr->len);
wr->pos += x;
wr->len -= x;
wr->uid = current_fsuid();
wr->gid = current_fsgid();
/* invalidate range entirely within write range (punch hole) */
} else if (wr->pos < page_offset(page) + offset &&
page_offset(page) + offset + length < wr->pos + wr->len) {
} else if (wr->pos < folio_pos(folio) + offset &&
folio_pos(folio) + offset + length < wr->pos + wr->len) {
/* XXX what do we do here... should not WARN_ON */
WARN_ON(1);
/* punch hole */
@ -467,11 +468,11 @@ static void orangefs_invalidatepage(struct page *page,
/* non-overlapping ranges */
} else {
/* WARN if they do overlap */
if (!((page_offset(page) + offset + length <= wr->pos) ^
(wr->pos + wr->len <= page_offset(page) + offset))) {
if (!((folio_pos(folio) + offset + length <= wr->pos) ^
(wr->pos + wr->len <= folio_pos(folio) + offset))) {
WARN_ON(1);
printk("invalidate range offset %llu length %u\n",
page_offset(page) + offset, length);
printk("invalidate range offset %llu length %zu\n",
folio_pos(folio) + offset, length);
printk("write range offset %llu length %zu\n",
wr->pos, wr->len);
}
@ -483,7 +484,7 @@ static void orangefs_invalidatepage(struct page *page,
* Thus the following runs if wr was modified above.
*/
orangefs_launder_page(page);
orangefs_launder_folio(folio);
}
static int orangefs_releasepage(struct page *page, gfp_t foo)
@ -496,17 +497,17 @@ static void orangefs_freepage(struct page *page)
kfree(detach_page_private(page));
}
static int orangefs_launder_page(struct page *page)
static int orangefs_launder_folio(struct folio *folio)
{
int r = 0;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = 0,
};
wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
r = orangefs_writepage_locked(page, &wbc);
end_page_writeback(page);
folio_wait_writeback(folio);
if (folio_clear_dirty_for_io(folio)) {
r = orangefs_writepage_locked(&folio->page, &wbc);
folio_end_writeback(folio);
}
return r;
}
@ -633,19 +634,19 @@ static const struct address_space_operations orangefs_address_operations = {
.readahead = orangefs_readahead,
.readpage = orangefs_readpage,
.writepages = orangefs_writepages,
.set_page_dirty = __set_page_dirty_nobuffers,
.dirty_folio = filemap_dirty_folio,
.write_begin = orangefs_write_begin,
.write_end = orangefs_write_end,
.invalidatepage = orangefs_invalidatepage,
.invalidate_folio = orangefs_invalidate_folio,
.releasepage = orangefs_releasepage,
.freepage = orangefs_freepage,
.launder_page = orangefs_launder_page,
.launder_folio = orangefs_launder_folio,
.direct_IO = orangefs_direct_IO,
};
vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
unsigned long *bitlock = &orangefs_inode->bitlock;
@ -659,27 +660,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
goto out;
}
lock_page(page);
if (PageDirty(page) && !PagePrivate(page)) {
folio_lock(folio);
if (folio_test_dirty(folio) && !folio_test_private(folio)) {
/*
* Should be impossible. If it happens, launder the page
* Should be impossible. If it happens, launder the folio
* since we don't know what's dirty. This will WARN in
* orangefs_writepage_locked.
*/
if (orangefs_launder_page(page)) {
if (orangefs_launder_folio(folio)) {
ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
goto out;
}
}
if (PagePrivate(page)) {
wr = (struct orangefs_write_range *)page_private(page);
if (folio_test_private(folio)) {
wr = folio_get_private(folio);
if (uid_eq(wr->uid, current_fsuid()) &&
gid_eq(wr->gid, current_fsgid())) {
wr->pos = page_offset(page);
wr->pos = page_offset(vmf->page);
wr->len = PAGE_SIZE;
goto okay;
} else {
if (orangefs_launder_page(page)) {
if (orangefs_launder_folio(folio)) {
ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
goto out;
}
@ -690,27 +691,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
goto out;
}
wr->pos = page_offset(page);
wr->pos = page_offset(vmf->page);
wr->len = PAGE_SIZE;
wr->uid = current_fsuid();
wr->gid = current_fsgid();
attach_page_private(page, wr);
folio_attach_private(folio, wr);
okay:
file_update_time(vmf->vma->vm_file);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
if (folio->mapping != inode->i_mapping) {
folio_unlock(folio);
ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
goto out;
}
/*
* We mark the page dirty already here so that when freeze is in
* We mark the folio dirty already here so that when freeze is in
* progress, we are guaranteed that writeback during freezing will
* see the dirty page and writeprotect it again.
* see the dirty folio and writeprotect it again.
*/
set_page_dirty(page);
wait_for_stable_page(page);
folio_mark_dirty(folio);
folio_wait_stable(folio);
ret = VM_FAULT_LOCKED;
out:
sb_end_pagefault(inode->i_sb);
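
The orangefs changes above show the mechanical shape of these conversions. A minimal before/after sketch of the simplest case, with invented function names (real conversions of this kind appear in the ubifs and vboxsf hunks further down):

    /* Before: page-based hook; the mapping is derived from the page. */
    static int example_set_page_dirty(struct page *page)
    {
            return __set_page_dirty_nobuffers(page);
    }

    /* After: folio-based hook; the address_space is passed explicitly. */
    static bool example_dirty_folio(struct address_space *mapping,
                                    struct folio *folio)
    {
            return filemap_dirty_folio(mapping, folio);
    }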


@ -3094,7 +3094,7 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
* decide if this buffer needs to stay around for data logging or ordered
* write purposes
*/
static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
static int invalidate_folio_can_drop(struct inode *inode, struct buffer_head *bh)
{
int ret = 1;
struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
@ -3147,26 +3147,26 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh)
return ret;
}
/* clm -- taken from fs/buffer.c:block_invalidate_page */
static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
/* clm -- taken from fs/buffer.c:block_invalidate_folio */
static void reiserfs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct buffer_head *head, *bh, *next;
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
unsigned int curr_off = 0;
unsigned int stop = offset + length;
int partial_page = (offset || length < PAGE_SIZE);
int partial_page = (offset || length < folio_size(folio));
int ret = 1;
BUG_ON(!PageLocked(page));
BUG_ON(!folio_test_locked(folio));
if (!partial_page)
ClearPageChecked(page);
folio_clear_checked(folio);
if (!page_has_buffers(page))
head = folio_buffers(folio);
if (!head)
goto out;
head = page_buffers(page);
bh = head;
do {
unsigned int next_off = curr_off + bh->b_size;
@ -3179,7 +3179,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
* is this block fully invalidated?
*/
if (offset <= curr_off) {
if (invalidatepage_can_drop(inode, bh))
if (invalidate_folio_can_drop(inode, bh))
reiserfs_unmap_buffer(bh);
else
ret = 0;
@ -3194,21 +3194,21 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset,
* so real IO is not possible anymore.
*/
if (!partial_page && ret) {
ret = try_to_release_page(page, 0);
ret = filemap_release_folio(folio, 0);
/* maybe should BUG_ON(!ret); - neilb */
}
out:
return;
}
static int reiserfs_set_page_dirty(struct page *page)
static bool reiserfs_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
struct inode *inode = page->mapping->host;
if (reiserfs_file_data_log(inode)) {
SetPageChecked(page);
return __set_page_dirty_nobuffers(page);
if (reiserfs_file_data_log(mapping->host)) {
folio_set_checked(folio);
return filemap_dirty_folio(mapping, folio);
}
return __set_page_dirty_buffers(page);
return block_dirty_folio(mapping, folio);
}
/*
@ -3430,10 +3430,10 @@ const struct address_space_operations reiserfs_address_space_operations = {
.readpage = reiserfs_readpage,
.readahead = reiserfs_readahead,
.releasepage = reiserfs_releasepage,
.invalidatepage = reiserfs_invalidatepage,
.invalidate_folio = reiserfs_invalidate_folio,
.write_begin = reiserfs_write_begin,
.write_end = reiserfs_write_end,
.bmap = reiserfs_aop_bmap,
.direct_IO = reiserfs_direct_IO,
.set_page_dirty = reiserfs_set_page_dirty,
.dirty_folio = reiserfs_dirty_folio,
};


@ -858,8 +858,8 @@ static int write_ordered_buffers(spinlock_t * lock,
ret = -EIO;
}
/*
* ugly interaction with invalidatepage here.
* reiserfs_invalidate_page will pin any buffer that has a
* ugly interaction with invalidate_folio here.
* reiserfs_invalidate_folio will pin any buffer that has a
* valid journal head from an older transaction. If someone
* else sets our buffer dirty after we write it in the first
* loop, and then someone truncates the page away, nobody


@ -146,11 +146,11 @@ static int generic_remap_check_len(struct inode *inode_in,
}
/* Read a page's worth of file data into the page cache. */
static struct folio *vfs_dedupe_get_folio(struct inode *inode, loff_t pos)
static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos)
{
struct folio *folio;
folio = read_mapping_folio(inode->i_mapping, pos >> PAGE_SHIFT, NULL);
folio = read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
if (IS_ERR(folio))
return folio;
if (!folio_test_uptodate(folio)) {
@ -187,8 +187,8 @@ static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
* Compare extents of two files to see if they are the same.
* Caller must have locked both inodes to prevent write races.
*/
static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
struct inode *dest, loff_t dstoff,
static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff,
struct file *dest, loff_t dstoff,
loff_t len, bool *is_same)
{
bool same = true;
@ -224,8 +224,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
* someone is invalidating pages on us and we lose.
*/
if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
src_folio->mapping != src->i_mapping ||
dst_folio->mapping != dest->i_mapping) {
src_folio->mapping != src->f_mapping ||
dst_folio->mapping != dest->f_mapping) {
same = false;
goto unlock;
}
@ -333,8 +333,8 @@ int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
if (remap_flags & REMAP_FILE_DEDUP) {
bool is_same = false;
ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
inode_out, pos_out, *len, &is_same);
ret = vfs_dedupe_file_range_compare(file_in, pos_in,
file_out, pos_out, *len, &is_same);
if (ret)
return ret;
if (!is_same)


@ -495,7 +495,8 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations sysv_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = sysv_readpage,
.writepage = sysv_writepage,
.write_begin = sysv_write_begin,


@ -1287,25 +1287,25 @@ int ubifs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
return err;
}
static void ubifs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
ubifs_assert(c, PagePrivate(page));
if (offset || length < PAGE_SIZE)
/* Partial page remains dirty */
ubifs_assert(c, folio_test_private(folio));
if (offset || length < folio_size(folio))
/* Partial folio remains dirty */
return;
if (PageChecked(page))
if (folio_test_checked(folio))
release_new_page_budget(c);
else
release_existing_page_budget(c);
atomic_long_dec(&c->dirty_pg_cnt);
ClearPagePrivate(page);
ClearPageChecked(page);
folio_clear_private(folio);
folio_clear_checked(folio);
}
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@ -1445,18 +1445,18 @@ static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
return generic_file_write_iter(iocb, from);
}
static int ubifs_set_page_dirty(struct page *page)
static bool ubifs_dirty_folio(struct address_space *mapping,
struct folio *folio)
{
int ret;
struct inode *inode = page->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
bool ret;
struct ubifs_info *c = mapping->host->i_sb->s_fs_info;
ret = __set_page_dirty_nobuffers(page);
ret = filemap_dirty_folio(mapping, folio);
/*
* An attempt to dirty a page without budgeting for it - should not
* happen.
*/
ubifs_assert(c, ret == 0);
ubifs_assert(c, ret == false);
return ret;
}
@ -1646,8 +1646,8 @@ const struct address_space_operations ubifs_file_address_operations = {
.writepage = ubifs_writepage,
.write_begin = ubifs_write_begin,
.write_end = ubifs_write_end,
.invalidatepage = ubifs_invalidatepage,
.set_page_dirty = ubifs_set_page_dirty,
.invalidate_folio = ubifs_invalidate_folio,
.dirty_folio = ubifs_dirty_folio,
#ifdef CONFIG_MIGRATION
.migratepage = ubifs_migrate_page,
#endif


@ -125,7 +125,8 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin
}
const struct address_space_operations udf_adinicb_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = udf_adinicb_readpage,
.writepage = udf_adinicb_writepage,
.write_begin = udf_adinicb_write_begin,


@ -235,7 +235,8 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations udf_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = udf_readpage,
.readahead = udf_readahead,
.writepage = udf_writepage,


@ -526,7 +526,8 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
}
const struct address_space_operations ufs_aops = {
.set_page_dirty = __set_page_dirty_buffers,
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.readpage = ufs_readpage,
.writepage = ufs_writepage,
.write_begin = ufs_write_begin,


@ -354,7 +354,7 @@ static int vboxsf_write_end(struct file *file, struct address_space *mapping,
const struct address_space_operations vboxsf_reg_aops = {
.readpage = vboxsf_readpage,
.writepage = vboxsf_writepage,
.set_page_dirty = __set_page_dirty_nobuffers,
.dirty_folio = filemap_dirty_folio,
.write_begin = simple_write_begin,
.write_end = vboxsf_write_end,
};


@ -567,9 +567,9 @@ const struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage,
.readahead = xfs_vm_readahead,
.writepages = xfs_vm_writepages,
.set_page_dirty = __set_page_dirty_nobuffers,
.dirty_folio = filemap_dirty_folio,
.releasepage = iomap_releasepage,
.invalidatepage = iomap_invalidatepage,
.invalidate_folio = iomap_invalidate_folio,
.bmap = xfs_vm_bmap,
.direct_IO = noop_direct_IO,
.migratepage = iomap_migrate_page,
@ -581,7 +581,6 @@ const struct address_space_operations xfs_address_space_operations = {
const struct address_space_operations xfs_dax_aops = {
.writepages = xfs_dax_writepages,
.direct_IO = noop_direct_IO,
.set_page_dirty = __set_page_dirty_no_writeback,
.invalidatepage = noop_invalidatepage,
.dirty_folio = noop_dirty_folio,
.swap_activate = xfs_iomap_swapfile_activate,
};


@ -185,9 +185,9 @@ static const struct address_space_operations zonefs_file_aops = {
.readahead = zonefs_readahead,
.writepage = zonefs_writepage,
.writepages = zonefs_writepages,
.set_page_dirty = __set_page_dirty_nobuffers,
.dirty_folio = filemap_dirty_folio,
.releasepage = iomap_releasepage,
.invalidatepage = iomap_invalidatepage,
.invalidate_folio = iomap_invalidate_folio,
.migratepage = iomap_migrate_page,
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,


@ -144,6 +144,7 @@ BUFFER_FNS(Defer_Completion, defer_completion)
((struct buffer_head *)page_private(page)); \
})
#define page_has_buffers(page) PagePrivate(page)
#define folio_buffers(folio) folio_get_private(folio)
void buffer_check_dirty_writeback(struct page *page,
bool *dirty, bool *writeback);
@ -216,16 +217,14 @@ extern int buffer_heads_over_limit;
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
void block_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
@ -398,7 +397,7 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
extern int __set_page_dirty_buffers(struct page *page);
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
#else /* CONFIG_BLOCK */


@ -368,8 +368,8 @@ struct address_space_operations {
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
/* Set a page dirty. Return true if this dirtied it */
int (*set_page_dirty)(struct page *page);
/* Mark a folio dirty. Return true if this dirtied it */
bool (*dirty_folio)(struct address_space *, struct folio *);
/*
* Reads in the requested pages. Unlike ->readpage(), this is
@ -388,7 +388,7 @@ struct address_space_operations {
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidatepage) (struct page *, unsigned int, unsigned int);
void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
int (*releasepage) (struct page *, gfp_t);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
@ -400,9 +400,9 @@ struct address_space_operations {
struct page *, struct page *, enum migrate_mode);
bool (*isolate_page)(struct page *, isolate_mode_t);
void (*putback_page)(struct page *);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
int (*launder_folio)(struct folio *);
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct page *, bool *, bool *);
int (*error_remove_page)(struct address_space *, struct page *);
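
Where a filesystem is backed by buffer_heads, the folio hooks above can be filled directly with the generic block helpers, as the sysv, udf and ufs hunks earlier in this diff do. A hedged sketch of such a table (filesystem name invented, unrelated methods elided):

    const struct address_space_operations example_aops = {
            .dirty_folio            = block_dirty_folio,
            .invalidate_folio       = block_invalidate_folio,
            .is_partially_uptodate  = block_is_partially_uptodate,
            /* readpage, writepage, write_begin/write_end, ... */
    };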
@ -3232,8 +3232,6 @@ extern int simple_rename(struct user_namespace *, struct inode *,
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern void noop_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping,


@ -616,9 +616,11 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
}
#if __fscache_available
extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie);
bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
struct fscache_cookie *cookie);
#else
#define fscache_set_page_dirty(PAGE, COOKIE) (__set_page_dirty_nobuffers((PAGE)))
#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
filemap_dirty_folio(MAPPING, FOLIO)
#endif
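
A filesystem using fscache can then implement ->dirty_folio() as a thin wrapper around this helper. A hypothetical sketch, where example_cookie() stands in for whatever accessor returns the inode's cache cookie:

    static bool example_netfs_dirty_folio(struct address_space *mapping,
                                          struct folio *folio)
    {
            /* Pin cache resources for writeback while the folio is dirty. */
            return fscache_dirty_folio(mapping, folio,
                                       example_cookie(mapping->host));
    }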
/**
@ -626,7 +628,7 @@ extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cook
* @wbc: The writeback control
* @cookie: The cookie referring to the cache object
*
* Unpin the writeback resources pinned by fscache_set_page_dirty(). This is
* Unpin the writeback resources pinned by fscache_dirty_folio(). This is
* intended to be called by the netfs's ->write_inode() method.
*/
static inline void fscache_unpin_writeback(struct writeback_control *wbc,


@ -227,12 +227,9 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
void iomap_invalidatepage(struct page *page, unsigned int offset,
unsigned int len);
#ifdef CONFIG_MIGRATION
int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode);


@ -1527,8 +1527,8 @@ void jbd2_journal_set_triggers(struct buffer_head *,
struct jbd2_buffer_trigger_type *type);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
extern int jbd2_journal_invalidatepage(journal_t *,
struct page *, unsigned int, unsigned int);
int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
size_t offset, size_t length);
extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
extern int jbd2_journal_stop(handle_t *);
extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);


@ -1947,9 +1947,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
struct page *get_dump_page(unsigned long addr);
extern void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
bool folio_mark_dirty(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);


@ -583,7 +583,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page *page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
extern void nfs_commit_free(struct nfs_commit_data *data);


@ -531,6 +531,24 @@ static inline struct folio *filemap_get_folio(struct address_space *mapping,
return __filemap_get_folio(mapping, index, 0, 0);
}
/**
* filemap_lock_folio - Find and lock a folio.
* @mapping: The address_space to search.
* @index: The page index.
*
* Looks up the page cache entry at @mapping & @index. If a folio is
* present, it is returned locked with an increased refcount.
*
* Context: May sleep.
* Return: A folio or %NULL if there is no folio in the cache for this
* index. Will not return a shadow, swap or DAX entry.
*/
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
pgoff_t index)
{
return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}
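
A caller-side sketch for filemap_lock_folio() (hypothetical; the work on the folio is elided):

    struct folio *folio = filemap_lock_folio(mapping, index);

    if (folio) {
            /* Locked, with a reference held; drop both when done. */
            folio_unlock(folio);
            folio_put(folio);
    }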
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
@ -738,15 +756,15 @@ extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
pgoff_t index, void *data)
pgoff_t index, struct file *file)
{
return read_cache_page(mapping, index, NULL, data);
return read_cache_page(mapping, index, NULL, file);
}
static inline struct folio *read_mapping_folio(struct address_space *mapping,
pgoff_t index, void *data)
pgoff_t index, struct file *file)
{
return read_cache_folio(mapping, index, NULL, data);
return read_cache_folio(mapping, index, NULL, file);
}
/*
@ -1006,6 +1024,7 @@ static inline void cancel_dirty_page(struct page *page)
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page)
{
@ -1013,7 +1032,7 @@ static inline int __must_check write_one_page(struct page *page)
}
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
void page_endio(struct page *page, bool is_write, int err);


@ -428,7 +428,7 @@ extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
bio_end_io_t end_write_func);
extern int swap_set_page_dirty(struct page *page);
bool swap_dirty_folio(struct address_space *mapping, struct folio *folio);
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);


@ -608,44 +608,44 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage,
TP_ARGS(page)
);
DECLARE_EVENT_CLASS(ext4_invalidatepage_op,
TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
TP_PROTO(struct folio *folio, size_t offset, size_t length),
TP_ARGS(page, offset, length),
TP_ARGS(folio, offset, length),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( pgoff_t, index )
__field( unsigned int, offset )
__field( unsigned int, length )
__field( size_t, offset )
__field( size_t, length )
),
TP_fast_assign(
__entry->dev = page->mapping->host->i_sb->s_dev;
__entry->ino = page->mapping->host->i_ino;
__entry->index = page->index;
__entry->dev = folio->mapping->host->i_sb->s_dev;
__entry->ino = folio->mapping->host->i_ino;
__entry->index = folio->index;
__entry->offset = offset;
__entry->length = length;
),
TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->index,
__entry->offset, __entry->length)
);
DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage,
TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
DEFINE_EVENT(ext4_invalidate_folio_op, ext4_invalidate_folio,
TP_PROTO(struct folio *folio, size_t offset, size_t length),
TP_ARGS(page, offset, length)
TP_ARGS(folio, offset, length)
);
DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage,
TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
DEFINE_EVENT(ext4_invalidate_folio_op, ext4_journalled_invalidate_folio,
TP_PROTO(struct folio *folio, size_t offset, size_t length),
TP_ARGS(page, offset, length)
TP_ARGS(folio, offset, length)
);
TRACE_EVENT(ext4_discard_blocks,


@ -72,7 +72,7 @@
* Lock ordering:
*
* ->i_mmap_rwsem (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->private_lock (__free_pte->block_dirty_folio)
* ->swap_lock (exclusive_swap_page, others)
* ->i_pages lock
*
@ -115,7 +115,7 @@
* ->memcg->move_lock (page_remove_rmap->lock_page_memcg)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->__set_page_dirty_buffers)
* ->private_lock (zap_pte_range->block_dirty_folio)
*
* ->i_mmap_rwsem
* ->tasklist_lock (memory_failure, collect_procs_ao)
@ -2464,7 +2464,7 @@ static bool filemap_range_uptodate(struct address_space *mapping,
pos -= folio_pos(folio);
}
return mapping->a_ops->is_partially_uptodate(&folio->page, pos, count);
return mapping->a_ops->is_partially_uptodate(folio, pos, count);
}
static int filemap_update_page(struct kiocb *iocb,
@ -2856,7 +2856,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas,
offset = offset_in_folio(folio, start) & ~(bsz - 1);
do {
if (ops->is_partially_uptodate(&folio->page, offset, bsz) ==
if (ops->is_partially_uptodate(folio, offset, bsz) ==
seek_data)
break;
start = (start + bsz) & ~(bsz - 1);


@ -2418,13 +2418,13 @@ EXPORT_SYMBOL(folio_write_one);
/*
* For address_spaces which do not use buffers nor write back.
*/
int __set_page_dirty_no_writeback(struct page *page)
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
{
if (!PageDirty(page))
return !TestSetPageDirty(page);
return 0;
if (!folio_test_dirty(folio))
return !folio_test_set_dirty(folio);
return false;
}
EXPORT_SYMBOL(__set_page_dirty_no_writeback);
EXPORT_SYMBOL(noop_dirty_folio);
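
The return convention of __set_page_dirty_no_writeback() is preserved: the call reports true only on the clean-to-dirty transition. A toy illustration, assuming the folio starts clean:

    /* First call dirties the folio; the second sees it already dirty. */
    bool newly = noop_dirty_folio(mapping, folio);  /* true */
    bool again = noop_dirty_folio(mapping, folio);  /* false */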
/*
* Helper function for set_page_dirty family.
@ -2518,7 +2518,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
* This is also sometimes used by filesystems which use buffer_heads when
* a single buffer is being dirtied: we want to set the folio dirty in
* that case, but not all the buffers. This is a "bottom-up" dirtying,
* whereas __set_page_dirty_buffers() is a "top-down" dirtying.
* whereas block_dirty_folio() is a "top-down" dirtying.
*
* The caller must ensure this doesn't race with truncation. Most will
* simply hold the folio lock, but e.g. zap_pte_range() calls with the
@ -2604,7 +2604,7 @@ EXPORT_SYMBOL(folio_redirty_for_writepage);
* folio_mark_dirty - Mark a folio as being modified.
* @folio: The folio.
*
* For folios with a mapping this should be done under the page lock
* For folios with a mapping this should be done with the folio lock held
* for the benefit of asynchronous memory errors who prefer a consistent
* dirty state. This rule can be broken in some special cases,
* but should be better not to.
@ -2618,23 +2618,21 @@ bool folio_mark_dirty(struct folio *folio)
if (likely(mapping)) {
/*
* readahead/lru_deactivate_page could remain
* PG_readahead/PG_reclaim due to race with end_page_writeback
* About readahead, if the page is written, the flags would be
* PG_readahead/PG_reclaim due to race with folio_end_writeback
* About readahead, if the folio is written, the flags would be
* reset. So no problem.
* About lru_deactivate_page, if the page is redirty, the flag
* will be reset. So no problem. but if the page is used by readahead
* it will confuse readahead and make it restart the size rampup
* process. But it's a trivial problem.
* About lru_deactivate_page, if the folio is redirtied,
* the flag will be reset. So no problem. but if the
* folio is used by readahead it will confuse readahead
* and make it restart the size rampup process. But it's
* a trivial problem.
*/
if (folio_test_reclaim(folio))
folio_clear_reclaim(folio);
return mapping->a_ops->set_page_dirty(&folio->page);
return mapping->a_ops->dirty_folio(mapping, folio);
}
if (!folio_test_dirty(folio)) {
if (!folio_test_set_dirty(folio))
return true;
}
return false;
return noop_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL(folio_mark_dirty);


@ -439,16 +439,19 @@ int swap_readpage(struct page *page, bool synchronous)
return ret;
}
int swap_set_page_dirty(struct page *page)
bool swap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
struct swap_info_struct *sis = page_swap_info(page);
struct swap_info_struct *sis = swp_swap_info(folio_swap_entry(folio));
if (data_race(sis->flags & SWP_FS_OPS)) {
struct address_space *mapping = sis->swap_file->f_mapping;
const struct address_space_operations *aops;
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return mapping->a_ops->set_page_dirty(page);
mapping = sis->swap_file->f_mapping;
aops = mapping->a_ops;
VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
return aops->dirty_folio(mapping, folio);
} else {
return __set_page_dirty_no_writeback(page);
return noop_dirty_folio(mapping, folio);
}
}


@ -156,7 +156,7 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping,
if (!trylock_page(page))
BUG();
page->mapping = mapping;
do_invalidatepage(page, 0, PAGE_SIZE);
folio_invalidate(page_folio(page), 0, PAGE_SIZE);
page->mapping = NULL;
unlock_page(page);
}


@ -31,8 +31,8 @@
* mm->page_table_lock or pte_lock
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
* mapping->private_lock (in __set_page_dirty_buffers)
* lock_page_memcg move_lock (in __set_page_dirty_buffers)
* mapping->private_lock (in block_dirty_folio)
* folio_lock_memcg move_lock (in block_dirty_folio)
* i_pages lock (widely used)
* lruvec->lru_lock (in folio_lruvec_lock_irq)
* inode->i_lock (in set_page_dirty's __mark_inode_dirty)


@ -152,7 +152,7 @@ static void secretmem_freepage(struct page *page)
}
const struct address_space_operations secretmem_aops = {
.set_page_dirty = __set_page_dirty_no_writeback,
.dirty_folio = noop_dirty_folio,
.freepage = secretmem_freepage,
.migratepage = secretmem_migratepage,
.isolate_page = secretmem_isolate_page,


@ -3756,7 +3756,7 @@ static int shmem_error_remove_page(struct address_space *mapping,
const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_no_writeback,
.dirty_folio = noop_dirty_folio,
#ifdef CONFIG_TMPFS
.write_begin = shmem_write_begin,
.write_end = shmem_write_end,


@ -30,7 +30,7 @@
*/
static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
.set_page_dirty = swap_set_page_dirty,
.dirty_folio = swap_dirty_folio,
#ifdef CONFIG_MIGRATION
.migratepage = migrate_page,
#endif


@ -19,8 +19,7 @@
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h> /* grr. try_to_release_page,
do_invalidatepage */
#include <linux/buffer_head.h> /* grr. try_to_release_page */
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"
@ -138,33 +137,28 @@ static int invalidate_exceptional_entry2(struct address_space *mapping,
}
/**
* do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected
* folio_invalidate - Invalidate part or all of a folio.
* @folio: The folio which is affected.
* @offset: start of the range to invalidate
* @length: length of the range to invalidate
*
* do_invalidatepage() is called when all or part of the page has become
* folio_invalidate() is called when all or part of the folio has become
* invalidated by a truncate operation.
*
* do_invalidatepage() does not have to release all buffers, but it must
* folio_invalidate() does not have to release all buffers, but it must
* ensure that no dirty buffer is left outside @offset and that no I/O
* is underway against any of the blocks which are outside the truncation
* point. Because the caller is about to free (and possibly reuse) those
* blocks on-disk.
*/
void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
void folio_invalidate(struct folio *folio, size_t offset, size_t length)
{
void (*invalidatepage)(struct page *, unsigned int, unsigned int);
const struct address_space_operations *aops = folio->mapping->a_ops;
invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
if (!invalidatepage)
invalidatepage = block_invalidatepage;
#endif
if (invalidatepage)
(*invalidatepage)(page, offset, length);
if (aops->invalidate_folio)
aops->invalidate_folio(folio, offset, length);
}
EXPORT_SYMBOL_GPL(folio_invalidate);
/*
* If truncate cannot remove the fs-private metadata from the page, the page
@ -182,7 +176,7 @@ static void truncate_cleanup_folio(struct folio *folio)
unmap_mapping_folio(folio);
if (folio_has_private(folio))
do_invalidatepage(&folio->page, 0, folio_size(folio));
folio_invalidate(folio, 0, folio_size(folio));
/*
* Some filesystems seem to re-dirty the page even after
@ -243,7 +237,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
folio_zero_range(folio, offset, length);
if (folio_has_private(folio))
do_invalidatepage(&folio->page, offset, length);
folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
if (split_huge_page(&folio->page) == 0)
@ -329,7 +323,7 @@ long invalidate_inode_page(struct page *page)
* mapping is large, it is probably the case that the final pages are the most
* recently touched, and freeing happens in ascending file offset order.
*
* Note that since ->invalidatepage() accepts range to invalidate
* Note that since ->invalidate_folio() accepts range to invalidate
* truncate_inode_pages_range is able to handle cases where lend + 1 is not
* page aligned properly.
*/
@ -611,13 +605,13 @@ static int invalidate_complete_folio2(struct address_space *mapping,
return 0;
}
static int do_launder_folio(struct address_space *mapping, struct folio *folio)
static int folio_launder(struct address_space *mapping, struct folio *folio)
{
if (!folio_test_dirty(folio))
return 0;
if (folio->mapping != mapping || mapping->a_ops->launder_page == NULL)
if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
return 0;
return mapping->a_ops->launder_page(&folio->page);
return mapping->a_ops->launder_folio(folio);
}
/**
@ -683,7 +677,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
unmap_mapping_folio(folio);
BUG_ON(folio_mapped(folio));
ret2 = do_launder_folio(mapping, folio);
ret2 = folio_launder(mapping, folio);
if (ret2 == 0) {
if (!invalidate_complete_folio2(mapping, folio))
ret2 = -EBUSY;