mm,fs: Remove aops->readpage
With all implementations of aops->readpage converted to aops->read_folio,
we can stop checking whether it's set and remove the member from aops.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
commit 7e0a126519
parent 0f312591d6
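For context, a minimal sketch (not part of this commit) of what an address_space_operations table looks like after the conversion: a filesystem supplies only ->read_folio, which is the single hook the mmap, swapon and uprobe checks below now test for. The "myfs" names are hypothetical.

/* Minimal sketch, assuming a hypothetical filesystem "myfs". */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/* ->read_folio is called with the folio locked and must unlock it. */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	/* A trivial read: zero-fill the folio and mark it up to date. */
	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

static const struct address_space_operations myfs_aops = {
	.read_folio	= myfs_read_folio,	/* formerly .readpage */
};

With only one hook left to probe, the generic_file_mmap(), swapon() and uprobe registration checks in the hunks below each reduce to a single !a_ops->read_folio test.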
@@ -2402,7 +2402,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = filp->f_mapping;
 
-	if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
+	if (!mapping->a_ops->read_folio)
 		return -ENOEXEC;
 
 	file_accessed(filp);
@@ -2827,10 +2827,7 @@ int nobh_truncate_page(struct address_space *mapping,
 
 	/* Ok, it's mapped. Make sure it's up-to-date */
 	if (!folio_test_uptodate(folio)) {
-		if (mapping->a_ops->read_folio)
-			err = mapping->a_ops->read_folio(NULL, folio);
-		else
-			err = mapping->a_ops->readpage(NULL, &folio->page);
+		err = mapping->a_ops->read_folio(NULL, folio);
 		if (err) {
 			folio_put(folio);
 			goto out;
@@ -1772,7 +1772,7 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = file->f_mapping;
 
-	if (!mapping->a_ops->readpage && !mapping->a_ops->read_folio)
+	if (!mapping->a_ops->read_folio)
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &ceph_vmops;
@@ -262,7 +262,7 @@ struct iattr {
  * trying again. The aop will be taking reasonable
  * precautions not to livelock. If the caller held a page
  * reference, it should drop it before retrying. Returned
- * by readpage().
+ * by read_folio().
  *
  * address_space_operation functions return these large constants to indicate
  * special semantics to the caller. These are much larger than the bytes in a
@@ -335,7 +335,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
 
 struct address_space_operations {
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
-	int (*readpage)(struct file *, struct page *);
 	int (*read_folio)(struct file *, struct folio *);
 
 	/* Write back some dirty pages from this mapping. */
@@ -787,10 +787,10 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
 	struct page *page;
 	/*
 	 * Ensure that the page that has the original instruction is populated
-	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
+	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
 	 * see uprobe_register().
 	 */
-	if (mapping->a_ops->read_folio || mapping->a_ops->readpage)
+	if (mapping->a_ops->read_folio)
 		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
 	else
 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
@@ -1144,7 +1144,6 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
 
 	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
 	if (!inode->i_mapping->a_ops->read_folio &&
-	    !inode->i_mapping->a_ops->readpage &&
 	    !shmem_mapping(inode->i_mapping))
 		return -EIO;
 	/* Racy, just to catch the obvious mistakes */
mm/filemap.c (13 changed lines):
@@ -2414,15 +2414,12 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
 
 	/*
	 * A previous I/O error may have been due to temporary failures,
-	 * eg. multipath errors. PG_error will be set again if readpage
+	 * eg. multipath errors. PG_error will be set again if read_folio
	 * fails.
	 */
 	folio_clear_error(folio);
 	/* Start the actual read. The read will unlock the page. */
-	if (mapping->a_ops->read_folio)
-		error = mapping->a_ops->read_folio(file, folio);
-	else
-		error = mapping->a_ops->readpage(file, &folio->page);
+	error = mapping->a_ops->read_folio(file, folio);
 	if (error)
 		return error;
 
@@ -2639,7 +2636,7 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
  * @already_read: Number of bytes already read by the caller.
  *
  * Copies data from the page cache. If the data is not currently present,
- * uses the readahead and readpage address_space operations to fetch it.
+ * uses the readahead and read_folio address_space operations to fetch it.
  *
  * Return: Total number of bytes copied, including those already read by
 * the caller. If an error happens before any bytes are copied, returns
@@ -3450,7 +3447,7 @@ int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct address_space *mapping = file->f_mapping;
 
-	if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage)
+	if (!mapping->a_ops->read_folio)
 		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &generic_file_vm_ops;
@@ -3508,10 +3505,8 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
 filler:
 	if (filler)
 		err = filler(data, &folio->page);
-	else if (mapping->a_ops->read_folio)
-		err = mapping->a_ops->read_folio(data, folio);
 	else
-		err = mapping->a_ops->readpage(data, &folio->page);
+		err = mapping->a_ops->read_folio(data, folio);
 
 	if (err < 0) {
 		folio_put(folio);
@@ -555,11 +555,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 	dump_page(page, "bad pte");
 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
 		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
-	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
+	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
 		 vma->vm_file,
 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
-		 mapping ? mapping->a_ops->readpage : NULL);
+		 mapping ? mapping->a_ops->read_folio : NULL);
 	dump_stack();
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
@@ -170,12 +170,9 @@ static void read_pages(struct readahead_control *rac)
 			}
 			folio_unlock(folio);
 		}
-	} else if (aops->read_folio) {
-		while ((folio = readahead_folio(rac)) != NULL)
-			aops->read_folio(rac->file, folio);
 	} else {
 		while ((folio = readahead_folio(rac)) != NULL)
-			aops->readpage(rac->file, &folio->page);
+			aops->read_folio(rac->file, folio);
 	}
 
 	blk_finish_plug(&plug);
@@ -256,8 +253,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	}
 
 	/*
-	 * Now start the IO. We ignore I/O errors - if the page is not
-	 * uptodate then the caller will launch readpage again, and
+	 * Now start the IO. We ignore I/O errors - if the folio is not
+	 * uptodate then the caller will launch read_folio again, and
	 * will then handle the error.
	 */
 	read_pages(ractl);
@@ -305,8 +302,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
 
-	if (unlikely(!mapping->a_ops->read_folio &&
-	    !mapping->a_ops->readpage && !mapping->a_ops->readahead))
+	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
 
 	/*
@@ -4162,7 +4162,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
- * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * But read_cache_page_gfp() uses the ->read_folio() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
@@ -3041,7 +3041,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	/*
	 * Read the swap header.
	 */
-	if (!mapping->a_ops->read_folio && !mapping->a_ops->readpage) {
+	if (!mapping->a_ops->read_folio) {
 		error = -EINVAL;
 		goto bad_swap_unlock_inode;
 	}