Convert much of the page cache to use folios

This patchset stops just short of actually enabling large folios.
 It converts everything that I noticed needs to be converted, but there may
 still be places I've overlooked which still have page size assumptions.
 The big change here is using large entries in the page cache XArray
 instead of many small entries.  That only affects shmem for now, but
 it's a pretty big change for shmem since it changes where memory needs
 to be allocated (at split time instead of insertion).
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEejHryeLBw/spnjHrDpNsjXcpgj4FAmHcraoACgkQDpNsjXcp
 gj7C3wgAl0cjtdVzTpkLmbnInsicW1m3thnbkSXYbpqRccFjpu2kEBGj31PT+oGz
 dzgXP7SNZ/VkFT+qWtmHSRF/J41B6f9bFojO81B2aQdpRiziU+5QbSbXbfUjwVhE
 GJF0WGSJtVqySKynXP/iYTEt2zj6BiVperAwIqzhZpPY7gNoyDgeRD34Xy5bQqdD
 ey6/Uwkh7oFHLEDcgxsEnyF0tUR3q+gpe5XZW1fb79p3crWw44xATc3UvKv8qCLC
 Rd4oHmKkOj4MvdiUxJEfXI+XxgrkQ8XRO70B+p6ZljhDaoDZYw7ullxA0gvlSpNX
 6pnjSQlKA1VQXsi6PMSt+9vf26XxaQ==
 =KeYZ
 -----END PGP SIGNATURE-----

Merge tag 'folio-5.17' of git://git.infradead.org/users/willy/pagecache

Pull folio conversion updates from Matthew Wilcox:
 "Convert much of the page cache to use folios

  This stops just short of actually enabling large folios. It converts
  everything that I noticed needs to be converted, but there may still
  be places I've overlooked which still have page size assumptions.

  The big change here is using large entries in the page cache XArray
  instead of many small entries. That only affects shmem for now, but
  it's a pretty big change for shmem since it changes where memory needs
  to be allocated (at split time instead of insertion)"
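
  [ Illustration, not part of this series: with multi-index entries, a
    2MB THP is indexed by a single order-9 entry instead of 512 per-page
    entries.  The xas_set_order()/xas_store() pattern this relies on is
    visible in the khugepaged and shmem hunks below; "mapping", "index"
    and "folio" here are placeholders, not code from the series. ]

        XA_STATE(xas, &mapping->i_pages, index);

        xas_lock_irq(&xas);
        xas_set_order(&xas, index, 9);  /* one multi-index entry ...          */
        xas_store(&xas, folio);         /* ... instead of 512 per-page stores */
        xas_unlock_irq(&xas);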

* tag 'folio-5.17' of git://git.infradead.org/users/willy/pagecache: (49 commits)
  mm: Use multi-index entries in the page cache
  XArray: Add xas_advance()
  truncate,shmem: Handle truncates that split large folios
  truncate: Convert invalidate_inode_pages2_range to folios
  fs: Convert vfs_dedupe_file_range_compare to folios
  mm: Remove pagevec_remove_exceptionals()
  mm: Convert find_lock_entries() to use a folio_batch
  filemap: Return only folios from find_get_entries()
  filemap: Convert filemap_get_read_batch() to use a folio_batch
  filemap: Convert filemap_read() to use a folio
  truncate: Add invalidate_complete_folio2()
  truncate: Convert invalidate_inode_pages2_range() to use a folio
  truncate: Skip known-truncated indices
  truncate,shmem: Add truncate_inode_folio()
  shmem: Convert part of shmem_undo_range() to use a folio
  mm: Add unmap_mapping_folio()
  truncate: Add truncate_cleanup_folio()
  filemap: Add filemap_release_folio()
  filemap: Use a folio in filemap_page_mkwrite
  filemap: Use a folio in filemap_map_pages
  ...
Linus Torvalds 2022-01-12 12:37:02 -08:00
commit 6020c204be
25 changed files with 1113 additions and 1012 deletions


@@ -28,6 +28,8 @@
 #include <linux/fscrypt.h>
 #include <linux/fsverity.h>
 
+struct pagevec;
+
 #ifdef CONFIG_F2FS_CHECK_FS
 #define f2fs_bug_on(sbi, condition)     BUG_ON(condition)
 #else


@@ -372,7 +372,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
 {
        struct address_space *mapping = inode->i_mapping;
        XA_STATE(xas, &mapping->i_pages, 0);
-       struct page *page;
+       struct folio *folio;
        bool switched = false;
 
        spin_lock(&inode->i_lock);
@@ -389,21 +389,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
 
        /*
         * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
-        * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
-        * pages actually under writeback.
+        * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to
+        * folios actually under writeback.
         */
-       xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
-               if (PageDirty(page)) {
-                       dec_wb_stat(old_wb, WB_RECLAIMABLE);
-                       inc_wb_stat(new_wb, WB_RECLAIMABLE);
+       xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+               if (folio_test_dirty(folio)) {
+                       long nr = folio_nr_pages(folio);
+                       wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr);
+                       wb_stat_mod(new_wb, WB_RECLAIMABLE, nr);
                }
        }
 
        xas_set(&xas, 0);
-       xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
-               WARN_ON_ONCE(!PageWriteback(page));
-               dec_wb_stat(old_wb, WB_WRITEBACK);
-               inc_wb_stat(new_wb, WB_WRITEBACK);
+       xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+               long nr = folio_nr_pages(folio);
+               WARN_ON_ONCE(!folio_test_writeback(folio));
+               wb_stat_mod(old_wb, WB_WRITEBACK, -nr);
+               wb_stat_mod(new_wb, WB_WRITEBACK, nr);
        }
 
        if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {


@@ -146,41 +146,41 @@ static int generic_remap_check_len(struct inode *inode_in,
 }
 
 /* Read a page's worth of file data into the page cache. */
-static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+static struct folio *vfs_dedupe_get_folio(struct inode *inode, loff_t pos)
 {
-       struct page *page;
+       struct folio *folio;
 
-       page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
-       if (IS_ERR(page))
-               return page;
-       if (!PageUptodate(page)) {
-               put_page(page);
+       folio = read_mapping_folio(inode->i_mapping, pos >> PAGE_SHIFT, NULL);
+       if (IS_ERR(folio))
+               return folio;
+       if (!folio_test_uptodate(folio)) {
+               folio_put(folio);
                return ERR_PTR(-EIO);
        }
-       return page;
+       return folio;
 }
 
 /*
- * Lock two pages, ensuring that we lock in offset order if the pages are from
- * the same file.
+ * Lock two folios, ensuring that we lock in offset order if the folios
+ * are from the same file.
  */
-static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
 {
        /* Always lock in order of increasing index. */
-       if (page1->index > page2->index)
-               swap(page1, page2);
+       if (folio1->index > folio2->index)
+               swap(folio1, folio2);
 
-       lock_page(page1);
-       if (page1 != page2)
-               lock_page(page2);
+       folio_lock(folio1);
+       if (folio1 != folio2)
+               folio_lock(folio2);
 }
 
-/* Unlock two pages, being careful not to unlock the same page twice. */
-static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+/* Unlock two folios, being careful not to unlock the same folio twice. */
+static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
 {
-       unlock_page(page1);
-       if (page1 != page2)
-               unlock_page(page2);
+       folio_unlock(folio1);
+       if (folio1 != folio2)
+               folio_unlock(folio2);
 }
 
 /*
@@ -188,77 +188,71 @@ static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
  * Caller must have locked both inodes to prevent write races.
  */
 static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
-                                        struct inode *dest, loff_t destoff,
+                                        struct inode *dest, loff_t dstoff,
                                         loff_t len, bool *is_same)
 {
-       loff_t src_poff;
-       loff_t dest_poff;
-       void *src_addr;
-       void *dest_addr;
-       struct page *src_page;
-       struct page *dest_page;
-       loff_t cmp_len;
-       bool same;
-       int error;
-
-       error = -EINVAL;
-       same = true;
-       while (len) {
-               src_poff = srcoff & (PAGE_SIZE - 1);
-               dest_poff = destoff & (PAGE_SIZE - 1);
-               cmp_len = min(PAGE_SIZE - src_poff,
-                             PAGE_SIZE - dest_poff);
+       bool same = true;
+       int error = -EINVAL;
+
+       while (len) {
+               struct folio *src_folio, *dst_folio;
+               void *src_addr, *dst_addr;
+               loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
+                                    PAGE_SIZE - offset_in_page(dstoff));
+
                cmp_len = min(cmp_len, len);
                if (cmp_len <= 0)
                        goto out_error;
 
-               src_page = vfs_dedupe_get_page(src, srcoff);
-               if (IS_ERR(src_page)) {
-                       error = PTR_ERR(src_page);
+               src_folio = vfs_dedupe_get_folio(src, srcoff);
+               if (IS_ERR(src_folio)) {
+                       error = PTR_ERR(src_folio);
                        goto out_error;
                }
-               dest_page = vfs_dedupe_get_page(dest, destoff);
-               if (IS_ERR(dest_page)) {
-                       error = PTR_ERR(dest_page);
-                       put_page(src_page);
+               dst_folio = vfs_dedupe_get_folio(dest, dstoff);
+               if (IS_ERR(dst_folio)) {
+                       error = PTR_ERR(dst_folio);
+                       folio_put(src_folio);
                        goto out_error;
                }
 
-               vfs_lock_two_pages(src_page, dest_page);
+               vfs_lock_two_folios(src_folio, dst_folio);
 
                /*
-                * Now that we've locked both pages, make sure they're still
+                * Now that we've locked both folios, make sure they're still
                 * mapped to the file data we're interested in.  If not,
                 * someone is invalidating pages on us and we lose.
                 */
-               if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
-                   src_page->mapping != src->i_mapping ||
-                   dest_page->mapping != dest->i_mapping) {
+               if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
+                   src_folio->mapping != src->i_mapping ||
+                   dst_folio->mapping != dest->i_mapping) {
                        same = false;
                        goto unlock;
                }
 
-               src_addr = kmap_atomic(src_page);
-               dest_addr = kmap_atomic(dest_page);
+               src_addr = kmap_local_folio(src_folio,
+                                       offset_in_folio(src_folio, srcoff));
+               dst_addr = kmap_local_folio(dst_folio,
+                                       offset_in_folio(dst_folio, dstoff));
 
-               flush_dcache_page(src_page);
-               flush_dcache_page(dest_page);
+               flush_dcache_folio(src_folio);
+               flush_dcache_folio(dst_folio);
 
-               if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
+               if (memcmp(src_addr, dst_addr, cmp_len))
                        same = false;
 
-               kunmap_atomic(dest_addr);
-               kunmap_atomic(src_addr);
+               kunmap_local(dst_addr);
+               kunmap_local(src_addr);
 
 unlock:
-               vfs_unlock_two_pages(src_page, dest_page);
-               put_page(dest_page);
-               put_page(src_page);
+               vfs_unlock_two_folios(src_folio, dst_folio);
+               folio_put(dst_folio);
+               folio_put(src_folio);
 
                if (!same)
                        break;
 
                srcoff += cmp_len;
-               destoff += cmp_len;
+               dstoff += cmp_len;
                len -= cmp_len;
        }


@@ -274,6 +274,15 @@ static inline int thp_nr_pages(struct page *page)
        return 1;
 }
 
+/**
+ * folio_test_pmd_mappable - Can we map this folio with a PMD?
+ * @folio: The folio to test
+ */
+static inline bool folio_test_pmd_mappable(struct folio *folio)
+{
+       return folio_order(folio) >= HPAGE_PMD_ORDER;
+}
+
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
@@ -339,6 +348,11 @@ static inline int thp_nr_pages(struct page *page)
        return 1;
 }
 
+static inline bool folio_test_pmd_mappable(struct folio *folio)
+{
+       return false;
+}
+
 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
        return false;


@@ -714,6 +714,27 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
 struct mmu_gather;
 struct inode;
 
+static inline unsigned int compound_order(struct page *page)
+{
+       if (!PageHead(page))
+               return 0;
+       return page[1].compound_order;
+}
+
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages.  See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
+ */
+static inline unsigned int folio_order(struct folio *folio)
+{
+       return compound_order(&folio->page);
+}
+
 #include <linux/huge_mm.h>
 
 /*
@@ -913,27 +934,6 @@ static inline void destroy_compound_page(struct page *page)
        compound_page_dtors[page[1].compound_dtor](page);
 }
 
-static inline unsigned int compound_order(struct page *page)
-{
-       if (!PageHead(page))
-               return 0;
-       return page[1].compound_order;
-}
-
-/**
- * folio_order - The allocation order of a folio.
- * @folio: The folio.
- *
- * A folio is composed of 2^order pages.  See get_order() for the definition
- * of order.
- *
- * Return: The order of the folio.
- */
-static inline unsigned int folio_order(struct folio *folio)
-{
-       return compound_order(&folio->page);
-}
-
 static inline bool hpage_pincount_available(struct page *page)
 {
        /*
@@ -1837,28 +1837,6 @@ static inline bool can_do_mlock(void) { return false; }
 extern int user_shm_lock(size_t, struct ucounts *);
 extern void user_shm_unlock(size_t, struct ucounts *);
 
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
-       struct address_space *zap_mapping;      /* Check page->mapping if set */
-       struct page *single_page;               /* Locked page to be unmapped */
-};
-
-/*
- * We set details->zap_mappings when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
-{
-       if (!details || !page)
-               return false;
-
-       return details->zap_mapping &&
-               (details->zap_mapping != page_rmapping(page));
-}
-
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                             pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1893,7 +1871,6 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
 int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
@@ -1904,7 +1881,6 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
 extern int fixup_user_fault(struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags,
                            bool *unlocked);
-void unmap_mapping_page(struct page *page);
 void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows);
 void unmap_mapping_range(struct address_space *mapping,
@@ -1925,7 +1901,6 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
        BUG();
        return -EFAULT;
 }
-static inline void unmap_mapping_page(struct page *page) { }
 static inline void unmap_mapping_pages(struct address_space *mapping,
                pgoff_t start, pgoff_t nr, bool even_cows) { }
 static inline void unmap_mapping_range(struct address_space *mapping,
@@ -1982,7 +1957,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
                        struct page **pages);
 struct page *get_dump_page(unsigned long addr);
 
-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 extern void do_invalidatepage(struct page *page, unsigned int offset,
                              unsigned int length);
 


@@ -68,9 +68,6 @@
  * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
  * a result of MADV_FREE).
  *
- * PG_uptodate tells whether the page's contents is valid.  When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
- *
  * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
  * file-backed pagecache (see mm/vmscan.c).
  *
@@ -615,6 +612,16 @@ TESTPAGEFLAG_FALSE(Ksm, ksm)
 
 u64 stable_page_flags(struct page *page);
 
+/**
+ * folio_test_uptodate - Is this folio up to date?
+ * @folio: The folio.
+ *
+ * The uptodate flag is set on a folio when every byte in the folio is
+ * at least as new as the corresponding bytes on storage.  Anonymous
+ * and CoW folios are always uptodate.  If the folio is not uptodate,
+ * some of the bytes in it may be; see the is_partially_uptodate()
+ * address_space operation.
+ */
 static inline bool folio_test_uptodate(struct folio *folio)
 {
        bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));


@@ -16,7 +16,7 @@
 #include <linux/hardirq.h> /* for in_interrupt() */
 #include <linux/hugetlb_inline.h>
 
-struct pagevec;
+struct folio_batch;
 
 static inline bool mapping_empty(struct address_space *mapping)
 {
@@ -511,15 +511,6 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                        mapping_gfp_mask(mapping));
 }
 
-/* Does this page contain this index? */
-static inline bool thp_contains(struct page *head, pgoff_t index)
-{
-       /* HugeTLBfs indexes the page cache in units of hpage_size */
-       if (PageHuge(head))
-               return head->index == index;
-       return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
-}
-
 #define swapcache_index(folio) __page_file_index(&(folio)->page)
 
 /**
@@ -600,8 +591,6 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
        return head + (index & (thp_nr_pages(head) - 1));
 }
 
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-               pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
@@ -637,8 +626,10 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 }
 
-extern struct page * read_cache_page(struct address_space *mapping,
-                               pgoff_t index, filler_t *filler, void *data);
+struct folio *read_cache_folio(struct address_space *, pgoff_t index,
+               filler_t *filler, void *data);
+struct page *read_cache_page(struct address_space *, pgoff_t index,
+               filler_t *filler, void *data);
 extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
 extern int read_cache_pages(struct address_space *mapping,
@@ -650,6 +641,12 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
        return read_cache_page(mapping, index, NULL, data);
 }
 
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+                               pgoff_t index, void *data)
+{
+       return read_cache_folio(mapping, index, NULL, data);
+}
+
 /*
  * Get index of the page within radix-tree (but not for hugetlb pages).
  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
@@ -867,7 +864,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
        return folio_wait_locked_killable(page_folio(page));
 }
 
-int put_and_wait_on_page_locked(struct page *page, int state);
+int folio_put_wait_locked(struct folio *folio, int state);
 void wait_on_page_writeback(struct page *page);
 void folio_wait_writeback(struct folio *folio);
 int folio_wait_writeback_killable(struct folio *folio);
@@ -883,11 +880,6 @@ static inline void __set_page_dirty(struct page *page,
 }
 void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
                          struct bdi_writeback *wb);
-static inline void account_page_cleaned(struct page *page,
-               struct address_space *mapping, struct bdi_writeback *wb)
-{
-       return folio_account_cleaned(page_folio(page), mapping, wb);
-}
 void __folio_cancel_dirty(struct folio *folio);
 static inline void folio_cancel_dirty(struct folio *folio)
 {
@@ -934,11 +926,18 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp);
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+void filemap_remove_folio(struct folio *folio);
+void delete_from_page_cache(struct page *page);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+static inline void __delete_from_page_cache(struct page *page, void *shadow)
+{
+       __filemap_remove_folio(page_folio(page), shadow);
+}
 void replace_page_cache_page(struct page *old, struct page *new);
 void delete_from_page_cache_batch(struct address_space *mapping,
-                                 struct pagevec *pvec);
+                                 struct folio_batch *fbatch);
+int try_to_release_page(struct page *page, gfp_t gfp);
+bool filemap_release_folio(struct folio *folio, gfp_t gfp);
 
 loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);
@@ -1030,7 +1029,7 @@ struct readahead_control {
 void page_cache_ra_unbounded(struct readahead_control *,
                unsigned long nr_to_read, unsigned long lookahead_count);
 void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct page *,
+void page_cache_async_ra(struct readahead_control *, struct folio *,
                unsigned long req_count);
 void readahead_expand(struct readahead_control *ractl,
                      loff_t new_start, size_t new_len);
@@ -1077,7 +1076,7 @@ void page_cache_async_readahead(struct address_space *mapping,
                struct page *page, pgoff_t index, unsigned long req_count)
 {
        DEFINE_READAHEAD(ractl, file, ra, mapping, index);
-       page_cache_async_ra(&ractl, page, req_count);
+       page_cache_async_ra(&ractl, page_folio(page), req_count);
 }
 
 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
@@ -1154,16 +1153,6 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
                rac->_batch_count += thp_nr_pages(page);
-
-               /*
-                * The page cache isn't using multi-index entries yet,
-                * so the xas cursor needs to be manually moved to the
-                * next index.  This can be removed once the page cache
-                * is converted.
-                */
-               if (PageHead(page))
-                       xas_set(&xas, rac->_index + rac->_batch_count);
-
                if (i == array_sz)
                        break;
        }


@@ -15,8 +15,10 @@
 #define PAGEVEC_SIZE   15
 
 struct page;
+struct folio;
 struct address_space;
 
+/* Layout must match folio_batch */
 struct pagevec {
        unsigned char nr;
        bool percpu_pvec_drained;
@@ -25,7 +27,6 @@ struct pagevec {
 
 void __pagevec_release(struct pagevec *pvec);
 void __pagevec_lru_add(struct pagevec *pvec);
-void pagevec_remove_exceptionals(struct pagevec *pvec);
 unsigned pagevec_lookup_range(struct pagevec *pvec,
                              struct address_space *mapping,
                              pgoff_t *start, pgoff_t end);
@@ -81,4 +82,68 @@ static inline void pagevec_release(struct pagevec *pvec)
                __pagevec_release(pvec);
 }
 
+/**
+ * struct folio_batch - A collection of folios.
+ *
+ * The folio_batch is used to amortise the cost of retrieving and
+ * operating on a set of folios.  The order of folios in the batch may be
+ * significant (eg delete_from_page_cache_batch()).  Some users of the
+ * folio_batch store "exceptional" entries in it which can be removed
+ * by calling folio_batch_remove_exceptionals().
+ */
+struct folio_batch {
+       unsigned char nr;
+       bool percpu_pvec_drained;
+       struct folio *folios[PAGEVEC_SIZE];
+};
+
+/* Layout must match pagevec */
+static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch));
+static_assert(offsetof(struct pagevec, pages) ==
+               offsetof(struct folio_batch, folios));
+
+/**
+ * folio_batch_init() - Initialise a batch of folios
+ * @fbatch: The folio batch.
+ *
+ * A freshly initialised folio_batch contains zero folios.
+ */
+static inline void folio_batch_init(struct folio_batch *fbatch)
+{
+       fbatch->nr = 0;
+}
+
+static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
+{
+       return fbatch->nr;
+}
+
+static inline unsigned int fbatch_space(struct folio_batch *fbatch)
+{
+       return PAGEVEC_SIZE - fbatch->nr;
+}
+
+/**
+ * folio_batch_add() - Add a folio to a batch.
+ * @fbatch: The folio batch.
+ * @folio: The folio to add.
+ *
+ * The folio is added to the end of the batch.
+ * The batch must have previously been initialised using folio_batch_init().
+ *
+ * Return: The number of slots still available.
+ */
+static inline unsigned folio_batch_add(struct folio_batch *fbatch,
+               struct folio *folio)
+{
+       fbatch->folios[fbatch->nr++] = folio;
+       return fbatch_space(fbatch);
+}
+
+static inline void folio_batch_release(struct folio_batch *fbatch)
+{
+       pagevec_release((struct pagevec *)fbatch);
+}
+
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
 #endif /* _LINUX_PAGEVEC_H */
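
[ Usage sketch, not part of the diff: the folio_batch API above is used
  roughly like this, modelled on the shmem_undo_range() conversion later
  in this series; mapping, index, end and indices are assumed to come
  from the caller. ]

        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (find_lock_entries(mapping, index, end - 1, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        index = indices[i];
                        /* ... operate on the locked folio ... */
                        folio_unlock(folio);
                }
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
                cond_resched();
                index++;
        }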


@@ -7,6 +7,7 @@
 
 #include <linux/kernel.h>
 #include <linux/thread_info.h>
+#include <linux/mm_types.h>
 #include <uapi/linux/uio.h>
 
 struct page;
@@ -146,6 +147,12 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 
+static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
+               size_t bytes, struct iov_iter *i)
+{
+       return copy_page_to_iter(&folio->page, offset, bytes, i);
+}
+
 static __always_inline __must_check
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {


@@ -1580,6 +1580,24 @@ static inline void xas_set(struct xa_state *xas, unsigned long index)
        xas->xa_node = XAS_RESTART;
 }
 
+/**
+ * xas_advance() - Skip over sibling entries.
+ * @xas: XArray operation state.
+ * @index: Index of last sibling entry.
+ *
+ * Move the operation state to refer to the last sibling entry.
+ * This is useful for loops that normally want to see sibling
+ * entries but sometimes want to skip them.  Use xas_set() if you
+ * want to move to an index which is not part of this entry.
+ */
+static inline void xas_advance(struct xa_state *xas, unsigned long index)
+{
+       unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;
+
+       xas->xa_index = index;
+       xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
+}
+
 /**
  * xas_set_order() - Set up XArray operation state for a multislot entry.
  * @xas: XArray operation state.
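
[ Caller-pattern sketch, not from this series: xas_advance() lets an
  iteration step over all of a large folio's sibling slots at once; the
  loop body and identifiers below are placeholders. ]

        xas_for_each(&xas, folio, ULONG_MAX) {
                if (xas_retry(&xas, folio))
                        continue;
                /* ... use folio ... */
                xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
        }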


@@ -15,43 +15,45 @@
 
 DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
 
-       TP_PROTO(struct page *page),
+       TP_PROTO(struct folio *folio),
 
-       TP_ARGS(page),
+       TP_ARGS(folio),
 
        TP_STRUCT__entry(
                __field(unsigned long, pfn)
                __field(unsigned long, i_ino)
                __field(unsigned long, index)
                __field(dev_t, s_dev)
+               __field(unsigned char, order)
        ),
 
        TP_fast_assign(
-               __entry->pfn = page_to_pfn(page);
-               __entry->i_ino = page->mapping->host->i_ino;
-               __entry->index = page->index;
+               __entry->pfn = folio_pfn(folio);
+               __entry->i_ino = folio->mapping->host->i_ino;
+               __entry->index = folio->index;
 
-               if (page->mapping->host->i_sb)
-                       __entry->s_dev = page->mapping->host->i_sb->s_dev;
+               if (folio->mapping->host->i_sb)
+                       __entry->s_dev = folio->mapping->host->i_sb->s_dev;
                else
-                       __entry->s_dev = page->mapping->host->i_rdev;
+                       __entry->s_dev = folio->mapping->host->i_rdev;
+               __entry->order = folio_order(folio);
        ),
 
-       TP_printk("dev %d:%d ino %lx page=%p pfn=0x%lx ofs=%lu",
+       TP_printk("dev %d:%d ino %lx pfn=0x%lx ofs=%lu order=%u",
                MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
                __entry->i_ino,
-               pfn_to_page(__entry->pfn),
                __entry->pfn,
-               __entry->index << PAGE_SHIFT)
+               __entry->index << PAGE_SHIFT,
+               __entry->order)
 );
 
 DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
-       TP_PROTO(struct page *page),
-       TP_ARGS(page)
+       TP_PROTO(struct folio *folio),
+       TP_ARGS(folio)
        );
 
 DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
-       TP_PROTO(struct page *page),
-       TP_ARGS(page)
+       TP_PROTO(struct folio *folio),
+       TP_ARGS(folio)
        );
 
 TRACE_EVENT(filemap_set_wb_err,


@@ -69,37 +69,35 @@
 #define iterate_xarray(i, n, base, len, __off, STEP) { \
        __label__ __out; \
        size_t __off = 0; \
-       struct page *head = NULL; \
+       struct folio *folio; \
        loff_t start = i->xarray_start + i->iov_offset; \
-       unsigned offset = start % PAGE_SIZE; \
        pgoff_t index = start / PAGE_SIZE; \
-       int j; \
- \
        XA_STATE(xas, i->xarray, index); \
  \
+       len = PAGE_SIZE - offset_in_page(start); \
        rcu_read_lock(); \
-       xas_for_each(&xas, head, ULONG_MAX) { \
+       xas_for_each(&xas, folio, ULONG_MAX) { \
                unsigned left; \
-               if (xas_retry(&xas, head)) \
+               size_t offset; \
+               if (xas_retry(&xas, folio)) \
                        continue; \
-               if (WARN_ON(xa_is_value(head))) \
+               if (WARN_ON(xa_is_value(folio))) \
                        break; \
-               if (WARN_ON(PageHuge(head))) \
+               if (WARN_ON(folio_test_hugetlb(folio))) \
                        break; \
-               for (j = (head->index < index) ? index - head->index : 0; \
-                    j < thp_nr_pages(head); j++) { \
-                       void *kaddr = kmap_local_page(head + j); \
-                       base = kaddr + offset; \
-                       len = PAGE_SIZE - offset; \
+               offset = offset_in_folio(folio, start + __off); \
+               while (offset < folio_size(folio)) { \
+                       base = kmap_local_folio(folio, offset); \
                        len = min(n, len); \
                        left = (STEP); \
-                       kunmap_local(kaddr); \
+                       kunmap_local(base); \
                        len -= left; \
                        __off += len; \
                        n -= len; \
                        if (left || n == 0) \
                                goto __out; \
-                       offset = 0; \
+                       offset += len; \
                        len = PAGE_SIZE; \
                } \
        } \
 __out: \


@@ -157,7 +157,7 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
        xas->xa_index += offset << shift;
 }
 
-static void xas_advance(struct xa_state *xas)
+static void xas_next_offset(struct xa_state *xas)
 {
        xas->xa_offset++;
        xas_move_index(xas, xas->xa_offset);
@@ -1250,7 +1250,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
                xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
        }
 
-       xas_advance(xas);
+       xas_next_offset(xas);
 
        while (xas->xa_node && (xas->xa_index <= max)) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
@@ -1268,7 +1268,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
                if (entry && !xa_is_sibling(entry))
                        return entry;
 
-               xas_advance(xas);
+               xas_next_offset(xas);
        }
 
        if (!xas->xa_node)

[One file's diff suppressed because it is too large.]


@@ -140,3 +140,14 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        mapping_gfp_mask(mapping));
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
+
+void delete_from_page_cache(struct page *page)
+{
+       return filemap_remove_folio(page_folio(page));
+}
+
+int try_to_release_page(struct page *page, gfp_t gfp)
+{
+       return filemap_release_folio(page_folio(page), gfp);
+}
+EXPORT_SYMBOL(try_to_release_page);


@@ -2614,6 +2614,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct page *head = compound_head(page);
        struct deferred_split *ds_queue = get_deferred_split_queue(head);
+       XA_STATE(xas, &head->mapping->i_pages, head->index);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
        int extra_pins, ret;
@@ -2652,6 +2653,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        goto out;
                }
 
+               xas_split_alloc(&xas, head, compound_order(head),
+                               mapping_gfp_mask(mapping) & GFP_RECLAIM_MASK);
+               if (xas_error(&xas)) {
+                       ret = xas_error(&xas);
+                       goto out;
+               }
+
                anon_vma = NULL;
                i_mmap_lock_read(mapping);
 
@@ -2681,13 +2689,12 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        /* block interrupt reentry in xa_lock and spinlock */
        local_irq_disable();
        if (mapping) {
-               XA_STATE(xas, &mapping->i_pages, page_index(head));
-
                /*
                 * Check if the head page is present in page cache.
                 * We assume all tail are present too, if head is there.
                 */
-               xa_lock(&mapping->i_pages);
+               xas_lock(&xas);
+               xas_reset(&xas);
                if (xas_load(&xas) != head)
                        goto fail;
        }
@@ -2703,6 +2710,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                if (mapping) {
                        int nr = thp_nr_pages(head);
 
+                       xas_split(&xas, head, thp_order(head));
                        if (PageSwapBacked(head)) {
                                __mod_lruvec_page_state(head, NR_SHMEM_THPS,
                                                        -nr);
@@ -2719,7 +2727,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                spin_unlock(&ds_queue->split_queue_lock);
 fail:
                if (mapping)
-                       xa_unlock(&mapping->i_pages);
+                       xas_unlock(&xas);
                local_irq_enable();
                remap_page(head, thp_nr_pages(head));
                ret = -EBUSY;
@@ -2733,6 +2741,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        if (mapping)
                i_mmap_unlock_read(mapping);
 out:
+       /* Free any memory we didn't use */
+       xas_nomem(&xas, 0);
        count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
        return ret;
 }
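
[ Note: this is where the "allocated at split time instead of insertion"
  point from the pull message lands.  Condensed, with a hypothetical gfp
  placeholder, the ordering the hunks above establish is: ]

        xas_split_alloc(&xas, head, compound_order(head), gfp); /* may fail, done early   */
        xas_lock(&xas);
        xas_split(&xas, head, thp_order(head));                 /* uses preallocated nodes */
        xas_unlock(&xas);
        xas_nomem(&xas, 0);                                     /* free any unused memory  */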


@@ -12,6 +12,8 @@
 #include <linux/pagemap.h>
 #include <linux/tracepoint-defs.h>
 
+struct folio_batch;
+
 /*
  * The set of flags that only affect watermark checking and reclaim
  * behaviour. This is used by the MM to obey the caller constraints
@@ -74,6 +76,7 @@ static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
        return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
 }
 
+struct zap_details;
 void unmap_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end,
@@ -90,7 +93,13 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 }
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
-               pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
+               pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+               pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
+void filemap_free_folio(struct address_space *mapping, struct folio *folio);
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
+bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
+               loff_t end);
 
 /**
  * folio_evictable - Test whether a folio is evictable.
@@ -388,6 +397,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
 
 #ifdef CONFIG_MMU
+void unmap_mapping_folio(struct folio *folio);
 extern long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked);
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
@@ -491,8 +501,8 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
        }
        return fpin;
 }
-
 #else /* !CONFIG_MMU */
+static inline void unmap_mapping_folio(struct folio *folio) { }
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)


@@ -1667,7 +1667,10 @@ static void collapse_file(struct mm_struct *mm,
        }
        count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
 
-       /* This will be less messy when we use multi-index entries */
+       /*
+        * Ensure we have slots for all the pages in the range.  This is
+        * almost certainly a no-op because most of the pages must be present
+        */
        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
@@ -1892,6 +1895,9 @@ static void collapse_file(struct mm_struct *mm,
                __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
        }
 
+       /* Join all the small entries into a single multi-index entry */
+       xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+       xas_store(&xas, new_page);
 xa_locked:
        xas_unlock_irq(&xas);
 xa_unlocked:
@@ -2013,6 +2019,10 @@ static void khugepaged_scan_file(struct mm_struct *mm,
                        continue;
                }
 
+               /*
+                * XXX: khugepaged should compact smaller compound pages
+                * into a PMD sized page
+                */
                if (PageTransCompound(page)) {
                        result = SCAN_PAGE_COMPOUND;
                        break;


@@ -1304,6 +1304,28 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
        return ret;
 }
 
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+       struct address_space *zap_mapping;      /* Check page->mapping if set */
+       struct folio *single_folio;     /* Locked folio to be unmapped */
+};
+
+/*
+ * We set details->zap_mapping when we want to unmap shared but keep private
+ * pages. Return true if skip zapping this page, false otherwise.
+ */
+static inline bool
+zap_skip_check_mapping(struct zap_details *details, struct page *page)
+{
+       if (!details || !page)
+               return false;
+
+       return details->zap_mapping &&
+               (details->zap_mapping != page_rmapping(page));
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
@@ -1443,8 +1465,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                        else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
                        /* fall through */
-               } else if (details && details->single_page &&
-                          PageTransCompound(details->single_page) &&
+               } else if (details && details->single_folio &&
+                          folio_test_pmd_mappable(details->single_folio) &&
                           next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
                        spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
                        /*
@@ -3332,31 +3354,30 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 }
 
 /**
- * unmap_mapping_page() - Unmap single page from processes.
- * @page: The locked page to be unmapped.
+ * unmap_mapping_folio() - Unmap single folio from processes.
+ * @folio: The locked folio to be unmapped.
  *
- * Unmap this page from any userspace process which still has it mmaped.
+ * Unmap this folio from any userspace process which still has it mmaped.
  * Typically, for efficiency, the range of nearby pages has already been
  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
- * truncation or invalidation holds the lock on a page, it may find that
- * the page has been remapped again: and then uses unmap_mapping_page()
+ * truncation or invalidation holds the lock on a folio, it may find that
+ * the page has been remapped again: and then uses unmap_mapping_folio()
  * to unmap it finally.
  */
-void unmap_mapping_page(struct page *page)
+void unmap_mapping_folio(struct folio *folio)
 {
-       struct address_space *mapping = page->mapping;
+       struct address_space *mapping = folio->mapping;
        struct zap_details details = { };
        pgoff_t first_index;
        pgoff_t last_index;
 
-       VM_BUG_ON(!PageLocked(page));
-       VM_BUG_ON(PageTail(page));
+       VM_BUG_ON(!folio_test_locked(folio));
 
-       first_index = page->index;
-       last_index = page->index + thp_nr_pages(page) - 1;
+       first_index = folio->index;
+       last_index = folio->index + folio_nr_pages(folio) - 1;
 
        details.zap_mapping = mapping;
-       details.single_page = page;
+       details.single_folio = folio;
 
        i_mmap_lock_write(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))


@@ -291,7 +291,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 {
        pte_t pte;
        swp_entry_t entry;
-       struct page *page;
+       struct folio *folio;
 
        spin_lock(ptl);
        pte = *ptep;
@@ -302,18 +302,17 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
        if (!is_migration_entry(entry))
                goto out;
 
-       page = pfn_swap_entry_to_page(entry);
-       page = compound_head(page);
+       folio = page_folio(pfn_swap_entry_to_page(entry));
 
        /*
         * Once page cache replacement of page migration started, page_count
-        * is zero; but we must not call put_and_wait_on_page_locked() without
-        * a ref. Use get_page_unless_zero(), and just fault again if it fails.
+        * is zero; but we must not call folio_put_wait_locked() without
+        * a ref. Use folio_try_get(), and just fault again if it fails.
         */
-       if (!get_page_unless_zero(page))
+       if (!folio_try_get(folio))
                goto out;
        pte_unmap_unlock(ptep, ptl);
-       put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+       folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
        return;
 out:
        pte_unmap_unlock(ptep, ptl);
@@ -338,16 +337,16 @@ void migration_entry_wait_huge(struct vm_area_struct *vma,
 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct folio *folio;
 
        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
-       page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
-       if (!get_page_unless_zero(page))
+       folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
+       if (!folio_try_get(folio))
                goto unlock;
        spin_unlock(ptl);
-       put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+       folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
        return;
 unlock:
        spin_unlock(ptl);
@@ -434,14 +433,6 @@ int folio_migrate_mapping(struct address_space *mapping,
        }
 
        xas_store(&xas, newfolio);
-       if (nr > 1) {
-               int i;
-
-               for (i = 1; i < nr; i++) {
-                       xas_next(&xas);
-                       xas_store(&xas, newfolio);
-               }
-       }
 
        /*
         * Drop cache reference from old page by unfreezing


@@ -2496,7 +2496,11 @@ void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
  * If warn is true, then emit a warning if the folio is not uptodate and has
  * not been truncated.
  *
- * The caller must hold lock_page_memcg().
+ * The caller must hold lock_page_memcg().  Most callers have the folio
+ * locked.  A few have the folio blocked from truncation through other
+ * means (eg zap_page_range() has it mapped and is holding the page table
+ * lock).  This can also be called from mark_buffer_dirty(), which I
+ * cannot prove is always protected against truncate.
  */
 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
                             int warn)


@@ -196,9 +196,9 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
         * Preallocate as many pages as we will need.
         */
        for (i = 0; i < nr_to_read; i++) {
-               struct page *page = xa_load(&mapping->i_pages, index + i);
+               struct folio *folio = xa_load(&mapping->i_pages, index + i);
 
-               if (page && !xa_is_value(page)) {
+               if (folio && !xa_is_value(folio)) {
                        /*
                         * Page already present?  Kick off the current batch
                         * of contiguous pages before continuing with the
@@ -212,21 +212,21 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
                        continue;
                }
 
-               page = __page_cache_alloc(gfp_mask);
-               if (!page)
+               folio = filemap_alloc_folio(gfp_mask, 0);
+               if (!folio)
                        break;
                if (mapping->a_ops->readpages) {
-                       page->index = index + i;
-                       list_add(&page->lru, &page_pool);
-               } else if (add_to_page_cache_lru(page, mapping, index + i,
+                       folio->index = index + i;
+                       list_add(&folio->lru, &page_pool);
+               } else if (filemap_add_folio(mapping, folio, index + i,
                                        gfp_mask) < 0) {
-                       put_page(page);
+                       folio_put(folio);
                        read_pages(ractl, &page_pool, true);
                        i = ractl->_index + ractl->_nr_pages - index - 1;
                        continue;
                }
                if (i == nr_to_read - lookahead_size)
-                       SetPageReadahead(page);
+                       folio_set_readahead(folio);
                ractl->_nr_pages++;
        }
 
@@ -581,7 +581,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
 void page_cache_async_ra(struct readahead_control *ractl,
-               struct page *page, unsigned long req_count)
+               struct folio *folio, unsigned long req_count)
 {
        /* no read-ahead */
        if (!ractl->ra->ra_pages)
@@ -590,10 +590,10 @@ void page_cache_async_ra(struct readahead_control *ractl,
        /*
         * Same bit is used for PG_readahead and PG_reclaim.
         */
-       if (PageWriteback(page))
+       if (folio_test_writeback(folio))
                return;
 
-       ClearPageReadahead(page);
+       folio_clear_readahead(folio);
 
        /*
         * Defer asynchronous read-ahead on IO congestion.


@ -694,7 +694,6 @@ static int shmem_add_to_page_cache(struct page *page,
struct mm_struct *charge_mm) struct mm_struct *charge_mm)
{ {
XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
unsigned long i = 0;
unsigned long nr = compound_nr(page); unsigned long nr = compound_nr(page);
int error; int error;
@ -721,20 +720,18 @@ static int shmem_add_to_page_cache(struct page *page,
cgroup_throttle_swaprate(page, gfp); cgroup_throttle_swaprate(page, gfp);
do { do {
void *entry;
xas_lock_irq(&xas); xas_lock_irq(&xas);
entry = xas_find_conflict(&xas); if (expected != xas_find_conflict(&xas)) {
if (entry != expected)
xas_set_err(&xas, -EEXIST); xas_set_err(&xas, -EEXIST);
xas_create_range(&xas); goto unlock;
}
if (expected && xas_find_conflict(&xas)) {
xas_set_err(&xas, -EEXIST);
goto unlock;
}
xas_store(&xas, page);
if (xas_error(&xas)) if (xas_error(&xas))
goto unlock; goto unlock;
next:
xas_store(&xas, page);
if (++i < nr) {
xas_next(&xas);
goto next;
}
if (PageTransHuge(page)) { if (PageTransHuge(page)) {
count_vm_event(THP_FILE_ALLOC); count_vm_event(THP_FILE_ALLOC);
__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr); __mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
@ -880,30 +877,26 @@ void shmem_unlock_mapping(struct address_space *mapping)
} }
} }
/* static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
* Check whether a hole-punch or truncation needs to split a huge page,
* returning true if no split was required, or the split has been successful.
*
* Eviction (or truncation to 0 size) should never need to split a huge page;
* but in rare cases might do so, if shmem_undo_range() failed to trylock on
* head, and then succeeded to trylock on tail.
*
* A split can only succeed when there are no additional references on the
* huge page: so the split below relies upon find_get_entries() having stopped
* when it found a subpage of the huge page, without getting further references.
*/
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{ {
if (!PageTransCompound(page)) struct folio *folio;
return true; struct page *page;
/* Just proceed to delete a huge page wholly within the range punched */ /*
if (PageHead(page) && * At first avoid shmem_getpage(,,,SGP_READ): that fails
page->index >= start && page->index + HPAGE_PMD_NR <= end) * beyond i_size, and reports fallocated pages as holes.
return true; */
folio = __filemap_get_folio(inode->i_mapping, index,
/* Try to split huge page, so we can truly punch the hole or truncate */ FGP_ENTRY | FGP_LOCK, 0);
return split_huge_page(page) >= 0; if (!xa_is_value(folio))
return folio;
/*
* But read a page back from swap if any of it is within i_size
* (although in some cases this is just a waste of time).
*/
page = NULL;
shmem_getpage(inode, index, &page, SGP_READ);
return page ? page_folio(page) : NULL;
} }
/* /*
@ -917,10 +910,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
pgoff_t end = (lend + 1) >> PAGE_SHIFT; pgoff_t end = (lend + 1) >> PAGE_SHIFT;
unsigned int partial_start = lstart & (PAGE_SIZE - 1); struct folio_batch fbatch;
unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE]; pgoff_t indices[PAGEVEC_SIZE];
struct folio *folio;
bool same_folio;
long nr_swaps_freed = 0; long nr_swaps_freed = 0;
pgoff_t index; pgoff_t index;
int i; int i;
@@ -931,67 +924,64 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	if (info->fallocend > start && info->fallocend <= end && !unfalloc)
 		info->fallocend = start;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 	index = start;
 	while (index < end && find_lock_entries(mapping, index, end - 1,
-			&pvec, indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+			&fbatch, indices)) {
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			folio = fbatch.folios[i];
 
 			index = indices[i];
-			if (xa_is_value(page)) {
+			if (xa_is_value(folio)) {
 				if (unfalloc)
 					continue;
 				nr_swaps_freed += !shmem_free_swap(mapping,
-								index, page);
+								index, folio);
 				continue;
 			}
-			index += thp_nr_pages(page) - 1;
+			index += folio_nr_pages(folio) - 1;
 
-			if (!unfalloc || !PageUptodate(page))
-				truncate_inode_page(mapping, page);
-			unlock_page(page);
+			if (!unfalloc || !folio_test_uptodate(folio))
+				truncate_inode_folio(mapping, folio);
+			folio_unlock(folio);
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
+		folio_batch_remove_exceptionals(&fbatch);
+		folio_batch_release(&fbatch);
 		cond_resched();
 		index++;
 	}
 
-	if (partial_start) {
-		struct page *page = NULL;
-		shmem_getpage(inode, start - 1, &page, SGP_READ);
-		if (page) {
-			unsigned int top = PAGE_SIZE;
-			if (start > end) {
-				top = partial_end;
-				partial_end = 0;
-			}
-			zero_user_segment(page, partial_start, top);
-			set_page_dirty(page);
-			unlock_page(page);
-			put_page(page);
-		}
+	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
+	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
+	if (folio) {
+		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		folio_mark_dirty(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
+			start = folio->index + folio_nr_pages(folio);
+			if (same_folio)
+				end = folio->index;
+		}
+		folio_unlock(folio);
+		folio_put(folio);
+		folio = NULL;
 	}
-	if (partial_end) {
-		struct page *page = NULL;
-		shmem_getpage(inode, end, &page, SGP_READ);
-		if (page) {
-			zero_user_segment(page, 0, partial_end);
-			set_page_dirty(page);
-			unlock_page(page);
-			put_page(page);
-		}
+
+	if (!same_folio)
+		folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT);
+	if (folio) {
+		folio_mark_dirty(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend))
+			end = folio->index;
+		folio_unlock(folio);
+		folio_put(folio);
 	}
-	if (start >= end)
-		return;
 
 	index = start;
 	while (index < end) {
 		cond_resched();
 
-		if (!find_get_entries(mapping, index, end - 1, &pvec,
+		if (!find_get_entries(mapping, index, end - 1, &fbatch,
 				indices)) {
 			/* If all gone or hole-punch or unfalloc, we're done */
 			if (index == start || end != -1)
@@ -1000,14 +990,14 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			index = start;
 			continue;
 		}
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			folio = fbatch.folios[i];
 
 			index = indices[i];
-			if (xa_is_value(page)) {
+			if (xa_is_value(folio)) {
 				if (unfalloc)
 					continue;
-				if (shmem_free_swap(mapping, index, page)) {
+				if (shmem_free_swap(mapping, index, folio)) {
 					/* Swap was replaced by page: retry */
 					index--;
 					break;
@@ -1016,32 +1006,24 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				continue;
 			}
 
-			lock_page(page);
+			folio_lock(folio);
 
-			if (!unfalloc || !PageUptodate(page)) {
-				if (page_mapping(page) != mapping) {
+			if (!unfalloc || !folio_test_uptodate(folio)) {
+				if (folio_mapping(folio) != mapping) {
 					/* Page was replaced by swap: retry */
-					unlock_page(page);
+					folio_unlock(folio);
 					index--;
 					break;
 				}
-				VM_BUG_ON_PAGE(PageWriteback(page), page);
-				if (shmem_punch_compound(page, start, end))
-					truncate_inode_page(mapping, page);
-				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-					/* Wipe the page and don't get stuck */
-					clear_highpage(page);
-					flush_dcache_page(page);
-					set_page_dirty(page);
-					if (index <
-					    round_up(start, HPAGE_PMD_NR))
-						start = index + 1;
-				}
+				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
+						folio);
+				truncate_inode_folio(mapping, folio);
 			}
-			unlock_page(page);
+			index = folio->index + folio_nr_pages(folio) - 1;
+			folio_unlock(folio);
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
+		folio_batch_remove_exceptionals(&fbatch);
+		folio_batch_release(&fbatch);
 		index++;
 	}
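To see how the start/end bookkeeping in the new shmem_undo_range() behaves when a large folio straddles the punched range and cannot be split, here is a standalone calculation with made-up numbers (plain C; folio_index, folio_nr_pages and the byte offsets below are hypothetical stand-ins for the kernel helpers, chosen only for illustration):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    /* Punch bytes 102912..167935 out of a file that has a 16-page (64KiB)
     * folio starting at index 24 (byte offset 98304). */
    unsigned long long lstart = 102912, lend = 167935;
    unsigned long folio_index = 24, folio_nr_pages = 16;
    unsigned long long folio_pos = folio_index * PAGE_SIZE;     /* 98304 */
    unsigned long long folio_size = folio_nr_pages * PAGE_SIZE; /* 65536 */

    unsigned long start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;  /* 26 */
    unsigned long end = (lend + 1) >> PAGE_SHIFT;                  /* 41 */
    bool same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);

    printf("initial: start=%lu end=%lu same_folio=%d\n", start, end, same_folio);

    /* The folio covering lstart could not be split: skip past it instead of
     * truncating it whole, exactly as the code above adjusts 'start'. */
    same_folio = lend < folio_pos + folio_size;        /* still false here */
    start = folio_index + folio_nr_pages;              /* 40 */
    if (same_folio)
        end = folio_index;                             /* not taken here */

    printf("after failed split: start=%lu end=%lu same_folio=%d\n",
           start, end, same_folio);
    return 0;
}

The middle loop then only sees indices in [40, 41), so the stubbornly unsplit folio is left zeroed rather than discarded.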

View File

@@ -1077,24 +1077,24 @@ void __pagevec_lru_add(struct pagevec *pvec)
 }
 
 /**
- * pagevec_remove_exceptionals - pagevec exceptionals pruning
- * @pvec:	The pagevec to prune
+ * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
+ * @fbatch:	The batch to prune
  *
- * find_get_entries() fills both pages and XArray value entries (aka
- * exceptional entries) into the pagevec.  This function prunes all
- * exceptionals from @pvec without leaving holes, so that it can be
- * passed on to page-only pagevec operations.
+ * find_get_entries() fills a batch with both folios and shadow/swap/DAX
+ * entries.  This function prunes all the non-folio entries from @fbatch
+ * without leaving holes, so that it can be passed on to folio-only batch
+ * operations.
  */
-void pagevec_remove_exceptionals(struct pagevec *pvec)
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
 {
-	int i, j;
+	unsigned int i, j;
 
-	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
-		if (!xa_is_value(page))
-			pvec->pages[j++] = page;
+	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
+		if (!xa_is_value(folio))
+			fbatch->folios[j++] = folio;
 	}
-	pvec->nr = j;
+	fbatch->nr = j;
 }
 
 /**
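The pruning loop above is an in-place compaction: entries are copied forward over the value entries, preserving order, and the count is trimmed afterwards. A minimal userspace sketch of the same idea (the struct and helper names are invented for this sketch, not the kernel's folio_batch):

#include <assert.h>
#include <stddef.h>

#define BATCH_SIZE 15

struct toy_batch {
    unsigned int nr;
    void *entries[BATCH_SIZE];
};

/* Model of xa_is_value(): value entries have bit 0 set. */
static int is_value(const void *entry) { return (unsigned long)entry & 1; }

/* Keep only ordinary pointers, preserving order, without leaving holes. */
static void remove_exceptionals(struct toy_batch *b)
{
    unsigned int i, j;

    for (i = 0, j = 0; i < b->nr; i++) {
        void *entry = b->entries[i];
        if (!is_value(entry))
            b->entries[j++] = entry;
    }
    b->nr = j;
}

int main(void)
{
    int a, b, c;
    struct toy_batch batch = {
        .nr = 5,
        .entries = { &a, (void *)0x3, &b, (void *)0x7, &c },
    };

    remove_exceptionals(&batch);
    assert(batch.nr == 3);
    assert(batch.entries[0] == &a && batch.entries[1] == &b &&
           batch.entries[2] == &c);
    return 0;
}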

View File

@@ -56,11 +56,11 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
 
 /*
  * Unconditionally remove exceptional entries. Usually called from truncate
- * path. Note that the pagevec may be altered by this function by removing
- * exceptional entries similar to what pagevec_remove_exceptionals does.
+ * path. Note that the folio_batch may be altered by this function by removing
+ * exceptional entries similar to what folio_batch_remove_exceptionals() does.
  */
-static void truncate_exceptional_pvec_entries(struct address_space *mapping,
-				struct pagevec *pvec, pgoff_t *indices)
+static void truncate_folio_batch_exceptionals(struct address_space *mapping,
+				struct folio_batch *fbatch, pgoff_t *indices)
 {
 	int i, j;
 	bool dax;
@@ -69,11 +69,11 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 	if (shmem_mapping(mapping))
 		return;
 
-	for (j = 0; j < pagevec_count(pvec); j++)
-		if (xa_is_value(pvec->pages[j]))
+	for (j = 0; j < folio_batch_count(fbatch); j++)
+		if (xa_is_value(fbatch->folios[j]))
 			break;
 
-	if (j == pagevec_count(pvec))
+	if (j == folio_batch_count(fbatch))
 		return;
 
 	dax = dax_mapping(mapping);
@@ -82,12 +82,12 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 		xa_lock_irq(&mapping->i_pages);
 	}
 
-	for (i = j; i < pagevec_count(pvec); i++) {
-		struct page *page = pvec->pages[i];
+	for (i = j; i < folio_batch_count(fbatch); i++) {
+		struct folio *folio = fbatch->folios[i];
 		pgoff_t index = indices[i];
 
-		if (!xa_is_value(page)) {
-			pvec->pages[j++] = page;
+		if (!xa_is_value(folio)) {
+			fbatch->folios[j++] = folio;
 			continue;
 		}
 
@@ -96,7 +96,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 			continue;
 		}
 
-		__clear_shadow_entry(mapping, index, page);
+		__clear_shadow_entry(mapping, index, folio);
 	}
 
 	if (!dax) {
@@ -105,7 +105,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 		inode_add_lru(mapping->host);
 		spin_unlock(&mapping->host->i_lock);
 	}
-	pvec->nr = j;
+	fbatch->nr = j;
 }
 
 /*
 /*
@@ -177,21 +177,21 @@ void do_invalidatepage(struct page *page, unsigned int offset,
  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
-static void truncate_cleanup_page(struct page *page)
+static void truncate_cleanup_folio(struct folio *folio)
 {
-	if (page_mapped(page))
-		unmap_mapping_page(page);
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
 
-	if (page_has_private(page))
-		do_invalidatepage(page, 0, thp_size(page));
+	if (folio_has_private(folio))
+		do_invalidatepage(&folio->page, 0, folio_size(folio));
 
 	/*
 	 * Some filesystems seem to re-dirty the page even after
 	 * the VM has canceled the dirty bit (eg ext3 journaling).
 	 * Hence dirty accounting check is placed after invalidation.
 	 */
-	cancel_dirty_page(page);
-	ClearPageMappedToDisk(page);
+	folio_cancel_dirty(folio);
+	folio_clear_mappedtodisk(folio);
 }
 /*
@@ -218,23 +218,75 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	return ret;
 }
 
-int truncate_inode_page(struct address_space *mapping, struct page *page)
+int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 {
-	VM_BUG_ON_PAGE(PageTail(page), page);
-
-	if (page->mapping != mapping)
+	if (folio->mapping != mapping)
 		return -EIO;
 
-	truncate_cleanup_page(page);
-	delete_from_page_cache(page);
+	truncate_cleanup_folio(folio);
+	filemap_remove_folio(folio);
 	return 0;
 }
 
+/*
+ * Handle partial folios.  The folio may be entirely within the
+ * range if a split has raced with us.  If not, we zero the part of the
+ * folio that's within the [start, end] range, and then split the folio if
+ * it's large.  split_page_range() will discard pages which now lie beyond
+ * i_size, and we rely on the caller to discard pages which lie within a
+ * newly created hole.
+ *
+ * Returns false if splitting failed so the caller can avoid
+ * discarding the entire folio which is stubbornly unsplit.
+ */
+bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
+{
+	loff_t pos = folio_pos(folio);
+	unsigned int offset, length;
+
+	if (pos < start)
+		offset = start - pos;
+	else
+		offset = 0;
+	length = folio_size(folio);
+	if (pos + length <= (u64)end)
+		length = length - offset;
+	else
+		length = end + 1 - pos - offset;
+
+	folio_wait_writeback(folio);
+	if (length == folio_size(folio)) {
+		truncate_inode_folio(folio->mapping, folio);
+		return true;
+	}
+
+	/*
+	 * We may be zeroing pages we're about to discard, but it avoids
+	 * doing a complex calculation here, and then doing the zeroing
+	 * anyway if the page split fails.
+	 */
+	folio_zero_range(folio, offset, length);
+
+	cleancache_invalidate_page(folio->mapping, &folio->page);
+	if (folio_has_private(folio))
+		do_invalidatepage(&folio->page, offset, length);
+	if (!folio_test_large(folio))
+		return true;
+	if (split_huge_page(&folio->page) == 0)
+		return true;
+	if (folio_test_dirty(folio))
+		return false;
+	truncate_inode_folio(folio->mapping, folio);
+	return true;
+}
+
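The offset/length arithmetic at the top of truncate_inode_partial_folio() decides how much of the folio to zero: start from the truncation point if it falls inside the folio, and stop either at the end of the folio or at the inclusive end of the range, whichever comes first. Rendered as a standalone calculation (plain C, with folio_pos()/folio_size() replaced by ordinary parameters, and LLONG_MAX standing in for the "truncate everything past lstart" case where lend is -1):

#include <limits.h>
#include <stdio.h>

static void partial_range(long long pos, unsigned int size,
                          long long start, long long end,
                          unsigned int *offset, unsigned int *length)
{
    if (pos < start)
        *offset = start - pos;
    else
        *offset = 0;
    *length = size;
    if ((unsigned long long)(pos + *length) <= (unsigned long long)end)
        *length = *length - *offset;
    else
        *length = end + 1 - pos - *offset;
}

int main(void)
{
    unsigned int offset, length;

    /* Truncating a file down to 10000 bytes: the 64KiB folio at byte 0
     * keeps its first 10000 bytes and zeroes the remaining 55536. */
    partial_range(0, 65536, 10000, LLONG_MAX, &offset, &length);
    printf("truncate:   offset=%u length=%u\n", offset, length);

    /* Punching a hole [102912, 167935] through a 64KiB folio at byte
     * 98304: zero from offset 4608 for 60928 bytes, to the folio's end. */
    partial_range(98304, 65536, 102912, 167935, &offset, &length);
    printf("hole punch: offset=%u length=%u\n", offset, length);
    return 0;
}

When the computed length covers the whole folio, the function above skips the zeroing and simply truncates the folio outright.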
 /*
  * Used to get rid of pages on hardware memory corruption.
  */
 int generic_error_remove_page(struct address_space *mapping, struct page *page)
 {
-	VM_BUG_ON_PAGE(PageTail(page), page);
 	if (!mapping)
 		return -EINVAL;
 	/*
@@ -243,7 +295,7 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
 	 */
 	if (!S_ISREG(mapping->host->i_mode))
 		return -EIO;
-	return truncate_inode_page(mapping, page);
+	return truncate_inode_folio(mapping, page_folio(page));
 }
 EXPORT_SYMBOL(generic_error_remove_page);
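page_folio() above is cheap because a folio and its head page occupy the same memory: struct folio overlays struct page, so converting a head (or order-0) page to its folio, or taking &folio->page, is pointer arithmetic rather than a lookup. A toy model of that layout (invented names; the real kernel definitions use a union and also resolve tail pages through the compound_head pointer):

#include <assert.h>
#include <stdio.h>

struct toy_page {
    unsigned long flags;
    unsigned long compound_head;    /* 0 for a head page in this model */
};

struct toy_folio {
    struct toy_page page;           /* lives at offset 0, like folio->page */
};

static struct toy_folio *toy_page_folio(struct toy_page *page)
{
    /* Head page (or order-0 page): the folio is the page itself. */
    assert(page->compound_head == 0);
    return (struct toy_folio *)page;
}

int main(void)
{
    struct toy_folio folio = { .page = { .flags = 0, .compound_head = 0 } };
    struct toy_page *page = &folio.page;

    /* Converting back and forth does not dereference anything extra. */
    assert(toy_page_folio(page) == &folio);
    assert(&toy_page_folio(page)->page == page);
    printf("folio at %p, its head page at %p\n", (void *)&folio, (void *)page);
    return 0;
}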
@@ -294,20 +346,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
 {
 	pgoff_t		start;		/* inclusive */
 	pgoff_t		end;		/* exclusive */
-	unsigned int	partial_start;	/* inclusive */
-	unsigned int	partial_end;	/* exclusive */
-	struct pagevec	pvec;
+	struct folio_batch fbatch;
 	pgoff_t		indices[PAGEVEC_SIZE];
 	pgoff_t		index;
 	int		i;
+	struct folio	*folio;
+	bool		same_folio;
 
 	if (mapping_empty(mapping))
 		goto out;
 
-	/* Offsets within partial pages */
-	partial_start = lstart & (PAGE_SIZE - 1);
-	partial_end = (lend + 1) & (PAGE_SIZE - 1);
-
 	/*
 	 * 'start' and 'end' always covers the range of pages to be fully
 	 * truncated. Partial pages are covered with 'partial_start' at the
@@ -325,64 +373,49 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	else
 		end = (lend + 1) >> PAGE_SHIFT;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 	index = start;
 	while (index < end && find_lock_entries(mapping, index, end - 1,
-			&pvec, indices)) {
-		index = indices[pagevec_count(&pvec) - 1] + 1;
-		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
-		for (i = 0; i < pagevec_count(&pvec); i++)
-			truncate_cleanup_page(pvec.pages[i]);
-		delete_from_page_cache_batch(mapping, &pvec);
-		for (i = 0; i < pagevec_count(&pvec); i++)
-			unlock_page(pvec.pages[i]);
-		pagevec_release(&pvec);
+			&fbatch, indices)) {
+		index = indices[folio_batch_count(&fbatch) - 1] + 1;
+		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+		for (i = 0; i < folio_batch_count(&fbatch); i++)
+			truncate_cleanup_folio(fbatch.folios[i]);
+		delete_from_page_cache_batch(mapping, &fbatch);
+		for (i = 0; i < folio_batch_count(&fbatch); i++)
+			folio_unlock(fbatch.folios[i]);
+		folio_batch_release(&fbatch);
 		cond_resched();
 	}
 
-	if (partial_start) {
-		struct page *page = find_lock_page(mapping, start - 1);
-		if (page) {
-			unsigned int top = PAGE_SIZE;
-			if (start > end) {
-				/* Truncation within a single page */
-				top = partial_end;
-				partial_end = 0;
-			}
-			wait_on_page_writeback(page);
-			zero_user_segment(page, partial_start, top);
-			cleancache_invalidate_page(mapping, page);
-			if (page_has_private(page))
-				do_invalidatepage(page, partial_start,
-						  top - partial_start);
-			unlock_page(page);
-			put_page(page);
-		}
+	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
+	folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
+	if (folio) {
+		same_folio = lend < folio_pos(folio) + folio_size(folio);
+		if (!truncate_inode_partial_folio(folio, lstart, lend)) {
+			start = folio->index + folio_nr_pages(folio);
+			if (same_folio)
+				end = folio->index;
+		}
+		folio_unlock(folio);
+		folio_put(folio);
+		folio = NULL;
 	}
-	if (partial_end) {
-		struct page *page = find_lock_page(mapping, end);
-		if (page) {
-			wait_on_page_writeback(page);
-			zero_user_segment(page, 0, partial_end);
-			cleancache_invalidate_page(mapping, page);
-			if (page_has_private(page))
-				do_invalidatepage(page, 0,
-						  partial_end);
-			unlock_page(page);
-			put_page(page);
-		}
+
+	if (!same_folio)
+		folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
+						FGP_LOCK, 0);
+	if (folio) {
+		if (!truncate_inode_partial_folio(folio, lstart, lend))
+			end = folio->index;
+		folio_unlock(folio);
+		folio_put(folio);
 	}
-	/*
-	 * If the truncation happened within a single page no pages
-	 * will be released, just zeroed, so we can bail out now.
-	 */
-	if (start >= end)
-		goto out;
 
 	index = start;
-	for ( ; ; ) {
+	while (index < end) {
 		cond_resched();
-		if (!find_get_entries(mapping, index, end - 1, &pvec,
+		if (!find_get_entries(mapping, index, end - 1, &fbatch,
 				indices)) {
 			/* If all gone from start onwards, we're done */
 			if (index == start)
@@ -392,23 +425,24 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			continue;
 		}
 
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
-			if (xa_is_value(page))
+			if (xa_is_value(folio))
 				continue;
 
-			lock_page(page);
-			WARN_ON(page_to_index(page) != index);
-			wait_on_page_writeback(page);
-			truncate_inode_page(mapping, page);
-			unlock_page(page);
+			folio_lock(folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			folio_wait_writeback(folio);
+			truncate_inode_folio(mapping, folio);
+			folio_unlock(folio);
+			index = folio_index(folio) + folio_nr_pages(folio) - 1;
 		}
-		truncate_exceptional_pvec_entries(mapping, &pvec, indices);
-		pagevec_release(&pvec);
+		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+		folio_batch_release(&fbatch);
 		index++;
 	}
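The 'start' and 'end' indices consumed by the loops above follow the same rounding rules as in shmem_undo_range(): 'start' is lstart rounded up to the next page boundary, and 'end' is exclusive, so the inclusive byte lend has one added before the shift. A few lines of ordinary C (with illustrative byte offsets, not values taken from any real workload) make the rule concrete:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    /* Remove bytes 5000..20000 (inclusive) from a file. */
    unsigned long long lstart = 5000, lend = 20000;

    /* First fully-removed page: round lstart up. */
    unsigned long start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;   /* 2 */
    /* One past the last fully-removed page: lend is inclusive. */
    unsigned long end = (lend + 1) >> PAGE_SHIFT;                   /* 4 */

    printf("whole pages to drop: [%lu, %lu)\n", start, end);
    printf("page %llu holds the partial start, page %llu the partial end\n",
           lstart >> PAGE_SHIFT, lend >> PAGE_SHIFT);
    return 0;
}

The partially covered pages at either side are exactly the ones handed to truncate_inode_partial_folio() above.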
@@ -479,16 +513,16 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
 {
 	pgoff_t indices[PAGEVEC_SIZE];
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	pgoff_t index = start;
 	unsigned long ret;
 	unsigned long count = 0;
 	int i;
 
-	pagevec_init(&pvec);
-	while (find_lock_entries(mapping, index, end, &pvec, indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+	folio_batch_init(&fbatch);
+	while (find_lock_entries(mapping, index, end, &fbatch, indices)) {
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			struct page *page = &fbatch.folios[i]->page;
 
 			/* We rely upon deletion not changing page->index */
 			index = indices[i];
@@ -515,8 +549,8 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 			}
 			count += ret;
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
+		folio_batch_remove_exceptionals(&fbatch);
+		folio_batch_release(&fbatch);
 		cond_resched();
 		index++;
 	}
@@ -568,31 +602,29 @@ void invalidate_mapping_pagevec(struct address_space *mapping,
  * shrink_page_list() has a temp ref on them, or because they're transiently
  * sitting in the lru_cache_add() pagevecs.
  */
-static int
-invalidate_complete_page2(struct address_space *mapping, struct page *page)
+static int invalidate_complete_folio2(struct address_space *mapping,
+					struct folio *folio)
 {
-	if (page->mapping != mapping)
+	if (folio->mapping != mapping)
 		return 0;
 
-	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
+	if (folio_has_private(folio) &&
+	    !filemap_release_folio(folio, GFP_KERNEL))
 		return 0;
 
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
-	if (PageDirty(page))
+	if (folio_test_dirty(folio))
 		goto failed;
 
-	BUG_ON(page_has_private(page));
-	__delete_from_page_cache(page, NULL);
+	BUG_ON(folio_has_private(folio));
+	__filemap_remove_folio(folio, NULL);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 
-	if (mapping->a_ops->freepage)
-		mapping->a_ops->freepage(page);
-
-	put_page(page);	/* pagecache ref */
+	filemap_free_folio(mapping, folio);
 	return 1;
 failed:
 	xa_unlock_irq(&mapping->i_pages);
@@ -600,13 +632,13 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	return 0;
 }
 
-static int do_launder_page(struct address_space *mapping, struct page *page)
+static int do_launder_folio(struct address_space *mapping, struct folio *folio)
 {
-	if (!PageDirty(page))
+	if (!folio_test_dirty(folio))
 		return 0;
-	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+	if (folio->mapping != mapping || mapping->a_ops->launder_page == NULL)
 		return 0;
-	return mapping->a_ops->launder_page(page);
+	return mapping->a_ops->launder_page(&folio->page);
 }
 
 /**
@@ -624,7 +656,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
 	pgoff_t indices[PAGEVEC_SIZE];
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	pgoff_t index;
 	int i;
 	int ret = 0;
@@ -634,25 +666,25 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	if (mapping_empty(mapping))
 		goto out;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 	index = start;
-	while (find_get_entries(mapping, index, end, &pvec, indices)) {
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+	while (find_get_entries(mapping, index, end, &fbatch, indices)) {
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			struct folio *folio = fbatch.folios[i];
 
-			/* We rely upon deletion not changing page->index */
+			/* We rely upon deletion not changing folio->index */
 			index = indices[i];
-			if (xa_is_value(page)) {
+			if (xa_is_value(folio)) {
 				if (!invalidate_exceptional_entry2(mapping,
-								   index, page))
+								   index, folio))
 					ret = -EBUSY;
 				continue;
 			}
 
-			if (!did_range_unmap && page_mapped(page)) {
+			if (!did_range_unmap && folio_mapped(folio)) {
 				/*
-				 * If page is mapped, before taking its lock,
+				 * If folio is mapped, before taking its lock,
 				 * zap the rest of the file in one hit.
 				 */
 				unmap_mapping_pages(mapping, index,
@@ -660,29 +692,29 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				did_range_unmap = 1;
 			}
 
-			lock_page(page);
-			WARN_ON(page_to_index(page) != index);
-			if (page->mapping != mapping) {
-				unlock_page(page);
+			folio_lock(folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			if (folio->mapping != mapping) {
+				folio_unlock(folio);
 				continue;
 			}
-			wait_on_page_writeback(page);
-			if (page_mapped(page))
-				unmap_mapping_page(page);
-			BUG_ON(page_mapped(page));
-			ret2 = do_launder_page(mapping, page);
+			folio_wait_writeback(folio);
+			if (folio_mapped(folio))
+				unmap_mapping_folio(folio);
+			BUG_ON(folio_mapped(folio));
+			ret2 = do_launder_folio(mapping, folio);
 			if (ret2 == 0) {
-				if (!invalidate_complete_page2(mapping, page))
+				if (!invalidate_complete_folio2(mapping, folio))
 					ret2 = -EBUSY;
 			}
 			if (ret2 < 0)
 				ret = ret2;
-			unlock_page(page);
+			folio_unlock(folio);
 		}
-		pagevec_remove_exceptionals(&pvec);
-		pagevec_release(&pvec);
+		folio_batch_remove_exceptionals(&fbatch);
+		folio_batch_release(&fbatch);
 		cond_resched();
 		index++;
 	}
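All of the converted loops in this pull share one shape: fill a batch of up to PAGEVEC_SIZE entries starting at 'index', process them, prune the value entries, release the batch, then restart the search just past the last index seen. A rough userspace model of that iteration pattern (toy data structures and a fake lookup helper, meant only to show the loop shape, not the locking or retry details):

#include <stdio.h>

#define BATCH_SIZE 15
#define NR_ENTRIES 40

/* Toy "page cache": entry i is present if cache[i] != 0. */
static int cache[NR_ENTRIES];

/* Model of find_get_entries(): collect up to 'max' present entries in
 * [index, end], record their indices, and return how many were found. */
static unsigned int find_entries(unsigned long index, unsigned long end,
                                 unsigned long *indices, unsigned int max)
{
    unsigned int nr = 0;

    for (unsigned long i = index; i <= end && nr < max; i++)
        if (cache[i])
            indices[nr++] = i;
    return nr;
}

int main(void)
{
    unsigned long indices[BATCH_SIZE];
    unsigned long index = 0, end = NR_ENTRIES - 1;
    unsigned int nr, i;

    for (unsigned long j = 0; j < NR_ENTRIES; j += 2)
        cache[j] = 1;                   /* populate every other slot */

    /* Same loop shape as the truncate/invalidate paths: grab a batch,
     * process it, then continue from just past the last entry seen. */
    while ((nr = find_entries(index, end, indices, BATCH_SIZE))) {
        for (i = 0; i < nr; i++) {
            printf("processing index %lu\n", indices[i]);
            index = indices[i];         /* deletion must not move this */
        }
        index++;                        /* advance past the last one */
    }
    return 0;
}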