mm/filemap: fix storing to a THP shadow entry
When a THP is removed from the page cache by reclaim, we replace it with a
shadow entry that occupies all slots of the XArray previously occupied by
the THP. If the user then accesses that page again, we only allocate a
single page, but storing it into the shadow entry replaces all entries
with that one page. That leads to bugs like
page dumped because: VM_BUG_ON_PAGE(page_to_pgoff(page) != offset)
------------[ cut here ]------------
kernel BUG at mm/filemap.c:2529!
https://bugzilla.kernel.org/show_bug.cgi?id=206569
This is hard to reproduce with mainline, but happens regularly with the
THP patchset (as so many more THPs are created). This solution is taken
from the THP patchset. It splits the shadow entry into order-0 pieces at
the time that we bring a new page into cache.
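
For illustration only, a minimal sketch of the split-before-store pattern
the fix relies on, written against the generic XArray API. The helper name
store_small_page() is hypothetical, and error handling plus the xas_nomem()
retry loop are omitted for brevity:

    #include <linux/xarray.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper: store one order-0 page at @index when that
     * slot may currently be covered by a multi-order shadow entry.
     */
    static void store_small_page(struct xarray *xa, pgoff_t index,
                                 struct page *page, gfp_t gfp)
    {
            XA_STATE(xas, xa, index);
            unsigned int order = xa_get_order(xa, index);
            void *old = xa_load(xa, index);

            /* Preallocate the nodes a split will need, outside the lock. */
            if (xa_is_value(old) && order > 0)
                    xas_split_alloc(&xas, old, order, gfp);

            xas_lock_irq(&xas);
            old = xas_load(&xas);
            /* Re-read the order: the entry may have been split already. */
            order = xa_get_order(xas.xa, xas.xa_index);
            if (xa_is_value(old) && order > 0) {
                    /* Replace the one 2^order-slot entry with order-0 entries. */
                    xas_split(&xas, old, order);
                    xas_reset(&xas);
            }
            /* The store now overwrites a single slot, not the whole range. */
            xas_store(&xas, page);
            xas_unlock_irq(&xas);
    }

The allocation half runs before taking the lock because xas_split_alloc()
may sleep; xas_split() itself only redistributes the preallocated nodes,
so it is safe under xas_lock_irq().
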
Fixes: 99cb0dbd47 ("mm,thp: add read-only THP support for (non-shmem) FS")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Qian Cai <cai@lca.pw>
Link: https://lkml.kernel.org/r/20200903183029.14930-4-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 198b62f83e
parent 8fc75643c5
 mm/filemap.c | 40
@@ -829,13 +829,12 @@ EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
 static int __add_to_page_cache_locked(struct page *page,
 				      struct address_space *mapping,
-				      pgoff_t offset, gfp_t gfp_mask,
+				      pgoff_t offset, gfp_t gfp,
 				      void **shadowp)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
 	int huge = PageHuge(page);
 	int error;
-	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -846,25 +845,46 @@ static int __add_to_page_cache_locked(struct page *page,
 	page->index = offset;
 
 	if (!huge) {
-		error = mem_cgroup_charge(page, current->mm, gfp_mask);
+		error = mem_cgroup_charge(page, current->mm, gfp);
 		if (error)
 			goto error;
 	}
 
+	gfp &= GFP_RECLAIM_MASK;
+
 	do {
+		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
+		void *entry, *old = NULL;
+
+		if (order > thp_order(page))
+			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+					order, gfp);
 		xas_lock_irq(&xas);
-		old = xas_load(&xas);
-		if (old && !xa_is_value(old))
-			xas_set_err(&xas, -EEXIST);
+		xas_for_each_conflict(&xas, entry) {
+			old = entry;
+			if (!xa_is_value(entry)) {
+				xas_set_err(&xas, -EEXIST);
+				goto unlock;
+			}
+		}
+
+		if (old) {
+			if (shadowp)
+				*shadowp = old;
+			/* entry may have been split before we acquired lock */
+			order = xa_get_order(xas.xa, xas.xa_index);
+			if (order > thp_order(page)) {
+				xas_split(&xas, old, order);
+				xas_reset(&xas);
+			}
+		}
+
 		xas_store(&xas, page);
 		if (xas_error(&xas))
 			goto unlock;
 
-		if (xa_is_value(old)) {
+		if (old)
 			mapping->nrexceptional--;
-			if (shadowp)
-				*shadowp = old;
-		}
 		mapping->nrpages++;
 
 		/* hugetlb pages do not participate in page cache accounting */
@@ -872,7 +892,7 @@ static int __add_to_page_cache_locked(struct page *page,
 		__inc_lruvec_page_state(page, NR_FILE_PAGES);
 unlock:
 		xas_unlock_irq(&xas);
-	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+	} while (xas_nomem(&xas, gfp));
 
 	if (xas_error(&xas)) {
 		error = xas_error(&xas);