mm,thp: avoid writes to file with THP in pagecache

With the previous patch, an application can put part of its text section
into THPs via madvise().  These THPs are protected from writes while the
application is running (ETXTBSY).  However, once the application exits,
the file becomes available for writes again.
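
As an illustration of the userspace side, here is a minimal, hypothetical
sketch (the linker symbols __executable_start/_etext, the page rounding,
and the standalone program are assumptions, not part of this patch; the
actual collapse into THPs is done asynchronously by khugepaged on kernels
built with CONFIG_READ_ONLY_THP_FOR_FS):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Bounds of the text segment, provided by the default GNU ld script. */
extern char __executable_start[], _etext[];

int main(void)
{
	uintptr_t page = (uintptr_t)sysconf(_SC_PAGESIZE);
	uintptr_t start = (uintptr_t)__executable_start & ~(page - 1);
	size_t len = (uintptr_t)_etext - start;

	/*
	 * Hint that this read-only, file-backed text mapping may be backed
	 * by THPs; khugepaged performs the actual collapse later.
	 */
	if (madvise((void *)start, len, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");
	return 0;
}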

This patch avoids writes to file-backed THPs by dropping the page cache
for the file when it is opened for write.  A new counter, nr_thps, is
added to struct address_space.  In do_dentry_open(), if the file is
opened for write and nr_thps is non-zero, the page cache for the whole
file is dropped.
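
For orientation, a condensed sketch of how the pieces fit together; this
restates the hunks below rather than adding anything new:

/*
 * struct address_space gains an nr_thps counter (only with
 * CONFIG_READ_ONLY_THP_FOR_FS), zeroed in inode_init_always().
 * khugepaged's collapse_file() increments it for non-shmem files, and
 * unaccount_page_cache_page() decrements it when such a page is removed.
 * do_dentry_open() then checks it on every open for write:
 */
if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
	truncate_pagecache(inode, 0);	/* drop the whole pagecache, THPs included */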

Link: http://lkml.kernel.org/r/20190801184244.3169074-8-songliubraving@fb.com
Signed-off-by: Song Liu <songliubraving@fb.com>
Reported-by: kbuild test robot <lkp@intel.com>
Acked-by: Rik van Riel <riel@surriel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 09d91cda0e (parent 99cb0dbd47)
Author:    Song Liu <songliubraving@fb.com>
Committer: Linus Torvalds
Date:      2019-09-23 15:38:03 -07:00

5 changed files with 47 additions and 1 deletion

fs/inode.c

@@ -181,6 +181,9 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	mapping->flags = 0;
 	mapping->wb_err = 0;
 	atomic_set(&mapping->i_mmap_writable, 0);
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	atomic_set(&mapping->nr_thps, 0);
+#endif
 	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
 	mapping->private_data = NULL;
 	mapping->writeback_index = 0;

fs/open.c

@@ -818,6 +818,14 @@ static int do_dentry_open(struct file *f,
 		if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
 			return -EINVAL;
 	}
+
+	/*
+	 * XXX: Huge page cache doesn't support writing yet. Drop all page
+	 * cache for this file before processing writes.
+	 */
+	if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
+		truncate_pagecache(inode, 0);
+
 	return 0;
 
 cleanup_all:

include/linux/fs.h

@@ -429,6 +429,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
  * @i_pages: Cached pages.
  * @gfp_mask: Memory allocation flags to use for allocating pages.
  * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @nr_thps: Number of THPs in the pagecache (non-shmem only).
  * @i_mmap: Tree of private and shared mappings.
  * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
  * @nrpages: Number of page entries, protected by the i_pages lock.
@@ -446,6 +447,10 @@ struct address_space {
 	struct xarray		i_pages;
 	gfp_t			gfp_mask;
 	atomic_t		i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	/* number of thp, only for non-shmem files */
+	atomic_t		nr_thps;
+#endif
 	struct rb_root_cached	i_mmap;
 	struct rw_semaphore	i_mmap_rwsem;
 	unsigned long		nrpages;
@@ -2798,6 +2803,33 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
 	return errseq_sample(&mapping->wb_err);
 }
 
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	return atomic_read(&mapping->nr_thps);
+#else
+	return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	atomic_inc(&mapping->nr_thps);
+#else
+	WARN_ON_ONCE(1);
+#endif
+}
+
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+	atomic_dec(&mapping->nr_thps);
+#else
+	WARN_ON_ONCE(1);
+#endif
+}
+
 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
 			   int datasync);
 extern int vfs_fsync(struct file *file, int datasync);

mm/filemap.c

@@ -205,6 +205,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 			__dec_node_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
 		__dec_node_page_state(page, NR_FILE_THPS);
+		filemap_nr_thps_dec(mapping);
 	}
 
 	/*

mm/khugepaged.c

@@ -1514,8 +1514,10 @@ static void collapse_file(struct mm_struct *mm,
 
 	if (is_shmem)
 		__inc_node_page_state(new_page, NR_SHMEM_THPS);
-	else
+	else {
 		__inc_node_page_state(new_page, NR_FILE_THPS);
+		filemap_nr_thps_inc(mapping);
+	}
 
 	if (nr_none) {
 		struct zone *zone = page_zone(new_page);