mm, fs: obey gfp_mapping for add_to_page_cache()
Commit 6afdb859b7 ("mm: do not ignore mapping_gfp_mask in page cache
allocation paths") has caught some users of hardcoded GFP_KERNEL used
in the page cache allocation paths.  This, however, wasn't complete and
there were others which went unnoticed.

Dave Chinner has reported the following deadlock for xfs on loop
device:

: With the recent merge of the loop device changes, I'm now seeing
: XFS deadlock on my single CPU, 1GB RAM VM running xfs/073.
:
: The deadlock is as follows:
:
: kloopd1: loop_queue_read_work
:	xfs_file_iter_read
:	lock XFS inode XFS_IOLOCK_SHARED (on image file)
:	page cache read (GFP_KERNEL)
:	radix tree alloc
:	memory reclaim
:	reclaim XFS inodes
:	log force to unpin inodes
:	<wait for log IO completion>
:
: xfs-cil/loop1: <does log force IO work>
:	xlog_cil_push
:	xlog_write
:	<loop issuing log writes>
:		xlog_state_get_iclog_space()
:		<blocks due to all log buffers under write io>
:		<waits for IO completion>
:
: kloopd1: loop_queue_write_work
:	xfs_file_write_iter
:	lock XFS inode XFS_IOLOCK_EXCL (on image file)
:	<wait for inode to be unlocked>
:
: i.e. kloopd, with its split read and write work queues, has
: introduced a dependency through memory reclaim: writes need to be
: able to progress for reads to make progress.
:
: The problem, fundamentally, is that mpage_readpages() does a
: GFP_KERNEL allocation, rather than paying attention to the inode's
: mapping gfp mask, which is set to GFP_NOFS.
:
: This didn't use to happen, because the loop device used to issue
: reads through the splice path and that does:
:
:	error = add_to_page_cache_lru(page, mapping, index,
:			GFP_KERNEL & mapping_gfp_mask(mapping));

This has changed with commit aa4d86163e ("block: loop: switch to VFS
ITER_BVEC").
("block: loop: switch to VFS ITER_BVEC"). This patch changes mpage_readpage{s} to follow gfp mask set for the mapping. There are, however, other places which are doing basically the same. lustre:ll_dir_filler is doing GFP_KERNEL from the function which apparently uses GFP_NOFS for other allocations so let's make this consistent. cifs:readpages_get_pages is called from cifs_readpages and __cifs_readpages_from_fscache called from the same path obeys mapping gfp. ramfs_nommu_expand_for_mapping is hardcoding GFP_KERNEL as well regardless it uses mapping_gfp_mask for the page allocation. ext4_mpage_readpages is the called from the page cache allocation path same as read_pages and read_cache_pages As I've noticed in my previous post I cannot say I would be happy about sprinkling mapping_gfp_mask all over the place and it sounds like we should drop gfp_mask argument altogether and use it internally in __add_to_page_cache_locked that would require all the filesystems to use mapping gfp consistently which I am not sure is the case here. From a quick glance it seems that some file system use it all the time while others are selective. Signed-off-by: Michal Hocko <mhocko@suse.com> Reported-by: Dave Chinner <david@fromorbit.com> Cc: "Theodore Ts'o" <tytso@mit.edu> Cc: Ming Lei <ming.lei@canonical.com> Cc: Andreas Dilger <andreas.dilger@intel.com> Cc: Oleg Drokin <oleg.drokin@intel.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Christoph Hellwig <hch@lst.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 063d99b4fa
parent 5b5f145527
drivers/staging/lustre/lustre/llite/dir.c
@@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
 	prefetchw(&page->flags);
 	ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
-				    GFP_KERNEL);
+				    GFP_NOFS);
 	if (ret == 0) {
 		unlock_page(page);
 	} else {
fs/cifs/file.c
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	struct page *page, *tpage;
 	unsigned int expected_index;
 	int rc;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
 	INIT_LIST_HEAD(tmplist);
 
@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	 */
 	__set_page_locked(page);
 	rc = add_to_page_cache_locked(page, mapping,
-				      page->index, GFP_KERNEL);
+				      page->index, gfp);
 
 	/* give up if we can't stick it in the cache */
 	if (rc) {
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 			break;
 
 		__set_page_locked(page);
-		if (add_to_page_cache_locked(page, mapping, page->index,
-					     GFP_KERNEL)) {
+		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
 			__clear_page_locked(page);
 			break;
 		}
fs/ext4/readpage.c
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
 		if (pages) {
 			page = list_entry(pages->prev, struct page, lru);
 			list_del(&page->lru);
-			if (add_to_page_cache_lru(page, mapping,
-						  page->index, GFP_KERNEL))
+			if (add_to_page_cache_lru(page, mapping, page->index,
+					GFP_KERNEL & mapping_gfp_mask(mapping)))
 				goto next_page;
 		}
 
fs/mpage.c
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		sector_t *last_block_in_bio, struct buffer_head *map_bh,
-		unsigned long *first_logical_block, get_block_t get_block)
+		unsigned long *first_logical_block, get_block_t get_block,
+		gfp_t gfp)
 {
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 			goto out;
 		}
 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-				min_t(int, nr_pages, BIO_MAX_PAGES),
-				GFP_KERNEL);
+				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
 		if (bio == NULL)
 			goto confused;
 	}
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+					page->index,
+					gfp)) {
 			bio = do_mpage_readpage(bio, page,
 					nr_pages - page_idx,
 					&last_block_in_bio, &map_bh,
 					&first_logical_block,
-					get_block);
+					get_block, gfp);
 		}
 		page_cache_release(page);
 	}
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
 	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-			&map_bh, &first_logical_block, get_block);
+			&map_bh, &first_logical_block, get_block, gfp);
 	if (bio)
 		mpage_bio_submit(READ, bio);
 	return 0;
fs/ramfs/file-nommu.c
@@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	unsigned order;
 	void *data;
 	int ret;
+	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
 
 	/* make various checks */
 	order = get_order(newsize);
@@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
 	/* allocate enough contiguous pages to be able to satisfy the
 	 * request */
-	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+	pages = alloc_pages(gfp, order);
 	if (!pages)
 		return -ENOMEM;
 
@@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 		struct page *page = pages + loop;
 
 		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
-					    GFP_KERNEL);
+					    gfp);
 		if (ret < 0)
 			goto add_error;
 
mm/readahead.c
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 	while (!list_empty(pages)) {
 		page = list_to_page(pages);
 		list_del(&page->lru);
-		if (add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (!add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);