mm: skip readahead if the cgroup is congested
We noticed in testing that we'd get pretty bad latency stalls under heavy pressure because readahead would keep issuing IO while the cgroup was severely throttled. Under that much pressure a throttled cgroup wants to do as little IO as possible so it can still make progress on real work, so just skip readahead if our group is congested.

Signed-off-by: Josef Bacik <jbacik@fb.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit ca47e8c72a
parent b351f0c76c
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -19,6 +19,7 @@
 #include <linux/syscalls.h>
 #include <linux/file.h>
 #include <linux/mm_inline.h>
+#include <linux/blk-cgroup.h>
 
 #include "internal.h"
 
@@ -505,6 +506,9 @@ void page_cache_sync_readahead(struct address_space *mapping,
 	if (!ra->ra_pages)
 		return;
 
+	if (blk_cgroup_congested())
+		return;
+
 	/* be dumb */
 	if (filp && (filp->f_mode & FMODE_RANDOM)) {
 		force_page_cache_readahead(mapping, filp, offset, req_size);
@@ -555,6 +559,9 @@ page_cache_async_readahead(struct address_space *mapping,
 	if (inode_read_congested(mapping->host))
 		return;
 
+	if (blk_cgroup_congested())
+		return;
+
 	/* do read-ahead */
 	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
 }
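For context, with this patch applied the synchronous readahead entry point in mm/readahead.c ends up looking roughly as follows. This is a sketch reconstructed from the hunk above plus its surrounding context lines, not a verbatim copy of the file, so minor details may differ:

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead configured for this file */
	if (!ra->ra_pages)
		return;

	/* new: back off entirely if our blk-cgroup is congested */
	if (blk_cgroup_congested())
		return;

	/* be dumb: random access, just read what was asked for */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}

The new check sits before any readahead work is started, so a task in a congested cgroup simply falls back to demand paging; the required reads still happen, only the speculative ones are dropped.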