mm, memcg: pass charge order to oom killer
The oom killer typically displays the allocation order at the time of oom as part of its diagnostic messages (for global, cpuset, and mempolicy ooms). The memory controller may also pass the charge order to the oom killer so it can emit the same information. This is useful in determining how large the memory allocation is that triggered the oom killer.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e845e19936
parent c7cfa37b73
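For context on the order value the diff below threads through: in mem_cgroup_do_charge() the failed charge size in bytes (csize) is converted to an allocation order with get_order(), the standard bytes-to-page-order helper, and that order is what ends up in the oom diagnostics. A minimal userspace sketch of that mapping, assuming 4 KiB pages and re-implementing get_order() purely for illustration (the real helper is kernel-internal):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/*
 * Readable re-implementation of the kernel's get_order() for size > 0:
 * the smallest order such that (PAGE_SIZE << order) >= size.
 */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* Example charge sizes: one page, 32 pages, a 2 MiB (THP-sized) charge. */
	unsigned long sizes[] = { PAGE_SIZE, 32 * PAGE_SIZE, 2UL << 20 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("charge of %lu bytes -> order=%d\n",
		       sizes[i], get_order(sizes[i]));
	return 0;
}

A single-page charge reports order=0, while a 2 MiB transparent-hugepage-sized charge reports order=9, which is exactly the distinction the extra diagnostic information is meant to surface.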
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -77,7 +77,8 @@ extern void mem_cgroup_uncharge_end(void);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
 
-extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask);
+extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+				     int order);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
 
 extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1811,7 +1811,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 /*
  * try to call OOM killer. returns false if we should exit memory-reclaim loop.
  */
-bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
+bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
 	struct oom_wait_info owait;
 	bool locked, need_to_kill;
@@ -1841,7 +1841,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
 
 	if (need_to_kill) {
 		finish_wait(&memcg_oom_waitq, &owait.wait);
-		mem_cgroup_out_of_memory(memcg, mask);
+		mem_cgroup_out_of_memory(memcg, mask, order);
 	} else {
 		schedule();
 		finish_wait(&memcg_oom_waitq, &owait.wait);
@@ -2212,7 +2212,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	if (!oom_check)
 		return CHARGE_NOMEM;
 	/* check OOM */
-	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
+	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
 		return CHARGE_OOM_DIE;
 
 	return CHARGE_RETRY;
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -554,7 +554,8 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
+void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+			      int order)
 {
 	unsigned long limit;
 	unsigned int points = 0;
@@ -570,12 +571,12 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
 		return;
 	}
 
-	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
+	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
 	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
 	read_lock(&tasklist_lock);
 	p = select_bad_process(&points, limit, memcg, NULL, false);
 	if (p && PTR_ERR(p) != -1UL)
-		oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
+		oom_kill_process(p, gfp_mask, order, points, limit, memcg, NULL,
 			 "Memory cgroup out of memory");
 	read_unlock(&tasklist_lock);
 }