mm, memcg: prevent memory.min load/store tearing

memory.min can be set concurrently with reads, which may cause the wrong
value to be propagated.

Signed-off-by: Chris Down <chris@chrisdown.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/e809b4e6b0c1626dac6945970de06409a180ee65.1584034301.git.chris@chrisdown.name
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Chris Down 2020-04-01 21:07:27 -07:00 committed by Linus Torvalds
parent f86b810c26
commit c3d5320086
2 changed files with 8 additions and 6 deletions
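
For readers unfamiliar with the pattern: READ_ONCE() and WRITE_ONCE()
force the compiler to perform a plain load or store as a single access,
preventing it from splitting ("tearing") or refetching the value. Below
is a minimal userspace sketch of the idea, modeled on the volatile casts
the kernel's macros use; the helper names and example functions are
illustrative only, not taken from this commit.

/*
 * Illustrative sketch only -- modeled on the kernel's READ_ONCE()/
 * WRITE_ONCE() (volatile casts), not actual kernel code.
 */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

static unsigned long min_setting;

/* Writer side: a plain "min_setting = nr_pages;" may legally be
 * compiled as several smaller stores (store tearing), letting a
 * concurrent reader observe a half-written value. */
void set_min(unsigned long nr_pages)
{
	WRITE_ONCE(min_setting, nr_pages);
}

/* Reader side: snapshot the value once; a plain read could be torn
 * or refetched, so two uses of the setting might disagree. */
unsigned long protected_amount(unsigned long usage)
{
	unsigned long min = READ_ONCE(min_setting);

	return usage < min ? usage : min;
}

The mm/page_counter.c hunk below applies the reader half of this
pattern: c->min is snapshotted into a local once and that snapshot is
used for both the test and the min() calculation.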

mm/memcontrol.c

@@ -6389,7 +6389,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 		return MEMCG_PROT_NONE;
 
 	if (parent == root) {
-		memcg->memory.emin = memcg->memory.min;
+		memcg->memory.emin = READ_ONCE(memcg->memory.min);
 		memcg->memory.elow = memcg->memory.low;
 		goto out;
 	}
@@ -6397,7 +6397,8 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 	parent_usage = page_counter_read(&parent->memory);
 
 	memcg->memory.emin = effective_protection(usage, parent_usage,
-			memcg->memory.min, READ_ONCE(parent->memory.emin),
+			READ_ONCE(memcg->memory.min),
+			READ_ONCE(parent->memory.emin),
 			atomic_long_read(&parent->memory.children_min_usage));
 
 	memcg->memory.elow = effective_protection(usage, parent_usage,

mm/page_counter.c

@@ -17,14 +17,15 @@ static void propagate_protected_usage(struct page_counter *c,
 				      unsigned long usage)
 {
 	unsigned long protected, old_protected;
-	unsigned long low;
+	unsigned long low, min;
 	long delta;
 
 	if (!c->parent)
 		return;
 
-	if (c->min || atomic_long_read(&c->min_usage)) {
-		protected = min(usage, c->min);
+	min = READ_ONCE(c->min);
+	if (min || atomic_long_read(&c->min_usage)) {
+		protected = min(usage, min);
 		old_protected = atomic_long_xchg(&c->min_usage, protected);
 		delta = protected - old_protected;
 		if (delta)
@@ -207,7 +208,7 @@ void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
 {
 	struct page_counter *c;
 
-	counter->min = nr_pages;
+	WRITE_ONCE(counter->min, nr_pages);
 
 	for (c = counter; c; c = c->parent)
 		propagate_protected_usage(c, atomic_long_read(&c->usage));