mm: memcg/slab: pre-allocate obj_cgroups for slab caches with SLAB_ACCOUNT

In general it's unknown in advance whether a slab page will contain
accounted objects or not.  In order to avoid wasting memory, an obj_cgroup
vector is allocated dynamically when the need to account a new object
arises.  Such an approach is memory efficient, but requires an expensive
cmpxchg() to set up the memcg/objcgs pointer, because an allocation can
race with a different allocation on another cpu.
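
For illustration, the racy path boils down to the following pattern (a
simplified sketch of the existing set_page_objcgs() logic, not the exact
code):

	/*
	 * Two CPUs can try to set up the vector for the same slab page
	 * concurrently, so the pointer is installed with cmpxchg() and
	 * the loser frees its copy.
	 */
	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, node);
	if (cmpxchg(&page->memcg_data, 0, (unsigned long)vec | MEMCG_DATA_OBJCGS))
		kfree(vec);	/* another CPU won the race; reuse its vector */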

But in some common cases it's known for sure that a slab page will contain
accounted objects: when the page belongs to a slab cache with the
SLAB_ACCOUNT flag set.  This includes such popular objects as
vm_area_struct, anon_vma, task_struct, etc.
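
For reference, a cache opts into accounting by passing SLAB_ACCOUNT at
creation time; a minimal sketch (the cache name and struct here are
placeholders):

	/* every object of this cache is charged to the allocating memcg */
	cachep = kmem_cache_create("my_struct", sizeof(struct my_struct),
				   0, SLAB_ACCOUNT, NULL);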

In such cases we can pre-allocate the objcgs vector and simply assign it
to the page without any atomic operations, because at this early stage the
page is not visible to anyone else.
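
In other words, for a brand new slab page the setup becomes a plain store
(sketch; the complete version is in the diff below):

	/* no concurrent users yet: no cmpxchg() needed */
	page->memcg_data = (unsigned long)vec | MEMCG_DATA_OBJCGS;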

A very simplistic benchmark (allocating 10000000 64-byte objects in a
row) shows a ~15% win.  In real life it seems that most workloads are
not very sensitive to the speed of (accounted) slab allocations.
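
The benchmark was roughly of the following shape (a hypothetical
reconstruction; the actual harness is not part of this patch, and
"bench" and objs[] are made up here):

	/* allocate 10000000 64-byte objects back to back from an
	 * accounted cache and time the loop */
	cachep = kmem_cache_create("bench", 64, 0, SLAB_ACCOUNT, NULL);
	for (i = 0; i < 10000000; i++)
		objs[i] = kmem_cache_alloc(cachep, GFP_KERNEL);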

[guro@fb.com: open-code set_page_objcgs() and add some comments, by Johannes]
  Link: https://lkml.kernel.org/r/20201113001926.GA2934489@carbon.dhcp.thefacebook.com
[akpm@linux-foundation.org: fix it for mm-slub-call-account_slab_page-after-slab-page-initialization-fix.patch]

Link: https://lkml.kernel.org/r/20201110195753.530157-2-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 2e9bd48315
parent cad8320b4b
Author: Roman Gushchin <guro@fb.com>
Date:   2021-02-24 12:03:11 -08:00
Committed by: Linus Torvalds

5 changed files with 31 additions and 29 deletions

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -475,19 +475,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
 	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 }
 
-/*
- * set_page_objcgs - associate a page with a object cgroups vector
- * @page: a pointer to the page struct
- * @objcgs: a pointer to the object cgroups vector
- *
- * Atomically associates a page with a vector of object cgroups.
- */
-static inline bool set_page_objcgs(struct page *page,
-					struct obj_cgroup **objcgs)
-{
-	return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs |
-			MEMCG_DATA_OBJCGS);
-}
 #else
 static inline struct obj_cgroup **page_objcgs(struct page *page)
 {
@@ -498,12 +485,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
 {
 	return NULL;
 }
-
-static inline bool set_page_objcgs(struct page *page,
-					struct obj_cgroup **objcgs)
-{
-	return true;
-}
 #endif
 
 static __always_inline bool memcg_stat_item_in_bytes(int idx)

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2935,9 +2935,10 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
 
 #ifdef CONFIG_MEMCG_KMEM
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
-				 gfp_t gfp)
+				 gfp_t gfp, bool new_page)
 {
 	unsigned int objects = objs_per_slab_page(s, page);
+	unsigned long memcg_data;
 	void *vec;
 
 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
@@ -2945,11 +2946,25 @@ int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
 	if (!vec)
 		return -ENOMEM;
 
-	if (!set_page_objcgs(page, vec))
+	memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
+	if (new_page) {
+		/*
+		 * If the slab page is brand new and nobody can yet access
+		 * it's memcg_data, no synchronization is required and
+		 * memcg_data can be simply assigned.
+		 */
+		page->memcg_data = memcg_data;
+	} else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
+		/*
+		 * If the slab page is already in use, somebody can allocate
+		 * and assign obj_cgroups in parallel. In this case the existing
+		 * objcg vector should be reused.
+		 */
 		kfree(vec);
-	else
-		kmemleak_not_leak(vec);
+		return 0;
+	}
 
+	kmemleak_not_leak(vec);
 	return 0;
 }
 

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1379,7 +1379,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	account_slab_page(page, cachep->gfporder, cachep);
+	account_slab_page(page, cachep->gfporder, cachep, flags);
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -238,7 +238,7 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 
 #ifdef CONFIG_MEMCG_KMEM
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
-				 gfp_t gfp);
+				 gfp_t gfp, bool new_page);
 
 static inline void memcg_free_page_obj_cgroups(struct page *page)
 {
@@ -315,7 +315,8 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 			page = virt_to_head_page(p[i]);
 
 			if (!page_objcgs(page) &&
-			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
+			    memcg_alloc_page_obj_cgroups(page, s, flags,
+							 false)) {
 				obj_cgroup_uncharge(objcg, obj_full_size(s));
 				continue;
 			}
@@ -379,7 +380,8 @@ static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
 }
 
 static inline int memcg_alloc_page_obj_cgroups(struct page *page,
-					       struct kmem_cache *s, gfp_t gfp)
+					       struct kmem_cache *s, gfp_t gfp,
+					       bool new_page)
 {
 	return 0;
 }
@@ -420,8 +422,12 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 }
 
 static __always_inline void account_slab_page(struct page *page, int order,
-					      struct kmem_cache *s)
+					      struct kmem_cache *s,
+					      gfp_t gfp)
 {
+	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
+		memcg_alloc_page_obj_cgroups(page, s, gfp, true);
+
 	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
 }

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1785,7 +1785,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->objects = oo_objects(oo);
 
-	account_slab_page(page, oo_order(oo), s);
+	account_slab_page(page, oo_order(oo), s, flags);
 
 	page->slab_cache = s;
 	__SetPageSlab(page);