#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];

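/*
 * Compute the effective alignment for a new cache from its flags, the
 * requested alignment and the object size (implemented in slab_common.c).
 */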
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given allocation size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

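/*
 * Slab merging: find_mergeable() looks for an existing compatible cache that
 * a new kmem_cache_create() request can reuse; slab_unmergeable() reports
 * caches that must never be merged (e.g. those with constructors or debug
 * flags set).
 */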
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)

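/*
 * Cache teardown and shrinking. The __kmem_cache_* hooks are implemented by
 * each allocator; slab_kmem_cache_release() is common code that frees the
 * kmem_cache structure itself.
 */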
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

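/* Statistics reported through /proc/slabinfo for one cache */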
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

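/*
 * Charge a slab page to the memcg that owns a non-root cache and account it
 * in the per-memcg slab statistics; memcg_uncharge_slab() below reverses both
 * steps when the page is freed.
 */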
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

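/*
 * Map a to-be-freed object back to the cache it actually belongs to, warning
 * when it differs from the cache the caller passed in (only checked when
 * kmemcg or SLAB_CONSISTENCY_CHECKS makes the lookup necessary).
 */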
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value. But we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

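/*
 * Number of bytes of an object that the caller may actually use, depending on
 * which debug and metadata features occupy the space past the object.
 */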
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

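/*
 * Common pre-allocation hook: apply the allowed gfp mask, run the lockdep and
 * might_sleep checks, honour failslab fault injection, and switch to the
 * per-memcg cache when the allocation is accounted. Returns the cache to
 * allocate from, or NULL if the allocation should fail.
 */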
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

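/*
 * Common post-allocation hook: notify kmemcheck, kmemleak and KASAN about
 * each allocated object and drop the memcg cache reference taken in
 * slab_pre_alloc_hook().
 */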
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif

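/* seq_file iterators backing the slabinfo interfaces (global and per-memcg) */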
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

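/*
 * Free an object directly to the allocator; used by the KASAN quarantine when
 * releasing previously quarantined objects.
 */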
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

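/*
 * Freelist randomization: cache_random_seq_create() pre-computes a random
 * permutation of object indices that new slabs use to shuffle their free
 * lists; cache_random_seq_destroy() releases it.
 */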
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */