mm/slab: move SLUB alloc hooks to common mm/slab.h

First step towards sharing the alloc hooks between the SLUB and SLAB
allocators.  Move the SLUB allocator's *_alloc_hook functions to the
common mm/slab.h for internal slab definitions.
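
For context, these hooks define a fixed call protocol: slab_pre_alloc_hook()
runs before any object is taken (it can fail the allocation via fault
injection, or redirect to a per-memcg cache), and slab_post_alloc_hook()
runs the debugging/accounting hooks over every allocated object.  A minimal
sketch of a single-object caller, assuming a hypothetical do_alloc() core
(example_slab_alloc() and do_alloc() are illustrations, not kernel
functions; error handling is elided):

	/* Sketch only: simplified single-object allocation fast path. */
	static void *example_slab_alloc(struct kmem_cache *s, gfp_t gfpflags)
	{
		void *object;

		/* May return NULL (failslab) or a per-memcg cache. */
		s = slab_pre_alloc_hook(s, gfpflags);
		if (!s)
			return NULL;

		object = do_alloc(s, gfpflags);	/* hypothetical allocator core */

		/* kmemcheck/kmemleak/kasan + memcg put, once per object. */
		slab_post_alloc_hook(s, gfpflags, 1, &object);
		return object;
	}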

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Jesper Dangaard Brouer <brouer@redhat.com>
Date:      2016-03-15 14:53:35 -07:00
Committer: Linus Torvalds
parent 376bf125ac
commit 11c7aec2a9
2 changed files with 62 additions and 54 deletions

--- a/mm/slab.h
+++ b/mm/slab.h
@@ -38,6 +38,10 @@ struct kmem_cache {
 #endif

 #include <linux/memcontrol.h>
+#include <linux/fault-inject.h>
+#include <linux/kmemcheck.h>
+#include <linux/kasan.h>
+#include <linux/kmemleak.h>

 /*
  * State of the slab allocator.
@@ -321,6 +325,64 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	return s;
 }

+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifndef CONFIG_SLUB
+	return s->object_size;
+
+#else /* CONFIG_SLUB */
+# ifdef CONFIG_SLUB_DEBUG
+	/*
+	 * Debugging requires use of the padding between object
+	 * and whatever may come after it.
+	 */
+	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+		return s->object_size;
+# endif
+	/*
+	 * If we have the need to store the freelist pointer
+	 * back there or track user information then we can
+	 * only use the space before that information.
+	 */
+	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+		return s->inuse;
+	/*
+	 * Else we can use all the padding etc for the allocation
+	 */
+	return s->size;
+#endif
+}
+
+static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+						     gfp_t flags)
+{
+	flags &= gfp_allowed_mask;
+	lockdep_trace_alloc(flags);
+	might_sleep_if(gfpflags_allow_blocking(flags));
+
+	if (should_failslab(s->object_size, flags, s->flags))
+		return NULL;
+
+	return memcg_kmem_get_cache(s, flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+					size_t size, void **p)
+{
+	size_t i;
+
+	flags &= gfp_allowed_mask;
+	for (i = 0; i < size; i++) {
+		void *object = p[i];
+
+		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+		kmemleak_alloc_recursive(object, s->object_size, 1,
+					 s->flags, flags);
+		kasan_slab_alloc(s, object);
+	}
+	memcg_kmem_put_cache(s);
+}
+
 #ifndef CONFIG_SLOB
 /*
  * The slab lists for all objects.

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -284,30 +284,6 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }

-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	/*
-	 * Debugging requires use of the padding between object
-	 * and whatever may come after it.
-	 */
-	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->object_size;
-
-#endif
-	/*
-	 * If we have the need to store the freelist pointer
-	 * back there or track user information then we can
-	 * only use the space before that information.
-	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-		return s->inuse;
-	/*
-	 * Else we can use all the padding etc for the allocation
-	 */
-	return s->size;
-}
-
 static inline int order_objects(int order, unsigned long size, int reserved)
 {
 	return ((PAGE_SIZE << order) - reserved) / size;
@@ -1281,36 +1257,6 @@ static inline void kfree_hook(const void *x)
 	kasan_kfree_large(x);
 }

-static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
-						     gfp_t flags)
-{
-	flags &= gfp_allowed_mask;
-	lockdep_trace_alloc(flags);
-	might_sleep_if(gfpflags_allow_blocking(flags));
-
-	if (should_failslab(s->object_size, flags, s->flags))
-		return NULL;
-
-	return memcg_kmem_get_cache(s, flags);
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-					size_t size, void **p)
-{
-	size_t i;
-
-	flags &= gfp_allowed_mask;
-	for (i = 0; i < size; i++) {
-		void *object = p[i];
-
-		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-		kmemleak_alloc_recursive(object, s->object_size, 1,
-					 s->flags, flags);
-		kasan_slab_alloc(s, object);
-	}
-	memcg_kmem_put_cache(s);
-}
-
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
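
The array form of slab_post_alloc_hook() exists so that bulk allocation can
run the per-object hooks once over a whole batch.  A hedged sketch of such a
caller, loosely following the kmem_cache_alloc_bulk() pattern in mm/slub.c
(example_alloc_bulk() and do_alloc() are hypothetical stand-ins, and the
partial-failure cleanup the real code performs is elided):

	/* Sketch only: the post hook covers all 'size' objects at once. */
	static int example_alloc_bulk(struct kmem_cache *s, gfp_t flags,
				      size_t size, void **p)
	{
		size_t i;

		s = slab_pre_alloc_hook(s, flags);
		if (!s)
			return 0;

		for (i = 0; i < size; i++) {
			p[i] = do_alloc(s, flags);	/* hypothetical core */
			if (!p[i])
				return 0;	/* real code also unwinds p[0..i-1] */
		}

		slab_post_alloc_hook(s, flags, size, p);
		return size;
	}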