slub: Fallback to kmalloc_large for failing higher order allocs

SLUB already has two ways of allocating an object. One is via its own slab
logic; the other is a call to kmalloc_large(), which hands the allocation
off to the page allocator. kmalloc_large() is typically used
for objects >= PAGE_SIZE.
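
For reference, the split looks roughly like the sketch below. This is a
simplified illustration, not the verbatim source of this tree; the exact
size threshold and the helper names (get_slab, slab_alloc) are approximate.

/* Simplified sketch of the two SLUB allocation paths (not verbatim source). */
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	/* Hand the allocation straight to the page allocator as a compound page. */
	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
}

void *__kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *s;

	if (unlikely(size > PAGE_SIZE))
		return kmalloc_large(size, flags);

	s = get_slab(size, flags);	/* pick the matching kmalloc cache */
	return slab_alloc(s, flags, -1, __builtin_return_address(0));
}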

We can use that handoff to avoid failing when a higher order allocation for
a kmalloc slab cannot be satisfied by the page allocator. If we reach the
out-of-memory path, simply try a kmalloc_large() instead. kfree() can
already handle the case of an object that was allocated via the page
allocator, so this works just fine (apart from object
accounting...).
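
kfree() already distinguishes slab objects from page allocator objects by
looking at the page flags. Roughly (a sketch of the kfree() of this era,
details may differ slightly from the tree this patch applies to):

void kfree(const void *x)
{
	struct page *page;

	if (unlikely(ZERO_OR_NULL_PTR(x)))
		return;

	page = virt_to_head_page(x);
	if (unlikely(!PageSlab(page))) {
		/* Object came from the page allocator: give the compound page back. */
		put_page(page);
		return;
	}
	slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
}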

For any kmalloc slab that already requires higher order allocs (which
makes it impossible to use the page allocator fastpath!)
we use an order of at least PAGE_ALLOC_COSTLY_ORDER so that one trip
through the page allocator slowpath yields a good number of objects.

On a 4k platform this patch leads to the following use of higher
order pages for the kmalloc slabs:

8 ... 1024	order 0
2048 ... 4096	order 3 (4k slab only after the next patch)

We may waste some space if the fallback occurs on a 2k slab, but we
are always able to fall back to an order 0 alloc.
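
As a rough sanity check of those numbers (assuming 4k pages): an order 3
slab is 32k, so a 2048 byte object gets 16 objects per slab and a 4096 byte
object gets 8, while a kmalloc_large() fallback hands a whole order 0 page
to a single object, leaving about half of it unused for the 2k case. The
small userspace snippet below only does that arithmetic; it is purely
illustrative and not kernel code.

#include <stdio.h>

/* Illustrative only: objects per order 3 slab, and the space left unused
 * when a single object falls back to a whole order 0 page (4k pages assumed). */
int main(void)
{
	unsigned long page_size = 4096;
	unsigned long slab_size = page_size << 3;	/* order 3 = 32k */
	unsigned long sizes[] = { 2048, 4096 };

	for (int i = 0; i < 2; i++) {
		unsigned long waste = page_size - sizes[i];	/* fallback page holds one object */
		printf("object %lu: %lu per order-3 slab, fallback wastes %lu bytes\n",
		       sizes[i], slab_size / sizes[i], waste);
	}
	return 0;
}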

Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
commit 71c7a06ff0
parent b7a49f0d4c
Author: Christoph Lameter <clameter@sgi.com>
Date:   2008-02-14 14:28:01 -08:00

1 file changed, 38 insertions(+), 5 deletions(-)

@@ -211,6 +211,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -1539,7 +1541,6 @@ static void *__slab_alloc(struct kmem_cache *s,
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-out:
 #ifdef SLUB_FASTPATH
 	local_irq_restore(flags);
 #endif
@@ -1574,8 +1575,24 @@ static void *__slab_alloc(struct kmem_cache *s,
 		c->page = new;
 		goto load_freelist;
 	}
-	object = NULL;
-	goto out;
+#ifdef SLUB_FASTPATH
+	local_irq_restore(flags);
+#endif
+	/*
+	 * No memory available.
+	 *
+	 * If the slab uses higher order allocs but the object is
+	 * smaller than a page size then we can fallback in emergencies
+	 * to the page allocator via kmalloc_large. The page allocator may
+	 * have failed to obtain a higher order page and we can try to
+	 * allocate a single page if the object fits into a single page.
+	 * That is only possible if certain conditions are met that are being
+	 * checked when a slab is created.
+	 */
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
+	return NULL;
 debug:
 	object = c->page->freelist;
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -2322,7 +2339,20 @@ static int calculate_sizes(struct kmem_cache *s)
 	size = ALIGN(size, align);
 	s->size = size;
 
-	s->order = calculate_order(size);
+	if ((flags & __KMALLOC_CACHE) &&
+			PAGE_SIZE / size < slub_min_objects) {
+		/*
+		 * Kmalloc cache that would not have enough objects in
+		 * an order 0 page. Kmalloc slabs can fallback to
+		 * page allocator order 0 allocs so take a reasonably large
+		 * order that will allows us a good number of objects.
+		 */
+		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+		s->flags |= __PAGE_ALLOC_FALLBACK;
+		s->allocflags |= __GFP_NOWARN;
+	} else
+		s->order = calculate_order(size);
+
 	if (s->order < 0)
 		return 0;
@@ -2539,7 +2569,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL))
+			flags | __KMALLOC_CACHE, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3058,6 +3088,9 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
+	if (s->flags & __PAGE_ALLOC_FALLBACK)
+		return 1;
+
 	if (s->ctor)
 		return 1;