linux_old1/mm/mempool.c

/*
* linux/mm/mempool.c
*
* memory buffer pool support. Such pools are mostly used
* for guaranteed, deadlock-free memory allocations during
* extreme VM load.
*
* started by Ingo Molnar, Copyright (C) 2001
* debugging by David Rientjes, Copyright (C) 2015
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
size_t byte)
{
const int nr = pool->curr_nr;
const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
int i;
pr_err("BUG: mempool element poison mismatch\n");
pr_err("Mempool %p size %zu\n", pool, size);
pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
for (i = start; i < end; i++)
pr_cont("%x ", *(u8 *)(element + i));
pr_cont("%s\n", end < size ? "..." : "");
dump_stack();
}
static void __check_element(mempool_t *pool, void *element, size_t size)
{
u8 *obj = element;
size_t i;
for (i = 0; i < size; i++) {
u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
if (obj[i] != exp) {
poison_error(pool, element, size, i);
return;
}
}
memset(obj, POISON_INUSE, size);
}
static void check_element(mempool_t *pool, void *element)
{
/* Mempools backed by slab allocator */
if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
__check_element(pool, element, ksize(element));
/* Mempools backed by page allocator */
if (pool->free == mempool_free_pages) {
int order = (int)(long)pool->pool_data;
void *addr = kmap_atomic((struct page *)element);
__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
kunmap_atomic(addr);
}
}
static void __poison_element(void *element, size_t size)
{
u8 *obj = element;
memset(obj, POISON_FREE, size - 1);
obj[size - 1] = POISON_END;
}
static void poison_element(mempool_t *pool, void *element)
{
/* Mempools backed by slab allocator */
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
__poison_element(element, ksize(element));
/* Mempools backed by page allocator */
if (pool->alloc == mempool_alloc_pages) {
int order = (int)(long)pool->pool_data;
void *addr = kmap_atomic((struct page *)element);
__poison_element(addr, 1UL << (PAGE_SHIFT + order));
kunmap_atomic(addr);
}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static void kasan_poison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_alloc_slab)
kasan_poison_slab_free(pool->pool_data, element);
if (pool->alloc == mempool_kmalloc)
kasan_kfree(element);
if (pool->alloc == mempool_alloc_pages)
kasan_free_pages(element, (unsigned long)pool->pool_data);
}
static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
if (pool->alloc == mempool_alloc_slab)
kasan_slab_alloc(pool->pool_data, element, flags);
if (pool->alloc == mempool_kmalloc)
kasan_krealloc(element, (size_t)pool->pool_data, flags);
if (pool->alloc == mempool_alloc_pages)
kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}
static void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
poison_element(pool, element);
kasan_poison_element(pool, element);
pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool, gfp_t flags)
{
void *element = pool->elements[--pool->curr_nr];
BUG_ON(pool->curr_nr < 0);
kasan_unpoison_element(pool, element, flags);
check_element(pool, element);
return element;
}
/**
* mempool_destroy - deallocate a memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
*
* Free all reserved elements in @pool and @pool itself. This function
* only sleeps if the free_fn() function sleeps.
*/
void mempool_destroy(mempool_t *pool)
{
if (unlikely(!pool))
return;
while (pool->curr_nr) {
void *element = remove_element(pool, GFP_KERNEL);
pool->free(element, pool->pool_data);
}
kfree(pool->elements);
kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
/**
* mempool_create - create a memory pool
* @min_nr: the minimum number of elements guaranteed to be
* allocated for this pool.
* @alloc_fn: user-defined element-allocation function.
* @free_fn: user-defined element-freeing function.
* @pool_data: optional private data available to the user-defined functions.
*
* This function creates and allocates a guaranteed-size, preallocated
* memory pool. The pool can be used from the mempool_alloc() and
* mempool_free() functions. This function might sleep. Both alloc_fn() and
* free_fn() may also sleep, as long as mempool_alloc() is never called from
* IRQ context.
*/
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
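/*
* Usage sketch (illustrative only, not part of the original file): a pool
* that guarantees four "foo" objects, backed by a hypothetical kmem_cache.
* The mempool_alloc_slab()/mempool_free_slab() helpers are defined below.
*
*   static struct kmem_cache *foo_cachep;
*   static mempool_t *foo_pool;
*
*   foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
*                                  SLAB_HWCACHE_ALIGN, NULL);
*   if (!foo_cachep)
*           return -ENOMEM;
*   foo_pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
*                             foo_cachep);
*   if (!foo_pool)
*           return -ENOMEM;
*/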
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
mempool_t *pool;
pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
if (!pool)
return NULL;
pool->elements = kmalloc_node(min_nr * sizeof(void *),
gfp_mask, node_id);
if (!pool->elements) {
kfree(pool);
return NULL;
}
spin_lock_init(&pool->lock);
pool->min_nr = min_nr;
pool->pool_data = pool_data;
init_waitqueue_head(&pool->wait);
pool->alloc = alloc_fn;
pool->free = free_fn;
/*
* First pre-allocate the guaranteed number of buffers.
*/
while (pool->curr_nr < pool->min_nr) {
void *element;
element = pool->alloc(gfp_mask, pool->pool_data);
if (unlikely(!element)) {
mempool_destroy(pool);
return NULL;
}
add_element(pool, element);
}
return pool;
}
EXPORT_SYMBOL(mempool_create_node);
/**
* mempool_resize - resize an existing memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
* @new_min_nr: the new minimum number of elements guaranteed to be
* allocated for this pool.
*
* This function shrinks/grows the pool. In the case of growing,
* it cannot be guaranteed that the pool will be grown to the new
* size immediately, but new mempool_free() calls will refill it.
* This function may sleep.
*
* Note that the caller must guarantee that no mempool_destroy() call is
* made while this function is running. mempool_alloc() & mempool_free()
* might be called (e.g. from IRQ contexts) while this function executes.
*/
int mempool_resize(mempool_t *pool, int new_min_nr)
{
void *element;
void **new_elements;
unsigned long flags;
BUG_ON(new_min_nr <= 0);
might_sleep();
spin_lock_irqsave(&pool->lock, flags);
if (new_min_nr <= pool->min_nr) {
while (new_min_nr < pool->curr_nr) {
element = remove_element(pool, GFP_KERNEL);
spin_unlock_irqrestore(&pool->lock, flags);
pool->free(element, pool->pool_data);
spin_lock_irqsave(&pool->lock, flags);
}
pool->min_nr = new_min_nr;
goto out_unlock;
}
spin_unlock_irqrestore(&pool->lock, flags);
/* Grow the pool */
new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
GFP_KERNEL);
if (!new_elements)
return -ENOMEM;
spin_lock_irqsave(&pool->lock, flags);
if (unlikely(new_min_nr <= pool->min_nr)) {
/* Raced, other resize will do our work */
spin_unlock_irqrestore(&pool->lock, flags);
kfree(new_elements);
goto out;
}
memcpy(new_elements, pool->elements,
pool->curr_nr * sizeof(*new_elements));
kfree(pool->elements);
pool->elements = new_elements;
pool->min_nr = new_min_nr;
while (pool->curr_nr < pool->min_nr) {
spin_unlock_irqrestore(&pool->lock, flags);
element = pool->alloc(GFP_KERNEL, pool->pool_data);
if (!element)
goto out;
spin_lock_irqsave(&pool->lock, flags);
if (pool->curr_nr < pool->min_nr) {
add_element(pool, element);
} else {
spin_unlock_irqrestore(&pool->lock, flags);
pool->free(element, pool->pool_data); /* Raced */
goto out;
}
}
out_unlock:
spin_unlock_irqrestore(&pool->lock, flags);
out:
return 0;
}
EXPORT_SYMBOL(mempool_resize);
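/*
* Usage sketch (illustrative only): grow the reserve of the hypothetical
* foo_pool above from 4 to 16 elements, e.g. after more devices appear.
*
*   if (mempool_resize(foo_pool, 16))
*           pr_warn("foo: could not grow emergency pool\n");
*/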
/**
* mempool_alloc - allocate an element from a specific memory pool
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
* @gfp_mask: the usual allocation bitmask.
*
* This function only sleeps if the alloc_fn() callback sleeps or
* returns NULL. Note that due to preallocation, this function
* *never* fails when called from process context (it might fail
* if called from an IRQ context).
* Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO is supported.
*/
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
void *element;
unsigned long flags;
wait_queue_t wait;
gfp_t gfp_temp;
/* If oom killed, memory reserves are essential to prevent livelock */
VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
/* Recycled elements are not zeroed, so __GFP_ZERO cannot be honored */
VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
gfp_mask |= __GFP_NOWARN; /* failures are OK */
gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
repeat_alloc:
if (likely(pool->curr_nr)) {
/*
* Don't allocate from emergency reserves if there are
* elements available. This check is racy, but it will
* be rechecked each loop.
*/
gfp_temp |= __GFP_NOMEMALLOC;
}
element = pool->alloc(gfp_temp, pool->pool_data);
if (likely(element != NULL))
return element;
spin_lock_irqsave(&pool->lock, flags);
if (likely(pool->curr_nr)) {
element = remove_element(pool, gfp_temp);
spin_unlock_irqrestore(&pool->lock, flags);
/* paired with rmb in mempool_free(), read comment there */
smp_wmb();
/*
* Update the allocation stack trace as this is more useful
* for debugging.
*/
kmemleak_update_trace(element);
return element;
}
/*
* We use gfp mask w/o direct reclaim or IO for the first round. If
* alloc failed with that and @pool was empty, retry immediately.
*/
if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
spin_unlock_irqrestore(&pool->lock, flags);
gfp_temp = gfp_mask;
goto repeat_alloc;
}
gfp_temp = gfp_mask;
/* We must not sleep if !__GFP_DIRECT_RECLAIM */
if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
spin_unlock_irqrestore(&pool->lock, flags);
return NULL;
}
/* Let's wait for someone else to return an element to @pool */
init_wait(&wait);
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(&pool->lock, flags);
/*
* FIXME: this should be io_schedule(). The timeout is there as a
* workaround for some DM problems in 2.6.18.
*/
io_schedule_timeout(5*HZ);
finish_wait(&pool->wait, &wait);
goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
/**
* mempool_free - return an element to the pool.
* @element: pool element pointer.
* @pool: pointer to the memory pool which was allocated via
* mempool_create().
*
* This function only sleeps if the free_fn() callback sleeps.
*/
void mempool_free(void *element, mempool_t *pool)
{
unsigned long flags;
if (unlikely(element == NULL))
return;
/*
* Paired with the wmb in mempool_alloc(). The preceding read is
* for @element and the following @pool->curr_nr. This ensures
* that the visible value of @pool->curr_nr is from after the
* allocation of @element. This is necessary for fringe cases
* where @element was passed to this task without going through
* barriers.
*
* For example, assume @p is %NULL at the beginning and one task
* performs "p = mempool_alloc(...);" while another task is doing
* "while (!p) cpu_relax(); mempool_free(p, ...);". This function
* may end up using curr_nr value which is from before allocation
* of @p without the following rmb.
*/
smp_rmb();
/*
* For correctness, we need a test which is guaranteed to trigger
* if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr
* without locking achieves that and refilling as soon as possible
* is desirable.
*
* Because curr_nr visible here is always a value after the
* allocation of @element, any task which decremented curr_nr below
* min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
* incremented to min_nr afterwards. If curr_nr gets incremented
* to min_nr after the allocation of @element, the elements
* allocated after that are subject to the same guarantee.
*
* Waiters happen iff curr_nr is 0 and the above guarantee also
* ensures that there will be frees which return elements to the
* pool waking up the waiters.
*/
if (unlikely(pool->curr_nr < pool->min_nr)) {
spin_lock_irqsave(&pool->lock, flags);
if (likely(pool->curr_nr < pool->min_nr)) {
add_element(pool, element);
spin_unlock_irqrestore(&pool->lock, flags);
wake_up(&pool->wait);
return;
}
spin_unlock_irqrestore(&pool->lock, flags);
}
pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
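/*
* Usage sketch (illustrative only): the typical alloc/free pairing in an
* I/O submission path, again assuming the hypothetical foo_pool above.
* GFP_NOIO avoids recursing into the I/O layer during reclaim; the call
* may block until another user returns an element, but in process context
* it does not fail.
*
*   struct foo *f = mempool_alloc(foo_pool, GFP_NOIO);
*
*   ... use f to build and submit the request ...
*
*   mempool_free(f, foo_pool);
*/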
/*
* A commonly used alloc and free fn.
*/
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
struct kmem_cache *mem = pool_data;
VM_BUG_ON(mem->ctor);
return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);
void mempool_free_slab(void *element, void *pool_data)
{
struct kmem_cache *mem = pool_data;
kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
/*
* A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
* specified by pool_data
*/
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
size_t size = (size_t)pool_data;
return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);
void mempool_kfree(void *element, void *pool_data)
{
kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
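/*
* Usage sketch (illustrative only): a kmalloc-backed pool. pool_data
* carries the allocation size cast to a pointer, so in this hypothetical
* example every element is a 256-byte buffer.
*
*   mempool_t *buf_pool;
*
*   buf_pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
*                             (void *)(size_t)256);
*/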
/*
* A simple mempool-backed page allocator that allocates pages
* of the order specified by pool_data.
*/
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
int order = (int)(long)pool_data;
return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);
void mempool_free_pages(void *element, void *pool_data)
{
int order = (int)(long)pool_data;
__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
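/*
* Usage sketch (illustrative only): a page-backed pool. pool_data carries
* the page order, so this hypothetical pool keeps two order-1 allocations
* (8 KiB each with 4 KiB pages) in reserve.
*
*   mempool_t *page_pool;
*
*   page_pool = mempool_create(2, mempool_alloc_pages, mempool_free_pages,
*                              (void *)(long)1);
*/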