mm, sl[aou]b: Use a common mutex definition

Use the mutex definition from SLAB and make it the common way to take a sleeping lock.

This has the effect of using a mutex instead of a rw semaphore for SLUB.

SLOB gains a mutex for kmem_cache_create serialization. It is not needed
now, but SLOB may acquire more features later (such as slabinfo / sysfs
support) through the expansion of the common code, which will need this.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Christoph Lameter authored 2012-07-06 15:25:12 -05:00, committed by Pekka Enberg
commit 18004c5d40
parent 97d0660915
4 changed files with 82 additions and 86 deletions
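
For orientation, here is a minimal sketch of the pattern the patch converges on: the cache list and its mutex are defined once in the common code, exposed to the allocators through the private mm/slab.h header, and every walk of the cache list happens under the same lock. The declarations and definitions mirror the hunks below; the walk_caches() helper is only a hypothetical consumer added for illustration.

/* mm/slab.h -- shared, allocator-private header (as added by this patch) */
extern struct mutex slab_mutex;        /* serializes changes to the cache list */
extern struct list_head slab_caches;   /* every kmem_cache on the system */

/* mm/slab_common.c -- single definition shared by SLAB, SLUB and SLOB */
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);

/* Hypothetical consumer: any allocator file that includes "slab.h" takes
 * the common mutex around list traversal instead of a private lock such
 * as SLUB's old slub_lock rw semaphore. */
static void walk_caches(void)
{
        struct kmem_cache *s;

        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list)
                pr_info("cache: %s\n", s->name);
        mutex_unlock(&slab_mutex);
}

Because the rw semaphore becomes a plain mutex, SLUB paths that used to take slub_lock for reading (for example the cpu-hotplug flush loop) now serialize against each other as well; the commit accepts that in exchange for one shared definition.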

mm/slab.c (108 lines changed)

@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ * The global cache-chain is protected by the mutex 'slab_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -671,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif
 
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1100,7 +1094,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
  * When hotplugging memory or a cpu, existing nodelists are not replaced if
  * already in use.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int init_cache_nodelists_node(int node)
 {
@@ -1108,7 +1102,7 @@ static int init_cache_nodelists_node(int node)
        struct kmem_list3 *l3;
        const int memsize = sizeof(struct kmem_list3);
 
-       list_for_each_entry(cachep, &cache_chain, list) {
+       list_for_each_entry(cachep, &slab_caches, list) {
                /*
                 * Set up the size64 kmemlist for cpu before we can
                 * begin anything. Make sure some other cpu on this
@@ -1124,7 +1118,7 @@ static int init_cache_nodelists_node(int node)
                        /*
                         * The l3s don't come and go as CPUs come and
-                        * go. cache_chain_mutex is sufficient
+                        * go. slab_mutex is sufficient
                         * protection here.
                         */
                        cachep->nodelists[node] = l3;
                }
@@ -1146,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu)
        int node = cpu_to_mem(cpu);
        const struct cpumask *mask = cpumask_of_node(node);
 
-       list_for_each_entry(cachep, &cache_chain, list) {
+       list_for_each_entry(cachep, &slab_caches, list) {
                struct array_cache *nc;
                struct array_cache *shared;
                struct array_cache **alien;
@@ -1196,7 +1190,7 @@ static void __cpuinit cpuup_canceled(long cpu)
         * the respective cache's slabs, now we can go ahead and
         * shrink each nodelist to its limit.
         */
-       list_for_each_entry(cachep, &cache_chain, list) {
+       list_for_each_entry(cachep, &slab_caches, list) {
                l3 = cachep->nodelists[node];
                if (!l3)
                        continue;
@@ -1225,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
         * Now we can go ahead with allocating the shared arrays and
         * array caches
         */
-       list_for_each_entry(cachep, &cache_chain, list) {
+       list_for_each_entry(cachep, &slab_caches, list) {
                struct array_cache *nc;
                struct array_cache *shared = NULL;
                struct array_cache **alien = NULL;
@@ -1293,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               mutex_lock(&cache_chain_mutex);
+               mutex_lock(&slab_mutex);
                err = cpuup_prepare(cpu);
-               mutex_unlock(&cache_chain_mutex);
+               mutex_unlock(&slab_mutex);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
@@ -1305,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                /*
-                * Shutdown cache reaper. Note that the cache_chain_mutex is
+                * Shutdown cache reaper. Note that the slab_mutex is
                 * held so that if cache_reap() is invoked it cannot do
                 * anything expensive but will only modify reap_work
                 * and reschedule the timer.
@@ -1332,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
-               mutex_lock(&cache_chain_mutex);
+               mutex_lock(&slab_mutex);
                cpuup_canceled(cpu);
-               mutex_unlock(&cache_chain_mutex);
+               mutex_unlock(&slab_mutex);
                break;
        }
        return notifier_from_errno(err);
@@ -1350,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
  * Returns -EBUSY if all objects cannot be drained so that the node is not
  * removed.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int __meminit drain_cache_nodelists_node(int node)
 {
        struct kmem_cache *cachep;
        int ret = 0;
 
-       list_for_each_entry(cachep, &cache_chain, list) {
+       list_for_each_entry(cachep, &slab_caches, list) {
                struct kmem_list3 *l3;
 
                l3 = cachep->nodelists[node];
@@ -1388,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
        switch (action) {
        case MEM_GOING_ONLINE:
-               mutex_lock(&cache_chain_mutex);
+               mutex_lock(&slab_mutex);
                ret = init_cache_nodelists_node(nid);
-               mutex_unlock(&cache_chain_mutex);
+               mutex_unlock(&slab_mutex);
                break;
        case MEM_GOING_OFFLINE:
-               mutex_lock(&cache_chain_mutex);
+               mutex_lock(&slab_mutex);
                ret = drain_cache_nodelists_node(nid);
-               mutex_unlock(&cache_chain_mutex);
+               mutex_unlock(&slab_mutex);
                break;
        case MEM_ONLINE:
        case MEM_OFFLINE:
@@ -1499,8 +1493,8 @@ void __init kmem_cache_init(void)
        node = numa_mem_id();
 
        /* 1) create the cache_cache */
-       INIT_LIST_HEAD(&cache_chain);
-       list_add(&cache_cache.list, &cache_chain);
+       INIT_LIST_HEAD(&slab_caches);
+       list_add(&cache_cache.list, &slab_caches);
        cache_cache.colour_off = cache_line_size();
        cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
        cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1642,11 +1636,11 @@ void __init kmem_cache_init_late(void)
        init_lock_keys();
 
        /* 6) resize the head arrays to their final sizes */
-       mutex_lock(&cache_chain_mutex);
-       list_for_each_entry(cachep, &cache_chain, list)
+       mutex_lock(&slab_mutex);
+       list_for_each_entry(cachep, &slab_caches, list)
                if (enable_cpucache(cachep, GFP_NOWAIT))
                        BUG();
-       mutex_unlock(&cache_chain_mutex);
+       mutex_unlock(&slab_mutex);
 
        /* Done! */
        slab_state = FULL;
@@ -2253,10 +2247,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
        */
        if (slab_is_available()) {
                get_online_cpus();
-               mutex_lock(&cache_chain_mutex);
+               mutex_lock(&slab_mutex);
        }
 
-       list_for_each_entry(pc, &cache_chain, list) {
+       list_for_each_entry(pc, &slab_caches, list) {
                char tmp;
                int res;
 
@@ -2500,10 +2494,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
        }
 
        /* cache setup completed, link it into the list */
-       list_add(&cachep->list, &cache_chain);
+       list_add(&cachep->list, &slab_caches);
 oops:
        if (slab_is_available()) {
-               mutex_unlock(&cache_chain_mutex);
+               mutex_unlock(&slab_mutex);
                put_online_cpus();
        }
        return cachep;
@@ -2622,7 +2616,7 @@ static int drain_freelist(struct kmem_cache *cache,
        return nr_freed;
 }
 
-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
        int ret = 0, i = 0;
@@ -2657,9 +2651,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
        BUG_ON(!cachep || in_interrupt());
 
        get_online_cpus();
-       mutex_lock(&cache_chain_mutex);
+       mutex_lock(&slab_mutex);
        ret = __cache_shrink(cachep);
-       mutex_unlock(&cache_chain_mutex);
+       mutex_unlock(&slab_mutex);
        put_online_cpus();
        return ret;
 }
@@ -2687,15 +2681,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
        /* Find the cache in the chain of caches. */
        get_online_cpus();
-       mutex_lock(&cache_chain_mutex);
+       mutex_lock(&slab_mutex);
        /*
         * the chain is never empty, cache_cache is never destroyed
         */
        list_del(&cachep->list);
        if (__cache_shrink(cachep)) {
                slab_error(cachep, "Can't free all objects");
-               list_add(&cachep->list, &cache_chain);
-               mutex_unlock(&cache_chain_mutex);
+               list_add(&cachep->list, &slab_caches);
+               mutex_unlock(&slab_mutex);
                put_online_cpus();
                return;
        }
@@ -2704,7 +2698,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
                rcu_barrier();
 
        __kmem_cache_destroy(cachep);
-       mutex_unlock(&cache_chain_mutex);
+       mutex_unlock(&slab_mutex);
        put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4017,7 +4011,7 @@ static void do_ccupdate_local(void *info)
        new->new[smp_processor_id()] = old;
 }
 
-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                                int batchcount, int shared, gfp_t gfp)
 {
@@ -4061,7 +4055,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
        return alloc_kmemlist(cachep, gfp);
 }
 
-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
        int err;
@@ -4163,11 +4157,11 @@ static void cache_reap(struct work_struct *w)
        int node = numa_mem_id();
        struct delayed_work *work = to_delayed_work(w);
 
-       if (!mutex_trylock(&cache_chain_mutex))
+       if (!mutex_trylock(&slab_mutex))
                /* Give up. Setup the next iteration. */
                goto out;
 
-       list_for_each_entry(searchp, &cache_chain, list) {
+       list_for_each_entry(searchp, &slab_caches, list) {
                check_irq_on();
 
                /*
@@ -4205,7 +4199,7 @@ static void cache_reap(struct work_struct *w)
                cond_resched();
        }
        check_irq_on();
-       mutex_unlock(&cache_chain_mutex);
+       mutex_unlock(&slab_mutex);
        next_reap_node();
 out:
        /* Set up the next iteration */
@@ -4241,21 +4235,21 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
        loff_t n = *pos;
 
-       mutex_lock(&cache_chain_mutex);
+       mutex_lock(&slab_mutex);
        if (!n)
                print_slabinfo_header(m);
 
-       return seq_list_start(&cache_chain, *pos);
+       return seq_list_start(&slab_caches, *pos);
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-       return seq_list_next(p, &cache_chain, pos);
+       return seq_list_next(p, &slab_caches, pos);
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
-       mutex_unlock(&cache_chain_mutex);
+       mutex_unlock(&slab_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -4406,9 +4400,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                return -EINVAL;
 
        /* Find the cache in the chain of caches. */
-       mutex_lock(&cache_chain_mutex);
+       mutex_lock(&slab_mutex);
        res = -EINVAL;
-       list_for_each_entry(cachep, &cache_chain, list) {
+       list_for_each_entry(cachep, &slab_caches, list) {
                if (!strcmp(cachep->name, kbuf)) {
                        if (limit < 1 || batchcount < 1 ||
                                        batchcount > limit || shared < 0) {
@@ -4421,7 +4415,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                        break;
                }
        }
-       mutex_unlock(&cache_chain_mutex);
+       mutex_unlock(&slab_mutex);
        if (res >= 0)
                res = count;
        return res;
@@ -4444,8 +4438,8 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static void *leaks_start(struct seq_file *m, loff_t *pos)
 {
-       mutex_lock(&cache_chain_mutex);
-       return seq_list_start(&cache_chain, *pos);
+       mutex_lock(&slab_mutex);
+       return seq_list_start(&slab_caches, *pos);
 }
 
 static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4544,17 +4538,17 @@ static int leaks_show(struct seq_file *m, void *p)
        name = cachep->name;
        if (n[0] == n[1]) {
                /* Increase the buffer size */
-               mutex_unlock(&cache_chain_mutex);
+               mutex_unlock(&slab_mutex);
                m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
                if (!m->private) {
                        /* Too bad, we are really out */
                        m->private = n;
-                       mutex_lock(&cache_chain_mutex);
+                       mutex_lock(&slab_mutex);
                        return -ENOMEM;
                }
                *(unsigned long *)m->private = n[0] * 2;
                kfree(n);
-               mutex_lock(&cache_chain_mutex);
+               mutex_lock(&slab_mutex);
                /* Now make sure this entry will be retried */
                m->count = m->size;
                return 0;

mm/slab.h

@@ -23,6 +23,10 @@ enum slab_state {
 
 extern enum slab_state slab_state;
 
+/* The slab cache mutex protects the management structures during changes */
+extern struct mutex slab_mutex;
+extern struct list_head slab_caches;
+
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
        size_t align, unsigned long flags, void (*ctor)(void *));

mm/slab_common.c

@@ -19,6 +19,8 @@
 #include "slab.h"
 
 enum slab_state slab_state;
+LIST_HEAD(slab_caches);
+DEFINE_MUTEX(slab_mutex);
 
 /*
  * kmem_cache_create - Create a cache.

mm/slub.c

@@ -36,13 +36,13 @@
 
 /*
  * Lock order:
- *   1. slub_lock (Global Semaphore)
+ *   1. slab_mutex (Global Mutex)
  *   2. node->list_lock
  *   3. slab_lock(page) (Only on some arches and for debugging)
  *
- *   slub_lock
+ *   slab_mutex
  *
- *   The role of the slub_lock is to protect the list of all the slabs
+ *   The role of the slab_mutex is to protect the list of all the slabs
  *   and to synchronize major metadata changes to slab cache structures.
  *
  *   The slab_lock is only used for debugging and on arches that do not
@@ -183,10 +183,6 @@ static int kmem_size = sizeof(struct kmem_cache);
 static struct notifier_block slab_notifier;
 #endif
 
-/* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
-static LIST_HEAD(slab_caches);
-
 /*
  * Tracking user of a slab.
  */
@@ -3177,11 +3173,11 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-       down_write(&slub_lock);
+       mutex_lock(&slab_mutex);
        s->refcount--;
        if (!s->refcount) {
                list_del(&s->list);
-               up_write(&slub_lock);
+               mutex_unlock(&slab_mutex);
                if (kmem_cache_close(s)) {
                        printk(KERN_ERR "SLUB %s: %s called for cache that "
                                "still has objects.\n", s->name, __func__);
@@ -3191,7 +3187,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
                        rcu_barrier();
                sysfs_slab_remove(s);
        } else
-               up_write(&slub_lock);
+               mutex_unlock(&slab_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -3253,7 +3249,7 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
        /*
         * This function is called with IRQs disabled during early-boot on
-        * single CPU so there's no need to take slub_lock here.
+        * single CPU so there's no need to take slab_mutex here.
         */
        if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
                                                                flags, NULL))
@@ -3538,10 +3534,10 @@ static int slab_mem_going_offline_callback(void *arg)
 {
        struct kmem_cache *s;
 
-       down_read(&slub_lock);
+       mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list)
                kmem_cache_shrink(s);
-       up_read(&slub_lock);
+       mutex_unlock(&slab_mutex);
 
        return 0;
 }
@@ -3562,7 +3558,7 @@ static void slab_mem_offline_callback(void *arg)
        if (offline_node < 0)
                return;
 
-       down_read(&slub_lock);
+       mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list) {
                n = get_node(s, offline_node);
                if (n) {
@@ -3578,7 +3574,7 @@ static void slab_mem_offline_callback(void *arg)
                        kmem_cache_free(kmem_cache_node, n);
                }
        }
-       up_read(&slub_lock);
+       mutex_unlock(&slab_mutex);
 }
 
 static int slab_mem_going_online_callback(void *arg)
@@ -3601,7 +3597,7 @@ static int slab_mem_going_online_callback(void *arg)
         * allocate a kmem_cache_node structure in order to bring the node
         * online.
         */
-       down_read(&slub_lock);
+       mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list) {
                /*
                 * XXX: kmem_cache_alloc_node will fallback to other nodes
@@ -3617,7 +3613,7 @@ static int slab_mem_going_online_callback(void *arg)
                s->node[nid] = n;
        }
 out:
-       up_read(&slub_lock);
+       mutex_unlock(&slab_mutex);
        return ret;
 }
@@ -3915,7 +3911,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
        struct kmem_cache *s;
        char *n;
 
-       down_write(&slub_lock);
+       mutex_lock(&slab_mutex);
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
                s->refcount++;
@@ -3930,7 +3926,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
                        s->refcount--;
                        goto err;
                }
-               up_write(&slub_lock);
+               mutex_unlock(&slab_mutex);
                return s;
        }
@@ -3943,9 +3939,9 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
                if (kmem_cache_open(s, n,
                                size, align, flags, ctor)) {
                        list_add(&s->list, &slab_caches);
-                       up_write(&slub_lock);
+                       mutex_unlock(&slab_mutex);
                        if (sysfs_slab_add(s)) {
-                               down_write(&slub_lock);
+                               mutex_lock(&slab_mutex);
                                list_del(&s->list);
                                kfree(n);
                                kfree(s);
@@ -3957,7 +3953,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
        }
        kfree(n);
 err:
-       up_write(&slub_lock);
+       mutex_unlock(&slab_mutex);
        return s;
 }
@@ -3978,13 +3974,13 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               down_read(&slub_lock);
+               mutex_lock(&slab_mutex);
                list_for_each_entry(s, &slab_caches, list) {
                        local_irq_save(flags);
                        __flush_cpu_slab(s, cpu);
                        local_irq_restore(flags);
                }
-               up_read(&slub_lock);
+               mutex_unlock(&slab_mutex);
                break;
        default:
                break;
@@ -5360,11 +5356,11 @@ static int __init slab_sysfs_init(void)
        struct kmem_cache *s;
        int err;
 
-       down_write(&slub_lock);
+       mutex_lock(&slab_mutex);
        slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
        if (!slab_kset) {
-               up_write(&slub_lock);
+               mutex_unlock(&slab_mutex);
                printk(KERN_ERR "Cannot register slab subsystem.\n");
                return -ENOSYS;
        }
@@ -5389,7 +5385,7 @@ static int __init slab_sysfs_init(void)
                kfree(al);
        }
 
-       up_write(&slub_lock);
+       mutex_unlock(&slab_mutex);
        resiliency_test();
        return 0;
 }
@@ -5415,7 +5411,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
        loff_t n = *pos;
 
-       down_read(&slub_lock);
+       mutex_lock(&slab_mutex);
        if (!n)
                print_slabinfo_header(m);
@@ -5429,7 +5425,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-       up_read(&slub_lock);
+       mutex_unlock(&slab_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
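
The lock-order comment rewritten at the top of mm/slub.c (slab_mutex, then node->list_lock, then slab_lock(page)) is easy to see in a small sketch. The helper below is purely illustrative and not part of the patch; it assumes SLUB's kmem_cache_node layout of the time (a list_lock spinlock and a partial list linked through page->lru) and the in-file get_node() helper that the diff itself uses.

/* Illustration only: honor the documented order -- take the global
 * slab_mutex first, then each node's list_lock. */
static void count_partial_slabs(void)
{
        struct kmem_cache *s;

        mutex_lock(&slab_mutex);                        /* 1. global mutex */
        list_for_each_entry(s, &slab_caches, list) {
                int node;

                for_each_node_state(node, N_NORMAL_MEMORY) {
                        struct kmem_cache_node *n = get_node(s, node);
                        struct page *page;
                        unsigned long flags, nr = 0;

                        if (!n)
                                continue;
                        spin_lock_irqsave(&n->list_lock, flags); /* 2. per-node lock */
                        list_for_each_entry(page, &n->partial, lru)
                                nr++;
                        spin_unlock_irqrestore(&n->list_lock, flags);
                        pr_info("%s node %d: %lu partial slabs\n",
                                s->name, node, nr);
                }
        }
        mutex_unlock(&slab_mutex);
}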