slab: Convert to hotplug state machine
Install the callbacks via the state machine.

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: rt@linutronix.de
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Link: http://lkml.kernel.org/r/20160823125319.abeapfjapf2kfezp@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 6731d4f123
parent e6d4989a9a
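The conversion follows the standard notifier-to-cpuhp pattern. The sketch below illustrates its shape with a hypothetical "foo" subsystem (the foo_* names are illustrative, not part of this commit; kernel context with <linux/cpu.h> and <linux/cpuhotplug.h> is assumed): the single notifier that demultiplexes `action` becomes one symmetric callback pair per hotplug state, the callbacks take a bare cpu number and return a plain errno, and the *_FROZEN suspend/resume variants disappear because the state machine handles them itself.

/* Old style: one notifier, demultiplexed on 'action'. */
static int foo_cpu_callback(struct notifier_block *nfb,
			    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		return notifier_from_errno(foo_prepare(cpu));
	case CPU_DEAD:
		foo_cleanup(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* New style: one symmetric pair per state, plain errno return. */
static int foo_prepare_cpu(unsigned int cpu)
{
	return foo_prepare(cpu);	/* no notifier_from_errno() */
}

static int foo_dead_cpu(unsigned int cpu)
{
	foo_cleanup(cpu);
	return 0;
}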
include/linux/cpuhotplug.h
@@ -22,6 +22,7 @@ enum cpuhp_state {
 	CPUHP_X2APIC_PREPARE,
 	CPUHP_SMPCFD_PREPARE,
 	CPUHP_RELAY_PREPARE,
+	CPUHP_SLAB_PREPARE,
 	CPUHP_RCUTREE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_TIMERS_DEAD,
include/linux/slab.h
@@ -650,4 +650,12 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 unsigned int kmem_cache_size(struct kmem_cache *s);
 void __init kmem_cache_init_late(void);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
+int slab_prepare_cpu(unsigned int cpu);
+int slab_dead_cpu(unsigned int cpu);
+#else
+#define slab_prepare_cpu	NULL
+#define slab_dead_cpu		NULL
+#endif
+
 #endif	/* _LINUX_SLAB_H */
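The #else stubs above are what let kernel/cpu.c reference the callbacks unconditionally: with SMP or SLAB compiled out, the names expand to NULL, and the hotplug core skips NULL startup/teardown callbacks. A minimal sketch of the same pattern, using a hypothetical CONFIG_FOO:

#if defined(CONFIG_FOO)
int foo_prepare_cpu(unsigned int cpu);
int foo_dead_cpu(unsigned int cpu);
#else
/* Table entries still compile; the cpuhp core treats NULL as a no-op. */
#define foo_prepare_cpu	NULL
#define foo_dead_cpu	NULL
#endif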
kernel/cpu.c
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/smpboot.h>
 #include <linux/relay.h>
+#include <linux/slab.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -1278,6 +1279,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.startup.single		= relay_prepare_cpu,
 		.teardown.single	= NULL,
 	},
+	[CPUHP_SLAB_PREPARE] = {
+		.name			= "slab:prepare",
+		.startup.single		= slab_prepare_cpu,
+		.teardown.single	= slab_dead_cpu,
+	},
 	[CPUHP_RCUTREE_PREP] = {
 		.name			= "RCU/tree:prepare",
 		.startup.single		= rcutree_prepare_cpu,
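CPUHP_SLAB_PREPARE is a static PREPARE state, so it is registered by adding a table entry rather than by calling cpuhp_setup_state(), and its position in enum cpuhp_state fixes when it runs: before the new CPU starts on bringup, after it is gone on teardown. Roughly, and only as an illustration of the core's behavior (rollback() is a hypothetical helper, not the kernel's real code):

/* Illustrative sketch of bringup on the control CPU. */
for (state = CPUHP_OFFLINE + 1; state <= target; state++) {
	struct cpuhp_step *step = &cpuhp_bp_states[state];

	if (!step->startup.single)
		continue;		/* NULL callbacks are skipped */
	ret = step->startup.single(cpu);
	if (ret) {
		rollback(cpu, state - 1);	/* teardown in reverse order */
		break;
	}
}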
mm/slab.c (116 lines changed)
@@ -886,6 +886,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 	return 0;
 }
 
+#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
@@ -908,6 +909,7 @@ static int init_cache_node_node(int node)
 
 	return 0;
 }
+#endif
 
 static int setup_kmem_cache_node(struct kmem_cache *cachep,
 				int node, gfp_t gfp, bool force_change)
@@ -975,6 +977,8 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 	return ret;
 }
 
+#ifdef CONFIG_SMP
+
 static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -1075,65 +1079,54 @@ static int cpuup_prepare(long cpu)
 	return -ENOMEM;
 }
 
-static int cpuup_callback(struct notifier_block *nfb,
-			  unsigned long action, void *hcpu)
+int slab_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
-	int err = 0;
+	int err;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&slab_mutex);
-		err = cpuup_prepare(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		start_cpu_timer(cpu);
-		break;
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		/*
-		 * Shutdown cache reaper. Note that the slab_mutex is
-		 * held so that if cache_reap() is invoked it cannot do
-		 * anything expensive but will only modify reap_work
-		 * and reschedule the timer.
-		 */
-		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
-		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(slab_reap_work, cpu).work.func = NULL;
-		break;
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		start_cpu_timer(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/*
-		 * Even if all the cpus of a node are down, we don't free the
-		 * kmem_cache_node of any cache. This to avoid a race between
-		 * cpu_down, and a kmalloc allocation from another cpu for
-		 * memory from the node of the cpu going down. The node
-		 * structure is usually allocated from kmem_cache_create() and
-		 * gets destroyed at kmem_cache_destroy().
-		 */
-		/* fall through */
-#endif
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		mutex_lock(&slab_mutex);
-		cpuup_canceled(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	}
-	return notifier_from_errno(err);
+	mutex_lock(&slab_mutex);
+	err = cpuup_prepare(cpu);
+	mutex_unlock(&slab_mutex);
+	return err;
 }
 
-static struct notifier_block cpucache_notifier = {
-	&cpuup_callback, NULL, 0
-};
+/*
+ * This is called for a failed online attempt and for a successful
+ * offline.
+ *
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_list3 of any cache. This to avoid a race between cpu_down, and
+ * a kmalloc allocation from another cpu for memory from the node of
+ * the cpu going down. The list3 structure is usually allocated from
+ * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
+ */
+int slab_dead_cpu(unsigned int cpu)
+{
+	mutex_lock(&slab_mutex);
+	cpuup_canceled(cpu);
+	mutex_unlock(&slab_mutex);
+	return 0;
+}
+#endif
 
+static int slab_online_cpu(unsigned int cpu)
+{
+	start_cpu_timer(cpu);
+	return 0;
+}
+
+static int slab_offline_cpu(unsigned int cpu)
+{
+	/*
+	 * Shutdown cache reaper. Note that the slab_mutex is held so
+	 * that if cache_reap() is invoked it cannot do anything
+	 * expensive but will only modify reap_work and reschedule the
+	 * timer.
+	 */
+	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
+	/* Now the cache_reaper is guaranteed to be not running. */
+	per_cpu(slab_reap_work, cpu).work.func = NULL;
+	return 0;
+}
+
 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
 /*
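Note how the four legs of the old notifier are redistributed: CPU_UP_PREPARE becomes slab_prepare_cpu(), CPU_UP_CANCELED and CPU_DEAD collapse into slab_dead_cpu() (the teardown of a PREPARE state runs both when a later bringup step fails and on a successful offline), and the CPU_ONLINE/CPU_DOWN_PREPARE legs become slab_online_cpu()/slab_offline_cpu(), registered dynamically in cpucache_init() below. The reaper shutdown in slab_offline_cpu() uses the usual per-cpu delayed-work pattern; a minimal sketch with a hypothetical foo_work standing in for slab_reap_work:

static DEFINE_PER_CPU(struct delayed_work, foo_work);

static int foo_offline_cpu(unsigned int cpu)
{
	/* Waits for a running work item and cancels a pending one. */
	cancel_delayed_work_sync(&per_cpu(foo_work, cpu));
	/* Mark it dead so nothing re-arms it for this cpu. */
	per_cpu(foo_work, cpu).work.func = NULL;
	return 0;
}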
@@ -1336,12 +1329,6 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	slab_state = FULL;
 
-	/*
-	 * Register a cpu startup notifier callback that initializes
-	 * cpu_cache_get for all new cpus
-	 */
-	register_cpu_notifier(&cpucache_notifier);
-
 #ifdef CONFIG_NUMA
 	/*
 	 * Register a memory hotplug callback that initializes and frees
@@ -1358,13 +1345,14 @@ void __init kmem_cache_init_late(void)
 
 static int __init cpucache_init(void)
 {
-	int cpu;
+	int ret;
 
 	/*
 	 * Register the timers that return unneeded pages to the page allocator
 	 */
-	for_each_online_cpu(cpu)
-		start_cpu_timer(cpu);
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
+				slab_online_cpu, slab_offline_cpu);
+	WARN_ON(ret < 0);
 
 	/* Done! */
 	slab_state = FULL;
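cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN invokes the startup callback on every CPU that is already online, which is why the old for_each_online_cpu()/start_cpu_timer() loop can simply be dropped. A sketch of the same dynamic registration in a hypothetical module (foo_* names are illustrative, not part of this commit):

static enum cpuhp_state foo_hp_online;

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_online_cpu, foo_offline_cpu);
	if (ret < 0)
		return ret;
	foo_hp_online = ret;	/* dynamic setup returns the allocated state */
	return 0;
}

static void __exit foo_exit(void)
{
	/* Runs foo_offline_cpu() on each online cpu, then frees the state. */
	cpuhp_remove_state(foo_hp_online);
}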