mirror of https://gitee.com/openkylin/linux.git

commit 467a9e1633

CPU hotplug notifiers registration fixes for 3.15-rc1

The purpose of this single series of commits from Srivatsa S Bhat (with a
small piece from Gautham R Shenoy), touching multiple subsystems that use
CPU hotplug notifiers, is to provide a way to register them that will not
lead to deadlocks with CPU online/offline operations, as described in the
changelog of commit 93ae4f978c (CPU hotplug: Provide lockless versions of
callback registration functions).

The first three commits in the series introduce the API and document it,
and the rest simply goes through the users of CPU hotplug notifiers and
converts them to using the new method.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQIcBAABCAAGBQJTQow2AAoJEILEb/54YlRxW4QQAJlYRDUzwFJzJzYhltQYuVR+
4D74XMtvXgoJfg3cwdSWvMKKpJZnA9BVN0f7Hcx9wYmgdexYUuHeZJmMNyc3S2+g
KjKBIsugvgmZhHbbLd6TJ6GBbhGT5JLt9VmSfL9zIkveInU1YHFUUqL/mxdHm4J0
BSGKjk2rN3waRJgmY+xfliFLtQjDKFwJpMuvrgtoUyfas3f4sIV43UNbqdvA/weJ
rzedxXOlKH/id4b56lj/4iIzcoL3mwvJJ7r6n0CEMsKv87z09kqR0O+69Tsq/cgs
j17CsvoJOmZGk3QTeKVMQWBsvk6aPoDu3zK83gLbQMt+qjOpSTbJLz/3HZw4/TrW
ss4nuZne1DLMGS+6hoxYbTP+6Ni//Kn+l/LrHc5jb7m1X3lMO4W2aV3IROtIE1rv
lEP1IG01NU4u9YwkVj1dyhrkSp8tLPul4SrUK8W+oNweOC5crjJV7vJbIPJgmYiM
IZN55wln0yVRtR4TX+rmvN0PixsInE8MeaVCmReApyF9pdzul/StxlBze5BKLSJD
cqo1kNPpsmdxoDucqUpQ/gSvy+IOl2qnlisB5PpV93sk7De6TFDYrGHxjYIW7jMf
StXwdCDDQhzd2Q8Kfpp895A1dbIl8rKtwA6bTU2eX+BfMVFzuMdT44cvosx1+UdQ
sWl//rg76nb13dFjvF+q
=SW7Q
-----END PGP SIGNATURE-----

Merge tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull CPU hotplug notifiers registration fixes from Rafael Wysocki:
 "The purpose of this single series of commits from Srivatsa S Bhat (with
  a small piece from Gautham R Shenoy) touching multiple subsystems that
  use CPU hotplug notifiers is to provide a way to register them that
  will not lead to deadlocks with CPU online/offline operations as
  described in the changelog of commit 93ae4f978c ("CPU hotplug: Provide
  lockless versions of callback registration functions").

  The first three commits in the series introduce the API and document it
  and the rest simply goes through the users of CPU hotplug notifiers and
  converts them to using the new method"

* tag 'cpu-hotplug-3.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (52 commits)
  net/iucv/iucv.c: Fix CPU hotplug callback registration
  net/core/flow.c: Fix CPU hotplug callback registration
  mm, zswap: Fix CPU hotplug callback registration
  mm, vmstat: Fix CPU hotplug callback registration
  profile: Fix CPU hotplug callback registration
  trace, ring-buffer: Fix CPU hotplug callback registration
  xen, balloon: Fix CPU hotplug callback registration
  hwmon, via-cputemp: Fix CPU hotplug callback registration
  hwmon, coretemp: Fix CPU hotplug callback registration
  thermal, x86-pkg-temp: Fix CPU hotplug callback registration
  octeon, watchdog: Fix CPU hotplug callback registration
  oprofile, nmi-timer: Fix CPU hotplug callback registration
  intel-idle: Fix CPU hotplug callback registration
  clocksource, dummy-timer: Fix CPU hotplug callback registration
  drivers/base/topology.c: Fix CPU hotplug callback registration
  acpi-cpufreq: Fix CPU hotplug callback registration
  zsmalloc: Fix CPU hotplug callback registration
  scsi, fcoe: Fix CPU hotplug callback registration
  scsi, bnx2fc: Fix CPU hotplug callback registration
  scsi, bnx2i: Fix CPU hotplug callback registration
  ...
@@ -312,12 +312,57 @@ things will happen if a notifier in path sent a BAD notify code.

Q: I don't see my action being called for all CPUs already up and running?
A: Yes, CPU notifiers are called only when new CPUs are on-lined or offlined.
If you need to perform some action for each cpu already in the system, then
do this:

	for_each_online_cpu(i) {
		foobar_cpu_callback(&foobar_cpu_notifier, CPU_UP_PREPARE, i);
		foobar_cpu_callback(&foobar_cpu_notifier, CPU_ONLINE, i);
	}

However, if you want to register a hotplug callback, as well as perform
some initialization for CPUs that are already online, then do this:

Version 1: (Correct)
---------

	cpu_notifier_register_begin();

	for_each_online_cpu(i) {
		foobar_cpu_callback(&foobar_cpu_notifier,
				    CPU_UP_PREPARE, i);
		foobar_cpu_callback(&foobar_cpu_notifier,
				    CPU_ONLINE, i);
	}

	/* Note the use of the double underscored version of the API */
	__register_cpu_notifier(&foobar_cpu_notifier);

	cpu_notifier_register_done();

Note that the following code is *NOT* the right way to achieve this,
because it is prone to an ABBA deadlock between the cpu_add_remove_lock
and the cpu_hotplug.lock.

Version 2: (Wrong!)
---------

	get_online_cpus();

	for_each_online_cpu(i) {
		foobar_cpu_callback(&foobar_cpu_notifier,
				    CPU_UP_PREPARE, i);
		foobar_cpu_callback(&foobar_cpu_notifier,
				    CPU_ONLINE, i);
	}

	register_cpu_notifier(&foobar_cpu_notifier);

	put_online_cpus();

So always use the first version shown above when you want to register
callbacks as well as initialize the already online CPUs.


Q: If i would like to develop cpu hotplug support for a new architecture,
what do i need at a minimum?
A: The following are what is required for CPU hotplug infrastructure to work
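Putting the documented pieces together, a converted caller ends up looking roughly
like the sketch below. This is an illustrative, self-contained module that uses the
hypothetical foobar names from the documentation above; the callback body and the
module scaffolding are assumptions added for completeness, not code from this series.

	#include <linux/cpu.h>
	#include <linux/cpumask.h>
	#include <linux/module.h>
	#include <linux/notifier.h>

	/* Hypothetical per-CPU callback, as in the documentation example. */
	static int foobar_cpu_callback(struct notifier_block *nfb,
				       unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			/* allocate per-CPU state for 'cpu' here */
			break;
		case CPU_ONLINE:
			/* start work on 'cpu' here */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block foobar_cpu_notifier = {
		.notifier_call = foobar_cpu_callback,
	};

	static int __init foobar_init(void)
	{
		int cpu;

		cpu_notifier_register_begin();

		/* Initialize the CPUs that are already online by hand... */
		for_each_online_cpu(cpu) {
			foobar_cpu_callback(&foobar_cpu_notifier, CPU_UP_PREPARE,
					    (void *)(long)cpu);
			foobar_cpu_callback(&foobar_cpu_notifier, CPU_ONLINE,
					    (void *)(long)cpu);
		}

		/* ...then register with the double-underscored (lockless) variant. */
		__register_cpu_notifier(&foobar_cpu_notifier);

		cpu_notifier_register_done();
		return 0;
	}
	module_init(foobar_init);

The conversions in the diffs below all follow this shape: wrap the "init already
online CPUs, then register the notifier" sequence in cpu_notifier_register_begin()/
cpu_notifier_register_done() and switch to the __ variant of the registration call.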
@ -1073,6 +1073,8 @@ static int __init arch_hw_breakpoint_init(void)
|
|||
core_num_brps = get_num_brps();
|
||||
core_num_wrps = get_num_wrps();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/*
|
||||
* We need to tread carefully here because DBGSWENABLE may be
|
||||
* driven low on this core and there isn't an architected way to
|
||||
|
@ -1089,6 +1091,7 @@ static int __init arch_hw_breakpoint_init(void)
|
|||
if (!cpumask_empty(&debug_err_mask)) {
|
||||
core_num_brps = 0;
|
||||
core_num_wrps = 0;
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1108,7 +1111,10 @@ static int __init arch_hw_breakpoint_init(void)
|
|||
TRAP_HWBKPT, "breakpoint debug exception");
|
||||
|
||||
/* Register hotplug and PM notifiers. */
|
||||
register_cpu_notifier(&dbg_reset_nb);
|
||||
__register_cpu_notifier(&dbg_reset_nb);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
pm_init();
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1051,21 +1051,26 @@ int kvm_arch_init(void *opaque)
|
|||
}
|
||||
}
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
err = init_hyp_mode();
|
||||
if (err)
|
||||
goto out_err;
|
||||
|
||||
err = register_cpu_notifier(&hyp_init_cpu_nb);
|
||||
err = __register_cpu_notifier(&hyp_init_cpu_nb);
|
||||
if (err) {
|
||||
kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
hyp_cpu_pm_init();
|
||||
|
||||
kvm_coproc_table_init();
|
||||
return 0;
|
||||
out_err:
|
||||
cpu_notifier_register_done();
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -154,13 +154,17 @@ static struct notifier_block os_lock_nb = {
|
|||
|
||||
static int debug_monitors_init(void)
|
||||
{
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* Clear the OS lock. */
|
||||
on_each_cpu(clear_os_lock, NULL, 1);
|
||||
isb();
|
||||
local_dbg_enable();
|
||||
|
||||
/* Register hotplug handler. */
|
||||
register_cpu_notifier(&os_lock_nb);
|
||||
__register_cpu_notifier(&os_lock_nb);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
}
|
||||
postcore_initcall(debug_monitors_init);
|
||||
|
|
|
@ -913,6 +913,8 @@ static int __init arch_hw_breakpoint_init(void)
|
|||
pr_info("found %d breakpoint and %d watchpoint registers.\n",
|
||||
core_num_brps, core_num_wrps);
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/*
|
||||
* Reset the breakpoint resources. We assume that a halting
|
||||
* debugger will leave the world in a nice state for us.
|
||||
|
@ -927,7 +929,10 @@ static int __init arch_hw_breakpoint_init(void)
|
|||
TRAP_HWBKPT, "hw-watchpoint handler");
|
||||
|
||||
/* Register hotplug notifier. */
|
||||
register_cpu_notifier(&hw_breakpoint_reset_nb);
|
||||
__register_cpu_notifier(&hw_breakpoint_reset_nb);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
/* Register cpu_suspend hw breakpoint restore hook */
|
||||
cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
|
||||
|
||||
|
|
|
@ -269,12 +269,17 @@ err_inject_init(void)
|
|||
#ifdef ERR_INJ_DEBUG
|
||||
printk(KERN_INFO "Enter error injection driver.\n");
|
||||
#endif
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
|
||||
(void *)(long)i);
|
||||
}
|
||||
|
||||
register_hotcpu_notifier(&err_inject_cpu_notifier);
|
||||
__register_hotcpu_notifier(&err_inject_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -288,11 +293,17 @@ err_inject_exit(void)
|
|||
#ifdef ERR_INJ_DEBUG
|
||||
printk(KERN_INFO "Exit error injection driver.\n");
|
||||
#endif
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
sys_dev = get_cpu_device(i);
|
||||
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
|
||||
}
|
||||
unregister_hotcpu_notifier(&err_inject_cpu_notifier);
|
||||
|
||||
__unregister_hotcpu_notifier(&err_inject_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
}
|
||||
|
||||
module_init(err_inject_init);
|
||||
|
|
|
@ -996,13 +996,17 @@ palinfo_init(void)
|
|||
if (!palinfo_dir)
|
||||
return -ENOMEM;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* Create palinfo dirs in /proc for all online cpus */
|
||||
for_each_online_cpu(i) {
|
||||
create_palinfo_proc_entries(i);
|
||||
}
|
||||
|
||||
/* Register for future delivery via notify registration */
|
||||
register_hotcpu_notifier(&palinfo_cpu_notifier);
|
||||
__register_hotcpu_notifier(&palinfo_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -635,6 +635,8 @@ salinfo_init(void)
|
|||
(void *)salinfo_entries[i].feature);
|
||||
}
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
|
||||
data = salinfo_data + i;
|
||||
data->type = i;
|
||||
|
@ -669,7 +671,9 @@ salinfo_init(void)
|
|||
salinfo_timer.function = &salinfo_timeout;
|
||||
add_timer(&salinfo_timer);
|
||||
|
||||
register_hotcpu_notifier(&salinfo_cpu_notifier);
|
||||
__register_hotcpu_notifier(&salinfo_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -454,12 +454,16 @@ static int __init cache_sysfs_init(void)
|
|||
{
|
||||
int i;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
struct device *sys_dev = get_cpu_device((unsigned int)i);
|
||||
cache_add_dev(sys_dev);
|
||||
}
|
||||
|
||||
register_hotcpu_notifier(&cache_cpu_notifier);
|
||||
__register_hotcpu_notifier(&cache_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -975,7 +975,8 @@ static int __init topology_init(void)
|
|||
int cpu;
|
||||
|
||||
register_nodes();
|
||||
register_cpu_notifier(&sysfs_cpu_nb);
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct cpu *c = &per_cpu(cpu_devices, cpu);
|
||||
|
@ -999,6 +1000,11 @@ static int __init topology_init(void)
|
|||
if (cpu_online(cpu))
|
||||
register_cpu_online(cpu);
|
||||
}
|
||||
|
||||
__register_cpu_notifier(&sysfs_cpu_nb);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
sysfs_create_dscr_default();
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
|
|
@ -378,9 +378,12 @@ static int __init cache_init(void)
|
|||
if (!test_facility(34))
|
||||
return 0;
|
||||
cache_build_info();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
cache_add_cpu(cpu);
|
||||
hotcpu_notifier(cache_hotplug, 0);
|
||||
__hotcpu_notifier(cache_hotplug, 0);
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
}
|
||||
device_initcall(cache_init);
|
||||
|
|
|
@ -1057,19 +1057,24 @@ static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
|
|||
|
||||
static int __init s390_smp_init(void)
|
||||
{
|
||||
int cpu, rc;
|
||||
int cpu, rc = 0;
|
||||
|
||||
hotcpu_notifier(smp_cpu_notify, 0);
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
|
||||
if (rc)
|
||||
return rc;
|
||||
#endif
|
||||
cpu_notifier_register_begin();
|
||||
for_each_present_cpu(cpu) {
|
||||
rc = smp_add_present_cpu(cpu);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
return 0;
|
||||
|
||||
__hotcpu_notifier(smp_cpu_notify, 0);
|
||||
|
||||
out:
|
||||
cpu_notifier_register_done();
|
||||
return rc;
|
||||
}
|
||||
subsys_initcall(s390_smp_init);
|
||||
|
|
|
@ -300,7 +300,7 @@ static int __init topology_init(void)
|
|||
|
||||
check_mmu_stats();
|
||||
|
||||
register_cpu_notifier(&sysfs_cpu_nb);
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct cpu *c = &per_cpu(cpu_devices, cpu);
|
||||
|
@ -310,6 +310,10 @@ static int __init topology_init(void)
|
|||
register_cpu_online(cpu);
|
||||
}
|
||||
|
||||
__register_cpu_notifier(&sysfs_cpu_nb);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1225,21 +1225,24 @@ static struct notifier_block cacheinfo_cpu_notifier = {
|
|||
|
||||
static int __init cache_sysfs_init(void)
|
||||
{
|
||||
int i;
|
||||
int i, err = 0;
|
||||
|
||||
if (num_cache_leaves == 0)
|
||||
return 0;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(i) {
|
||||
int err;
|
||||
struct device *dev = get_cpu_device(i);
|
||||
|
||||
err = cache_add_dev(dev);
|
||||
if (err)
|
||||
return err;
|
||||
goto out;
|
||||
}
|
||||
register_hotcpu_notifier(&cacheinfo_cpu_notifier);
|
||||
return 0;
|
||||
__register_hotcpu_notifier(&cacheinfo_cpu_notifier);
|
||||
|
||||
out:
|
||||
cpu_notifier_register_done();
|
||||
return err;
|
||||
}
|
||||
|
||||
device_initcall(cache_sysfs_init);
|
||||
|
|
|
@ -2434,14 +2434,18 @@ static __init int mcheck_init_device(void)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(i) {
|
||||
err = mce_device_create(i);
|
||||
if (err)
|
||||
if (err) {
|
||||
cpu_notifier_register_done();
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
register_syscore_ops(&mce_syscore_ops);
|
||||
register_hotcpu_notifier(&mce_cpu_notifier);
|
||||
__register_hotcpu_notifier(&mce_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
/* register character device /dev/mcelog */
|
||||
misc_register(&mce_chrdev_device);
|
||||
|
|
|
@ -271,9 +271,6 @@ static void thermal_throttle_remove_dev(struct device *dev)
|
|||
sysfs_remove_group(&dev->kobj, &thermal_attr_group);
|
||||
}
|
||||
|
||||
/* Mutex protecting device creation against CPU hotplug: */
|
||||
static DEFINE_MUTEX(therm_cpu_lock);
|
||||
|
||||
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
|
||||
static int
|
||||
thermal_throttle_cpu_callback(struct notifier_block *nfb,
|
||||
|
@ -289,18 +286,14 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
|
|||
switch (action) {
|
||||
case CPU_UP_PREPARE:
|
||||
case CPU_UP_PREPARE_FROZEN:
|
||||
mutex_lock(&therm_cpu_lock);
|
||||
err = thermal_throttle_add_dev(dev, cpu);
|
||||
mutex_unlock(&therm_cpu_lock);
|
||||
WARN_ON(err);
|
||||
break;
|
||||
case CPU_UP_CANCELED:
|
||||
case CPU_UP_CANCELED_FROZEN:
|
||||
case CPU_DEAD:
|
||||
case CPU_DEAD_FROZEN:
|
||||
mutex_lock(&therm_cpu_lock);
|
||||
thermal_throttle_remove_dev(dev);
|
||||
mutex_unlock(&therm_cpu_lock);
|
||||
break;
|
||||
}
|
||||
return notifier_from_errno(err);
|
||||
|
@ -319,19 +312,16 @@ static __init int thermal_throttle_init_device(void)
|
|||
if (!atomic_read(&therm_throt_en))
|
||||
return 0;
|
||||
|
||||
register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
mutex_lock(&therm_cpu_lock);
|
||||
#endif
|
||||
/* connect live CPUs to sysfs */
|
||||
for_each_online_cpu(cpu) {
|
||||
err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
|
||||
WARN_ON(err);
|
||||
}
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
mutex_unlock(&therm_cpu_lock);
|
||||
#endif
|
||||
|
||||
__register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -926,13 +926,13 @@ static __init int amd_ibs_init(void)
|
|||
goto out;
|
||||
|
||||
perf_ibs_pm_init();
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
ibs_caps = caps;
|
||||
/* make ibs_caps visible to other cpus: */
|
||||
smp_mb();
|
||||
perf_cpu_notifier(perf_ibs_cpu_notifier);
|
||||
smp_call_function(setup_APIC_ibs, NULL, 1);
|
||||
put_online_cpus();
|
||||
__perf_cpu_notifier(perf_ibs_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
ret = perf_event_ibs_init();
|
||||
out:
|
||||
|
|
|
@ -531,15 +531,16 @@ static int __init amd_uncore_init(void)
|
|||
if (ret)
|
||||
return -ENODEV;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* init cpus already online before registering for hotplug notifier */
|
||||
for_each_online_cpu(cpu) {
|
||||
amd_uncore_cpu_up_prepare(cpu);
|
||||
smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
|
||||
}
|
||||
|
||||
register_cpu_notifier(&amd_uncore_cpu_notifier_block);
|
||||
put_online_cpus();
|
||||
__register_cpu_notifier(&amd_uncore_cpu_notifier_block);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -646,19 +646,20 @@ static int __init rapl_pmu_init(void)
|
|||
/* unsupported */
|
||||
return 0;
|
||||
}
|
||||
get_online_cpus();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
rapl_cpu_prepare(cpu);
|
||||
rapl_cpu_init(cpu);
|
||||
}
|
||||
|
||||
perf_cpu_notifier(rapl_cpu_notifier);
|
||||
__perf_cpu_notifier(rapl_cpu_notifier);
|
||||
|
||||
ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
|
||||
if (WARN_ON(ret)) {
|
||||
pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -672,7 +673,7 @@ static int __init rapl_pmu_init(void)
|
|||
hweight32(rapl_cntr_mask),
|
||||
ktime_to_ms(pmu->timer_interval));
|
||||
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -4244,7 +4244,7 @@ static void __init uncore_cpumask_init(void)
|
|||
if (!cpumask_empty(&uncore_cpu_mask))
|
||||
return;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
int i, phys_id = topology_physical_package_id(cpu);
|
||||
|
@ -4263,9 +4263,9 @@ static void __init uncore_cpumask_init(void)
|
|||
}
|
||||
on_each_cpu(uncore_cpu_setup, NULL, 1);
|
||||
|
||||
register_cpu_notifier(&uncore_cpu_nb);
|
||||
__register_cpu_notifier(&uncore_cpu_nb);
|
||||
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -198,14 +198,15 @@ static int __init cpuid_init(void)
|
|||
goto out_chrdev;
|
||||
}
|
||||
cpuid_class->devnode = cpuid_devnode;
|
||||
get_online_cpus();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(i) {
|
||||
err = cpuid_device_create(i);
|
||||
if (err != 0)
|
||||
goto out_class;
|
||||
}
|
||||
register_hotcpu_notifier(&cpuid_class_cpu_notifier);
|
||||
put_online_cpus();
|
||||
__register_hotcpu_notifier(&cpuid_class_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
err = 0;
|
||||
goto out;
|
||||
|
@ -215,7 +216,7 @@ static int __init cpuid_init(void)
|
|||
for_each_online_cpu(i) {
|
||||
cpuid_device_destroy(i);
|
||||
}
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
class_destroy(cpuid_class);
|
||||
out_chrdev:
|
||||
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
|
||||
|
@ -227,13 +228,13 @@ static void __exit cpuid_exit(void)
|
|||
{
|
||||
int cpu = 0;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
cpuid_device_destroy(cpu);
|
||||
class_destroy(cpuid_class);
|
||||
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
|
||||
unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
|
||||
put_online_cpus();
|
||||
__unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
}
|
||||
|
||||
module_init(cpuid_init);
|
||||
|
|
|
@ -941,12 +941,14 @@ static __init int hpet_late_init(void)
|
|||
if (boot_cpu_has(X86_FEATURE_ARAT))
|
||||
return 0;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu) {
|
||||
hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
|
||||
}
|
||||
|
||||
/* This notifier should be called after workqueue is ready */
|
||||
hotcpu_notifier(hpet_cpuhp_notify, -20);
|
||||
__hotcpu_notifier(hpet_cpuhp_notify, -20);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -259,14 +259,15 @@ static int __init msr_init(void)
|
|||
goto out_chrdev;
|
||||
}
|
||||
msr_class->devnode = msr_devnode;
|
||||
get_online_cpus();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(i) {
|
||||
err = msr_device_create(i);
|
||||
if (err != 0)
|
||||
goto out_class;
|
||||
}
|
||||
register_hotcpu_notifier(&msr_class_cpu_notifier);
|
||||
put_online_cpus();
|
||||
__register_hotcpu_notifier(&msr_class_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
err = 0;
|
||||
goto out;
|
||||
|
@ -275,7 +276,7 @@ static int __init msr_init(void)
|
|||
i = 0;
|
||||
for_each_online_cpu(i)
|
||||
msr_device_destroy(i);
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
class_destroy(msr_class);
|
||||
out_chrdev:
|
||||
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
|
||||
|
@ -286,13 +287,14 @@ static int __init msr_init(void)
|
|||
static void __exit msr_exit(void)
|
||||
{
|
||||
int cpu = 0;
|
||||
get_online_cpus();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
msr_device_destroy(cpu);
|
||||
class_destroy(msr_class);
|
||||
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
|
||||
unregister_hotcpu_notifier(&msr_class_cpu_notifier);
|
||||
put_online_cpus();
|
||||
__unregister_hotcpu_notifier(&msr_class_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
}
|
||||
|
||||
module_init(msr_init);
|
||||
|
|
|
@ -348,9 +348,13 @@ static int __init vsyscall_init(void)
|
|||
{
|
||||
BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
on_each_cpu(cpu_vsyscall_init, NULL, 1);
|
||||
/* notifier priority > KVM */
|
||||
hotcpu_notifier(cpu_vsyscall_notifier, 30);
|
||||
__hotcpu_notifier(cpu_vsyscall_notifier, 30);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -5422,7 +5422,8 @@ static void kvm_timer_init(void)
|
|||
int cpu;
|
||||
|
||||
max_tsc_khz = tsc_khz;
|
||||
register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
struct cpufreq_policy policy;
|
||||
|
@ -5439,6 +5440,10 @@ static void kvm_timer_init(void)
|
|||
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
|
||||
for_each_online_cpu(cpu)
|
||||
smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
|
||||
|
||||
__register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
|
||||
|
|
|
@ -494,14 +494,19 @@ static int nmi_setup(void)
|
|||
if (err)
|
||||
goto fail;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* Use get/put_online_cpus() to protect 'nmi_enabled' */
|
||||
get_online_cpus();
|
||||
register_cpu_notifier(&oprofile_cpu_nb);
|
||||
nmi_enabled = 1;
|
||||
/* make nmi_enabled visible to the nmi handler: */
|
||||
smp_mb();
|
||||
on_each_cpu(nmi_cpu_setup, NULL, 1);
|
||||
__register_cpu_notifier(&oprofile_cpu_nb);
|
||||
put_online_cpus();
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
free_msrs();
|
||||
|
@ -512,12 +517,18 @@ static void nmi_shutdown(void)
|
|||
{
|
||||
struct op_msrs *msrs;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
|
||||
get_online_cpus();
|
||||
unregister_cpu_notifier(&oprofile_cpu_nb);
|
||||
on_each_cpu(nmi_cpu_shutdown, NULL, 1);
|
||||
nmi_enabled = 0;
|
||||
ctr_running = 0;
|
||||
__unregister_cpu_notifier(&oprofile_cpu_nb);
|
||||
put_online_cpus();
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
/* make variables visible to the nmi handler: */
|
||||
smp_mb();
|
||||
unregister_nmi_handler(NMI_LOCAL, "oprofile");
|
||||
|
|
|
@ -370,10 +370,13 @@ static int __init pci_io_ecs_init(void)
|
|||
if (early_pci_allowed())
|
||||
pci_enable_pci_io_ecs();
|
||||
|
||||
register_cpu_notifier(&amd_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
|
||||
(void *)(long)cpu);
|
||||
__register_cpu_notifier(&amd_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
pci_probe |= PCI_HAS_IO_ECS;
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -160,16 +160,20 @@ static int topology_cpu_callback(struct notifier_block *nfb,
|
|||
static int topology_sysfs_init(void)
|
||||
{
|
||||
int cpu;
|
||||
int rc;
|
||||
int rc = 0;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
rc = topology_add_dev(cpu);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
hotcpu_notifier(topology_cpu_callback, 0);
|
||||
__hotcpu_notifier(topology_cpu_callback, 0);
|
||||
|
||||
return 0;
|
||||
out:
|
||||
cpu_notifier_register_done();
|
||||
return rc;
|
||||
}
|
||||
|
||||
device_initcall(topology_sysfs_init);
|
||||
|
|
|
@ -56,14 +56,19 @@ static struct notifier_block dummy_timer_cpu_nb = {
|
|||
|
||||
static int __init dummy_timer_register(void)
|
||||
{
|
||||
int err = register_cpu_notifier(&dummy_timer_cpu_nb);
|
||||
int err = 0;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
err = __register_cpu_notifier(&dummy_timer_cpu_nb);
|
||||
if (err)
|
||||
return err;
|
||||
goto out;
|
||||
|
||||
/* We won't get a call on the boot CPU, so register immediately */
|
||||
if (num_possible_cpus() > 1)
|
||||
dummy_timer_setup();
|
||||
|
||||
return 0;
|
||||
out:
|
||||
cpu_notifier_register_done();
|
||||
return err;
|
||||
}
|
||||
early_initcall(dummy_timer_register);
|
||||
|
|
|
@ -906,15 +906,16 @@ static void __init acpi_cpufreq_boost_init(void)
|
|||
|
||||
acpi_cpufreq_driver.boost_supported = true;
|
||||
acpi_cpufreq_driver.boost_enabled = boost_state(0);
|
||||
get_online_cpus();
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* Force all MSRs to the same value */
|
||||
boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
|
||||
cpu_online_mask);
|
||||
|
||||
register_cpu_notifier(&boost_nb);
|
||||
__register_cpu_notifier(&boost_nb);
|
||||
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -810,20 +810,20 @@ static int __init coretemp_init(void)
|
|||
if (err)
|
||||
goto exit;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(i)
|
||||
get_core_online(i);
|
||||
|
||||
#ifndef CONFIG_HOTPLUG_CPU
|
||||
if (list_empty(&pdev_list)) {
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
err = -ENODEV;
|
||||
goto exit_driver_unreg;
|
||||
}
|
||||
#endif
|
||||
|
||||
register_hotcpu_notifier(&coretemp_cpu_notifier);
|
||||
put_online_cpus();
|
||||
__register_hotcpu_notifier(&coretemp_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
|
||||
#ifndef CONFIG_HOTPLUG_CPU
|
||||
|
@ -838,8 +838,8 @@ static void __exit coretemp_exit(void)
|
|||
{
|
||||
struct pdev_entry *p, *n;
|
||||
|
||||
get_online_cpus();
|
||||
unregister_hotcpu_notifier(&coretemp_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
__unregister_hotcpu_notifier(&coretemp_cpu_notifier);
|
||||
mutex_lock(&pdev_list_mutex);
|
||||
list_for_each_entry_safe(p, n, &pdev_list, list) {
|
||||
platform_device_unregister(p->pdev);
|
||||
|
@ -847,7 +847,7 @@ static void __exit coretemp_exit(void)
|
|||
kfree(p);
|
||||
}
|
||||
mutex_unlock(&pdev_list_mutex);
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
platform_driver_unregister(&coretemp_driver);
|
||||
}
|
||||
|
||||
|
|
|
@ -319,7 +319,7 @@ static int __init via_cputemp_init(void)
|
|||
if (err)
|
||||
goto exit;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(i) {
|
||||
struct cpuinfo_x86 *c = &cpu_data(i);
|
||||
|
||||
|
@ -339,14 +339,14 @@ static int __init via_cputemp_init(void)
|
|||
|
||||
#ifndef CONFIG_HOTPLUG_CPU
|
||||
if (list_empty(&pdev_list)) {
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
err = -ENODEV;
|
||||
goto exit_driver_unreg;
|
||||
}
|
||||
#endif
|
||||
|
||||
register_hotcpu_notifier(&via_cputemp_cpu_notifier);
|
||||
put_online_cpus();
|
||||
__register_hotcpu_notifier(&via_cputemp_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
|
||||
#ifndef CONFIG_HOTPLUG_CPU
|
||||
|
@ -361,8 +361,8 @@ static void __exit via_cputemp_exit(void)
|
|||
{
|
||||
struct pdev_entry *p, *n;
|
||||
|
||||
get_online_cpus();
|
||||
unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
__unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
|
||||
mutex_lock(&pdev_list_mutex);
|
||||
list_for_each_entry_safe(p, n, &pdev_list, list) {
|
||||
platform_device_unregister(p->pdev);
|
||||
|
@ -370,7 +370,7 @@ static void __exit via_cputemp_exit(void)
|
|||
kfree(p);
|
||||
}
|
||||
mutex_unlock(&pdev_list_mutex);
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
platform_driver_unregister(&via_cputemp_driver);
|
||||
}
|
||||
|
||||
|
|
|
@ -681,14 +681,19 @@ static int __init intel_idle_init(void)
|
|||
if (intel_idle_cpuidle_devices == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
retval = intel_idle_cpu_init(i);
|
||||
if (retval) {
|
||||
cpu_notifier_register_done();
|
||||
cpuidle_unregister_driver(&intel_idle_driver);
|
||||
return retval;
|
||||
}
|
||||
}
|
||||
register_cpu_notifier(&cpu_hotplug_notifier);
|
||||
__register_cpu_notifier(&cpu_hotplug_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -698,10 +703,13 @@ static void __exit intel_idle_exit(void)
|
|||
intel_idle_cpuidle_devices_uninit();
|
||||
cpuidle_unregister_driver(&intel_idle_driver);
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
|
||||
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
|
||||
unregister_cpu_notifier(&cpu_hotplug_notifier);
|
||||
__unregister_cpu_notifier(&cpu_hotplug_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -108,8 +108,8 @@ static void nmi_timer_shutdown(void)
|
|||
struct perf_event *event;
|
||||
int cpu;
|
||||
|
||||
get_online_cpus();
|
||||
unregister_cpu_notifier(&nmi_timer_cpu_nb);
|
||||
cpu_notifier_register_begin();
|
||||
__unregister_cpu_notifier(&nmi_timer_cpu_nb);
|
||||
for_each_possible_cpu(cpu) {
|
||||
event = per_cpu(nmi_timer_events, cpu);
|
||||
if (!event)
|
||||
|
@ -119,7 +119,7 @@ static void nmi_timer_shutdown(void)
|
|||
perf_event_release_kernel(event);
|
||||
}
|
||||
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
}
|
||||
|
||||
static int nmi_timer_setup(void)
|
||||
|
@ -132,20 +132,23 @@ static int nmi_timer_setup(void)
|
|||
do_div(period, HZ);
|
||||
nmi_timer_attr.sample_period = period;
|
||||
|
||||
get_online_cpus();
|
||||
err = register_cpu_notifier(&nmi_timer_cpu_nb);
|
||||
cpu_notifier_register_begin();
|
||||
err = __register_cpu_notifier(&nmi_timer_cpu_nb);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/* can't attach events to offline cpus: */
|
||||
for_each_online_cpu(cpu) {
|
||||
err = nmi_timer_start_cpu(cpu);
|
||||
if (err)
|
||||
break;
|
||||
if (err) {
|
||||
cpu_notifier_register_done();
|
||||
nmi_timer_shutdown();
|
||||
return err;
|
||||
}
|
||||
}
|
||||
if (err)
|
||||
nmi_timer_shutdown();
|
||||
|
||||
out:
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -1374,6 +1374,9 @@ static int __init rapl_init(void)
|
|||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* prevent CPU hotplug during detection */
|
||||
get_online_cpus();
|
||||
ret = rapl_detect_topology();
|
||||
|
@ -1385,20 +1388,23 @@ static int __init rapl_init(void)
|
|||
ret = -ENODEV;
|
||||
goto done;
|
||||
}
|
||||
register_hotcpu_notifier(&rapl_cpu_notifier);
|
||||
__register_hotcpu_notifier(&rapl_cpu_notifier);
|
||||
done:
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit rapl_exit(void)
|
||||
{
|
||||
cpu_notifier_register_begin();
|
||||
get_online_cpus();
|
||||
unregister_hotcpu_notifier(&rapl_cpu_notifier);
|
||||
__unregister_hotcpu_notifier(&rapl_cpu_notifier);
|
||||
rapl_unregister_powercap();
|
||||
rapl_cleanup_data();
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
}
|
||||
|
||||
module_init(rapl_init);
|
||||
|
|
|
@ -2592,12 +2592,16 @@ static int __init bnx2fc_mod_init(void)
|
|||
spin_lock_init(&p->fp_work_lock);
|
||||
}
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
bnx2fc_percpu_thread_create(cpu);
|
||||
}
|
||||
|
||||
/* Initialize per CPU interrupt thread */
|
||||
register_hotcpu_notifier(&bnx2fc_cpu_notifier);
|
||||
__register_hotcpu_notifier(&bnx2fc_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
|
||||
|
||||
|
@ -2662,13 +2666,17 @@ static void __exit bnx2fc_mod_exit(void)
|
|||
if (l2_thread)
|
||||
kthread_stop(l2_thread);
|
||||
|
||||
unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
/* Destroy per cpu threads */
|
||||
for_each_online_cpu(cpu) {
|
||||
bnx2fc_percpu_thread_destroy(cpu);
|
||||
}
|
||||
|
||||
__unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
destroy_workqueue(bnx2fc_wq);
|
||||
/*
|
||||
* detach from scsi transport
|
||||
|
|
|
@ -537,11 +537,15 @@ static int __init bnx2i_mod_init(void)
|
|||
p->iothread = NULL;
|
||||
}
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
bnx2i_percpu_thread_create(cpu);
|
||||
|
||||
/* Initialize per CPU interrupt thread */
|
||||
register_hotcpu_notifier(&bnx2i_cpu_notifier);
|
||||
__register_hotcpu_notifier(&bnx2i_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -581,11 +585,15 @@ static void __exit bnx2i_mod_exit(void)
|
|||
}
|
||||
mutex_unlock(&bnx2i_dev_lock);
|
||||
|
||||
unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
bnx2i_percpu_thread_destroy(cpu);
|
||||
|
||||
__unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
iscsi_unregister_transport(&bnx2i_iscsi_transport);
|
||||
cnic_unregister_driver(CNIC_ULP_ISCSI);
|
||||
}
|
||||
|
|
|
@ -2633,14 +2633,18 @@ static int __init fcoe_init(void)
|
|||
skb_queue_head_init(&p->fcoe_rx_list);
|
||||
}
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
fcoe_percpu_thread_create(cpu);
|
||||
|
||||
/* Initialize per CPU interrupt thread */
|
||||
rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
|
||||
rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
|
||||
if (rc)
|
||||
goto out_free;
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
/* Setup link change notification */
|
||||
fcoe_dev_setup();
|
||||
|
||||
|
@ -2655,6 +2659,9 @@ static int __init fcoe_init(void)
|
|||
for_each_online_cpu(cpu) {
|
||||
fcoe_percpu_thread_destroy(cpu);
|
||||
}
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
mutex_unlock(&fcoe_config_mutex);
|
||||
destroy_workqueue(fcoe_wq);
|
||||
return rc;
|
||||
|
@ -2687,11 +2694,15 @@ static void __exit fcoe_exit(void)
|
|||
}
|
||||
rtnl_unlock();
|
||||
|
||||
unregister_hotcpu_notifier(&fcoe_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
fcoe_percpu_thread_destroy(cpu);
|
||||
|
||||
__unregister_hotcpu_notifier(&fcoe_cpu_notifier);
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
mutex_unlock(&fcoe_config_mutex);
|
||||
|
||||
/*
|
||||
|
|
|
@ -590,12 +590,12 @@ static int __init pkg_temp_thermal_init(void)
|
|||
platform_thermal_package_rate_control =
|
||||
pkg_temp_thermal_platform_thermal_rate_control;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(i)
|
||||
if (get_core_online(i))
|
||||
goto err_ret;
|
||||
register_hotcpu_notifier(&pkg_temp_thermal_notifier);
|
||||
put_online_cpus();
|
||||
__register_hotcpu_notifier(&pkg_temp_thermal_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
pkg_temp_debugfs_init(); /* Don't care if fails */
|
||||
|
||||
|
@ -604,7 +604,7 @@ static int __init pkg_temp_thermal_init(void)
|
|||
err_ret:
|
||||
for_each_online_cpu(i)
|
||||
put_core_offline(i);
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
kfree(pkg_work_scheduled);
|
||||
platform_thermal_package_notify = NULL;
|
||||
platform_thermal_package_rate_control = NULL;
|
||||
|
@ -617,8 +617,8 @@ static void __exit pkg_temp_thermal_exit(void)
|
|||
struct phy_dev_entry *phdev, *n;
|
||||
int i;
|
||||
|
||||
get_online_cpus();
|
||||
unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
__unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
|
||||
mutex_lock(&phy_dev_list_mutex);
|
||||
list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
|
||||
/* Retore old MSR value for package thermal interrupt */
|
||||
|
@ -636,7 +636,7 @@ static void __exit pkg_temp_thermal_exit(void)
|
|||
for_each_online_cpu(i)
|
||||
cancel_delayed_work_sync(
|
||||
&per_cpu(pkg_temp_thermal_threshold_work, i));
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
|
||||
kfree(pkg_work_scheduled);
|
||||
|
||||
|
|
|
@ -708,10 +708,13 @@ static int __init octeon_wdt_init(void)
|
|||
|
||||
cpumask_clear(&irq_enabled_cpus);
|
||||
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
octeon_wdt_setup_interrupt(cpu);
|
||||
|
||||
register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
|
||||
__register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
|
||||
cpu_notifier_register_done();
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
@ -725,7 +728,8 @@ static void __exit octeon_wdt_cleanup(void)
|
|||
|
||||
misc_deregister(&octeon_wdt_miscdev);
|
||||
|
||||
unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
|
||||
cpu_notifier_register_begin();
|
||||
__unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
int core = cpu2core(cpu);
|
||||
|
@ -734,6 +738,9 @@ static void __exit octeon_wdt_cleanup(void)
|
|||
/* Free the interrupt handler */
|
||||
free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
|
||||
}
|
||||
|
||||
cpu_notifier_register_done();
|
||||
|
||||
/*
|
||||
* Disable the boot-bus memory, the code it points to is soon
|
||||
* to go missing.
|
||||
|
|
|
@ -604,19 +604,29 @@ static void __init balloon_add_region(unsigned long start_pfn,
|
|||
}
|
||||
}
|
||||
|
||||
static int alloc_balloon_scratch_page(int cpu)
|
||||
{
|
||||
if (per_cpu(balloon_scratch_page, cpu) != NULL)
|
||||
return 0;
|
||||
|
||||
per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
|
||||
if (per_cpu(balloon_scratch_page, cpu) == NULL) {
|
||||
pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int balloon_cpu_notify(struct notifier_block *self,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
int cpu = (long)hcpu;
|
||||
switch (action) {
|
||||
case CPU_UP_PREPARE:
|
||||
if (per_cpu(balloon_scratch_page, cpu) != NULL)
|
||||
break;
|
||||
per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
|
||||
if (per_cpu(balloon_scratch_page, cpu) == NULL) {
|
||||
pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
|
||||
if (alloc_balloon_scratch_page(cpu))
|
||||
return NOTIFY_BAD;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -636,15 +646,17 @@ static int __init balloon_init(void)
|
|||
return -ENODEV;
|
||||
|
||||
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
|
||||
for_each_online_cpu(cpu)
|
||||
{
|
||||
per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
|
||||
if (per_cpu(balloon_scratch_page, cpu) == NULL) {
|
||||
pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
|
||||
register_cpu_notifier(&balloon_cpu_notifier);
|
||||
|
||||
get_online_cpus();
|
||||
for_each_online_cpu(cpu) {
|
||||
if (alloc_balloon_scratch_page(cpu)) {
|
||||
put_online_cpus();
|
||||
unregister_cpu_notifier(&balloon_cpu_notifier);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
register_cpu_notifier(&balloon_cpu_notifier);
|
||||
put_online_cpus();
|
||||
}
|
||||
|
||||
pr_info("Initialising balloon driver\n");
|
||||
|
|
|
@@ -115,26 +115,46 @@ enum {
{ .notifier_call = fn, .priority = pri };	\
register_cpu_notifier(&fn##_nb);		\
}

#define __cpu_notifier(fn, pri) {			\
static struct notifier_block fn##_nb =		\
{ .notifier_call = fn, .priority = pri };	\
__register_cpu_notifier(&fn##_nb);		\
}
#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */

#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);
#else

#ifndef MODULE
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
#else
static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}
#endif

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif

int cpu_up(unsigned int cpu);

@@ -142,19 +162,32 @@ void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

#define cpu_notifier_register_begin	cpu_maps_update_begin
#define cpu_notifier_register_done	cpu_maps_update_done

#else	/* CONFIG_SMP */

#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void cpu_maps_update_begin(void)
{
}

@@ -163,6 +196,14 @@ static inline void cpu_maps_update_done(void)
{
}

static inline void cpu_notifier_register_begin(void)
{
}

static inline void cpu_notifier_register_done(void)
{
}

#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;

@@ -176,8 +217,11 @@ extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)	__register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

@@ -190,9 +234,12 @@ static inline void cpu_hotplug_done(void) {}
#define cpu_hotplug_disable()	do { } while (0)
#define cpu_hotplug_enable()	do { } while (0)
#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM_SLEEP_SMP
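To make the relationship concrete: the double-underscored macros above differ from
their plain counterparts only in that they assume the caller already holds the
registration lock via cpu_notifier_register_begin()/cpu_notifier_register_done().
A minimal sketch of the macro-based form, modeled on the conversions elsewhere in
this series (the example_* names are hypothetical, not from this commit):

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int example_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		/* react to CPU hotplug events for CPU (long)hcpu */
		return NOTIFY_OK;
	}

	static int __init example_init(void)
	{
		int cpu;

		cpu_notifier_register_begin();

		/* Bring CPUs that are already online up to date by hand. */
		for_each_online_cpu(cpu)
			example_cpu_callback(NULL, CPU_ONLINE, (void *)(long)cpu);

		/* Expands to __register_cpu_notifier() on a static notifier_block. */
		__hotcpu_notifier(example_cpu_callback, 0);

		cpu_notifier_register_done();
		return 0;
	}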
@@ -835,6 +835,8 @@ do {									\
{ .notifier_call = fn, .priority = CPU_PRI_PERF };		\
unsigned long cpu = smp_processor_id();				\
unsigned long flags;						\
									\
cpu_notifier_register_begin();					\
fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
	(void *)(unsigned long)cpu);				\
local_irq_save(flags);						\

@@ -843,9 +845,21 @@ do {									\
local_irq_restore(flags);					\
fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
	(void *)(unsigned long)cpu);				\
register_cpu_notifier(&fn##_nb);				\
__register_cpu_notifier(&fn##_nb);				\
cpu_notifier_register_done();					\
} while (0)

/*
 * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
 * callback for already online CPUs.
 */
#define __perf_cpu_notifier(fn)						\
do {									\
	static struct notifier_block fn##_nb =				\
		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
									\
	__register_cpu_notifier(&fn##_nb);				\
} while (0)

struct perf_pmu_events_attr {
	struct device_attribute attr;
kernel/cpu.c (38)

@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>

#include "smpboot.h"

@@ -27,18 +28,23 @@
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two API's must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

@@ -57,17 +63,30 @@ static struct {
 * an ongoing cpu hotplug operation.
 */
int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);

@@ -87,6 +106,7 @@ void put_online_cpus(void)
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);

@@ -117,6 +137,7 @@ void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))

@@ -131,6 +152,7 @@ void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*

@@ -166,6 +188,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{

@@ -189,6 +216,7 @@ static void cpu_notify_nofail(unsigned long val, void *v)
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{

@@ -198,6 +226,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
@@ -591,18 +591,28 @@ static int create_hash_tables(void)
int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;
	int err = 0;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;

	cpu_notifier_register_begin();

	if (create_hash_tables()) {
		err = -ENOMEM;
		goto out;
	}

	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			NULL, &proc_profile_operations);
	if (!entry)
		return 0;
		goto out;
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
	__hotcpu_notifier(profile_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */
@@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 * In that off case, we need to allocate for all possible cpus.
 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpu_notifier_register_begin();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);

@@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
	__register_cpu_notifier(&buffer->cpu_notify);
	cpu_notifier_register_done();
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

@@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,

fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

fail_free_buffer:
	kfree(buffer);

@@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);
@@ -1298,14 +1298,14 @@ static int __init setup_vmstat(void)
#ifdef CONFIG_SMP
	int cpu;

	register_cpu_notifier(&vmstat_notifier);
	cpu_notifier_register_begin();
	__register_cpu_notifier(&vmstat_notifier);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		start_cpu_timer(cpu);
		node_set_state(cpu_to_node(cpu), N_CPU);
	}
	put_online_cpus();
	cpu_notifier_register_done();
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
@@ -814,21 +814,32 @@ static void zs_exit(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	unregister_cpu_notifier(&zs_cpu_nb);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static int zs_init(void)
{
	int cpu, ret;

	register_cpu_notifier(&zs_cpu_nb);
	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
		if (notifier_to_errno(ret)) {
			cpu_notifier_register_done();
			goto fail;
		}
	}

	cpu_notifier_register_done();

	return 0;
fail:
	zs_exit();
@ -387,18 +387,18 @@ static int zswap_cpu_init(void)
|
|||
{
|
||||
unsigned long cpu;
|
||||
|
||||
get_online_cpus();
|
||||
cpu_notifier_register_begin();
|
||||
for_each_online_cpu(cpu)
|
||||
if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
|
||||
goto cleanup;
|
||||
register_cpu_notifier(&zswap_cpu_notifier_block);
|
||||
put_online_cpus();
|
||||
__register_cpu_notifier(&zswap_cpu_notifier_block);
|
||||
cpu_notifier_register_done();
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
for_each_online_cpu(cpu)
|
||||
__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
|
||||
put_online_cpus();
|
||||
cpu_notifier_register_done();
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|

net/core/flow.c

@@ -455,6 +455,8 @@ int flow_cache_init(struct net *net)
         if (!fc->percpu)
                 return -ENOMEM;

+        cpu_notifier_register_begin();
+
         for_each_online_cpu(i) {
                 if (flow_cache_cpu_prepare(fc, i))
                         goto err;
@@ -462,7 +464,9 @@ int flow_cache_init(struct net *net)
         fc->hotcpu_notifier = (struct notifier_block){
                 .notifier_call = flow_cache_cpu,
         };
-        register_hotcpu_notifier(&fc->hotcpu_notifier);
+        __register_hotcpu_notifier(&fc->hotcpu_notifier);
+
+        cpu_notifier_register_done();

         setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                     (unsigned long) fc);
@@ -478,6 +482,8 @@ int flow_cache_init(struct net *net)
                 fcp->hash_table = NULL;
         }

+        cpu_notifier_register_done();
+
         free_percpu(fc->percpu);
         fc->percpu = NULL;

net/iucv/iucv.c

@@ -621,6 +621,42 @@ static void iucv_disable(void)
         put_online_cpus();
 }

+static void free_iucv_data(int cpu)
+{
+        kfree(iucv_param_irq[cpu]);
+        iucv_param_irq[cpu] = NULL;
+        kfree(iucv_param[cpu]);
+        iucv_param[cpu] = NULL;
+        kfree(iucv_irq_data[cpu]);
+        iucv_irq_data[cpu] = NULL;
+}
+
+static int alloc_iucv_data(int cpu)
+{
+        /* Note: GFP_DMA used to get memory below 2G */
+        iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
+                                          GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+        if (!iucv_irq_data[cpu])
+                goto out_free;
+
+        /* Allocate parameter blocks. */
+        iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
+                                       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+        if (!iucv_param[cpu])
+                goto out_free;
+
+        iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+                                           GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+        if (!iucv_param_irq[cpu])
+                goto out_free;
+
+        return 0;
+
+out_free:
+        free_iucv_data(cpu);
+        return -ENOMEM;
+}
+
 static int iucv_cpu_notify(struct notifier_block *self,
                            unsigned long action, void *hcpu)
 {
@@ -630,38 +666,14 @@ static int iucv_cpu_notify(struct notifier_block *self,
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
-                iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
-                                        GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-                if (!iucv_irq_data[cpu])
+                if (alloc_iucv_data(cpu))
                         return notifier_from_errno(-ENOMEM);
-
-                iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-                if (!iucv_param[cpu]) {
-                        kfree(iucv_irq_data[cpu]);
-                        iucv_irq_data[cpu] = NULL;
-                        return notifier_from_errno(-ENOMEM);
-                }
-                iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                        GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-                if (!iucv_param_irq[cpu]) {
-                        kfree(iucv_param[cpu]);
-                        iucv_param[cpu] = NULL;
-                        kfree(iucv_irq_data[cpu]);
-                        iucv_irq_data[cpu] = NULL;
-                        return notifier_from_errno(-ENOMEM);
-                }
                 break;
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                kfree(iucv_param_irq[cpu]);
-                iucv_param_irq[cpu] = NULL;
-                kfree(iucv_param[cpu]);
-                iucv_param[cpu] = NULL;
-                kfree(iucv_irq_data[cpu]);
-                iucv_irq_data[cpu] = NULL;
+                free_iucv_data(cpu);
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
@@ -2025,33 +2037,20 @@ static int __init iucv_init(void)
                 goto out_int;
         }

+        cpu_notifier_register_begin();
+
         for_each_online_cpu(cpu) {
-                /* Note: GFP_DMA used to get memory below 2G */
-                iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
-                                     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-                if (!iucv_irq_data[cpu]) {
+                if (alloc_iucv_data(cpu)) {
                         rc = -ENOMEM;
                         goto out_free;
                 }
-
-                /* Allocate parameter blocks. */
-                iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-                if (!iucv_param[cpu]) {
-                        rc = -ENOMEM;
-                        goto out_free;
-                }
-                iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
-                                  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
-                if (!iucv_param_irq[cpu]) {
-                        rc = -ENOMEM;
-                        goto out_free;
-                }
-
         }
-        rc = register_hotcpu_notifier(&iucv_cpu_notifier);
+        rc = __register_hotcpu_notifier(&iucv_cpu_notifier);
         if (rc)
                 goto out_free;
+
+        cpu_notifier_register_done();
+
         rc = register_reboot_notifier(&iucv_reboot_notifier);
         if (rc)
                 goto out_cpu;
@@ -2069,16 +2068,14 @@ static int __init iucv_init(void)
 out_reboot:
         unregister_reboot_notifier(&iucv_reboot_notifier);
 out_cpu:
-        unregister_hotcpu_notifier(&iucv_cpu_notifier);
+        cpu_notifier_register_begin();
+        __unregister_hotcpu_notifier(&iucv_cpu_notifier);
 out_free:
-        for_each_possible_cpu(cpu) {
-                kfree(iucv_param_irq[cpu]);
-                iucv_param_irq[cpu] = NULL;
-                kfree(iucv_param[cpu]);
-                iucv_param[cpu] = NULL;
-                kfree(iucv_irq_data[cpu]);
-                iucv_irq_data[cpu] = NULL;
-        }
+        for_each_possible_cpu(cpu)
+                free_iucv_data(cpu);
+
+        cpu_notifier_register_done();
+
         root_device_unregister(iucv_root);
 out_int:
         unregister_external_interrupt(0x4000, iucv_external_interrupt);
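
The two iucv_init() hunks above also illustrate how the error labels have to respect the protected region: out_free can be reached directly from inside the begin()/done() section (allocation or registration failure) or by falling through out_cpu, which re-takes the lock after it had already been dropped, so the single cpu_notifier_register_done() at the end of the unwind path releases it exactly once either way. Roughly, in the same hypothetical foo_ terms (foo_alloc_cpu, foo_free_cpu and foo_register_other_stuff are illustrative):

static int __init foo_init(void)
{
        int cpu, rc;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu) {
                if (foo_alloc_cpu(cpu)) {               /* illustrative helper */
                        rc = -ENOMEM;
                        goto out_free;                  /* lock still held */
                }
        }
        rc = __register_hotcpu_notifier(&foo_nb);
        if (rc)
                goto out_free;                          /* lock still held */

        cpu_notifier_register_done();

        rc = foo_register_other_stuff();                /* illustrative later step */
        if (rc)
                goto out_cpu;                           /* lock already dropped */
        return 0;

out_cpu:
        cpu_notifier_register_begin();                  /* re-take before unwinding */
        __unregister_hotcpu_notifier(&foo_nb);
out_free:
        for_each_possible_cpu(cpu)
                foo_free_cpu(cpu);                      /* illustrative helper */
        cpu_notifier_register_done();                   /* one release on every path */
        return rc;
}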
@@ -2105,15 +2102,11 @@ static void __exit iucv_exit(void)
                 kfree(p);
         spin_unlock_irq(&iucv_queue_lock);
         unregister_reboot_notifier(&iucv_reboot_notifier);
-        unregister_hotcpu_notifier(&iucv_cpu_notifier);
-        for_each_possible_cpu(cpu) {
-                kfree(iucv_param_irq[cpu]);
-                iucv_param_irq[cpu] = NULL;
-                kfree(iucv_param[cpu]);
-                iucv_param[cpu] = NULL;
-                kfree(iucv_irq_data[cpu]);
-                iucv_irq_data[cpu] = NULL;
-        }
+        cpu_notifier_register_begin();
+        __unregister_hotcpu_notifier(&iucv_cpu_notifier);
+        for_each_possible_cpu(cpu)
+                free_iucv_data(cpu);
+        cpu_notifier_register_done();
         root_device_unregister(iucv_root);
         bus_unregister(&iucv_bus);
         unregister_external_interrupt(0x4000, iucv_external_interrupt);
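
Tear-down at module exit mirrors the setup: the hotplug notifier is removed with the double-underscore variant while the registration lock is held, and the per-CPU data is freed for every possible CPU inside the same section, as iucv_exit() now does. A minimal sketch in the same hypothetical foo_ terms (foo_free_cpu is illustrative):

static void __exit foo_exit(void)
{
        int cpu;

        cpu_notifier_register_begin();

        /* lockless counterpart of unregister_hotcpu_notifier() */
        __unregister_hotcpu_notifier(&foo_nb);

        for_each_possible_cpu(cpu)
                foo_free_cpu(cpu);      /* covers CPUs that never came online too */

        cpu_notifier_register_done();
}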