jump_label: Provide hotplug context variants

As using the normal static key API under the hotplug lock is
pretty much impossible, let's provide variants of some of these
functions that require the hotplug lock to have already been taken.

These functions are only meant to be used in CPU hotplug callbacks.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170801080257.5056-4-marc.zyngier@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Marc Zyngier, 2017-08-01 09:02:56 +01:00, committed by Ingo Molnar
commit 5a40527f8f (parent 8b7b412807)
3 changed files with 42 additions and 6 deletions

Documentation/static-keys.txt

@@ -154,6 +154,21 @@ and 'static_key_count()'. In general, if you use these functions, they
 should be protected with the same mutex used around the enable/disable
 or increment/decrement function.
 
+Note that switching branches results in some locks being taken,
+particularly the CPU hotplug lock (in order to avoid races against
+CPUs being brought up in the kernel whilst the kernel is getting
+patched). Calling the static key API from within a hotplug notifier is
+thus a sure deadlock recipe. In order to still allow use of the
+functionality, the following functions are provided:
+
+	static_key_enable_cpuslocked()
+	static_key_disable_cpuslocked()
+	static_branch_enable_cpuslocked()
+	static_branch_disable_cpuslocked()
+
+These functions are *not* general purpose, and must only be used when
+you really know that you're in the above context, and no other.
+
 Where an array of keys is required, it can be defined as::
 
 	DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
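
The documentation added above spells out why the plain API deadlocks from
hotplug context. As a minimal, hypothetical sketch of the intended usage
(the key and callback names below are made up and not part of this commit),
a CPU hotplug callback flips a static branch like this:

#include <linux/jump_label.h>

/* Hypothetical key; any static key flipped from hotplug context works the same way. */
static DEFINE_STATIC_KEY_FALSE(example_feature_key);

/* Runs as a CPU hotplug "online" callback, with the hotplug lock already held. */
static int example_feature_online_cpu(unsigned int cpu)
{
        /*
         * static_branch_enable() would try to take the hotplug lock again
         * and deadlock; the _cpuslocked variant assumes it is already held.
         */
        static_branch_enable_cpuslocked(&example_feature_key);
        return 0;
}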

include/linux/jump_label.h

@@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
 extern void static_key_enable(struct static_key *key);
 extern void static_key_disable(struct static_key *key);
+extern void static_key_enable_cpuslocked(struct static_key *key);
+extern void static_key_disable_cpuslocked(struct static_key *key);
 
 /*
  * We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -254,6 +256,9 @@ static inline void static_key_disable(struct static_key *key)
 	atomic_set(&key->enabled, 0);
 }
 
+#define static_key_enable_cpuslocked(k)		static_key_enable((k))
+#define static_key_disable_cpuslocked(k)	static_key_disable((k))
+
 #define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
 #define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }
@@ -415,8 +420,10 @@ extern bool ____wrong_branch_error(void);
 /*
  * Normal usage; boolean enable/disable.
  */
 
 #define static_branch_enable(x)			static_key_enable(&(x)->key)
 #define static_branch_disable(x)		static_key_disable(&(x)->key)
+#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
+#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)
 
 #endif /* __ASSEMBLY__ */
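
Continuing the hypothetical sketch above, the callback would typically be
registered with cpuhp_setup_state(); the hotplug lock is held around callback
invocations (write-held when a CPU is actually being brought up), which is
exactly why the callback has to stick to the _cpuslocked variants. The state
name and init function below are illustrative only:

#include <linux/cpuhotplug.h>

static int __init example_feature_init(void)
{
        int ret;

        /*
         * cpuhp_setup_state() holds the hotplug lock and invokes
         * example_feature_online_cpu() (the callback from the sketch above)
         * for all currently online CPUs before returning, so the callback
         * must not try to take the lock itself.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/feature:online",
                                example_feature_online_cpu, NULL);
        return ret < 0 ? ret : 0;
}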

kernel/jump_label.c

@@ -126,15 +126,15 @@ void static_key_slow_inc(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
-void static_key_enable(struct static_key *key)
+void static_key_enable_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
+
 	if (atomic_read(&key->enabled) > 0) {
 		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
 		return;
 	}
 
-	cpus_read_lock();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -145,23 +145,37 @@ void static_key_enable(struct static_key *key)
 		atomic_set_release(&key->enabled, 1);
 	}
 	jump_label_unlock();
+}
+EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
+
+void static_key_enable(struct static_key *key)
+{
+	cpus_read_lock();
+	static_key_enable_cpuslocked(key);
 	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_enable);
 
-void static_key_disable(struct static_key *key)
+void static_key_disable_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
+
 	if (atomic_read(&key->enabled) != 1) {
 		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
 		return;
 	}
 
-	cpus_read_lock();
 	jump_label_lock();
 	if (atomic_cmpxchg(&key->enabled, 1, 0))
 		jump_label_update(key);
 	jump_label_unlock();
+}
+EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
+
+void static_key_disable(struct static_key *key)
+{
+	cpus_read_lock();
+	static_key_disable_cpuslocked(key);
 	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
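
As the hunk above shows, static_key_enable()/static_key_disable() become thin
wrappers that take the hotplug lock around the new _cpuslocked workers. A
hypothetical caller in ordinary process context keeps using the plain
wrappers, or can open-code the equivalent locking explicitly; the key name
below is again illustrative:

#include <linux/cpu.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_feature_key);

static void example_feature_set(bool on)
{
        /* From normal context the plain wrappers take the hotplug lock themselves. */
        if (on)
                static_branch_enable(&example_feature_key);
        else
                static_branch_disable(&example_feature_key);
}

static void example_feature_enable_locked_equivalent(void)
{
        /* Equivalent to static_branch_enable(), with the lock taken explicitly. */
        cpus_read_lock();
        static_branch_enable_cpuslocked(&example_feature_key);
        cpus_read_unlock();
}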