jump_label: Fix concurrent static_key_enable/disable()

static_key_enable/disable are trying to cap the static key count to
0/1.  However, they read key->enabled outside jump_label_lock, so they
cannot actually guarantee that cap.

Rewrite them to do a quick check for an already enabled (respectively,
already disabled) key, and then recheck under the jump label lock.  Unlike
static_key_slow_inc/dec, a failed recheck under the jump label lock does
not modify key->enabled.
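
For illustration only (not part of this patch), a minimal sketch of the
race the old code allowed; the key and caller below are hypothetical:

	static struct static_key hypothetical_key = STATIC_KEY_INIT_FALSE;

	/* Imagine two CPUs running this concurrently against the old code. */
	static void hypothetical_enable_feature(void)
	{
		/*
		 * The old static_key_enable() read the count outside
		 * jump_label_lock, so both CPUs could observe count == 0 and
		 * both call static_key_slow_inc(), leaving key->enabled at 2
		 * instead of capped at 1.
		 */
		static_key_enable(&hypothetical_key);
	}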

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Jason Baron <jbaron@akamai.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1501601046-35683-2-git-send-email-pbonzini@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 1dbb6704de (parent 83ced169d9)
Author:    Paolo Bonzini <pbonzini@redhat.com>
Date:      2017-08-01 17:24:04 +02:00
Committer: Ingo Molnar <mingo@kernel.org>

2 changed files with 49 additions and 32 deletions

diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h

@@ -234,22 +234,24 @@ static inline int jump_label_apply_nops(struct module *mod)
 
 static inline void static_key_enable(struct static_key *key)
 {
-	int count = static_key_count(key);
-
-	WARN_ON_ONCE(count < 0 || count > 1);
-	if (!count)
-		static_key_slow_inc(key);
+	STATIC_KEY_CHECK_USE();
+
+	if (atomic_read(&key->enabled) != 0) {
+		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+		return;
+	}
+	atomic_set(&key->enabled, 1);
 }
 
 static inline void static_key_disable(struct static_key *key)
 {
-	int count = static_key_count(key);
-
-	WARN_ON_ONCE(count < 0 || count > 1);
-	if (count)
-		static_key_slow_dec(key);
+	STATIC_KEY_CHECK_USE();
+
+	if (atomic_read(&key->enabled) != 1) {
+		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+		return;
+	}
+	atomic_set(&key->enabled, 0);
 }
 
 #define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c

@@ -79,28 +79,6 @@ int static_key_count(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-void static_key_enable(struct static_key *key)
-{
-	int count = static_key_count(key);
-
-	WARN_ON_ONCE(count < 0 || count > 1);
-	if (!count)
-		static_key_slow_inc(key);
-}
-EXPORT_SYMBOL_GPL(static_key_enable);
-
-void static_key_disable(struct static_key *key)
-{
-	int count = static_key_count(key);
-
-	WARN_ON_ONCE(count < 0 || count > 1);
-	if (count)
-		static_key_slow_dec(key);
-}
-EXPORT_SYMBOL_GPL(static_key_disable);
-
 void static_key_slow_inc(struct static_key *key)
 {
 	int v, v1;
@@ -139,6 +117,43 @@ void static_key_slow_inc(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
+void static_key_enable(struct static_key *key)
+{
+	STATIC_KEY_CHECK_USE();
+
+	if (atomic_read(&key->enabled) > 0) {
+		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+		return;
+	}
+
+	cpus_read_lock();
+	jump_label_lock();
+	if (atomic_read(&key->enabled) == 0) {
+		atomic_set(&key->enabled, -1);
+		jump_label_update(key);
+		atomic_set(&key->enabled, 1);
+	}
+	jump_label_unlock();
+	cpus_read_unlock();
+}
+EXPORT_SYMBOL_GPL(static_key_enable);
+
+void static_key_disable(struct static_key *key)
+{
+	STATIC_KEY_CHECK_USE();
+
+	if (atomic_read(&key->enabled) != 1) {
+		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+		return;
+	}
+
+	cpus_read_lock();
+	jump_label_lock();
+	if (atomic_cmpxchg(&key->enabled, 1, 0))
+		jump_label_update(key);
+	jump_label_unlock();
+	cpus_read_unlock();
+}
+EXPORT_SYMBOL_GPL(static_key_disable);
+
 static void __static_key_slow_dec(struct static_key *key,
 				  unsigned long rate_limit, struct delayed_work *work)
 {