clocksource: Allow clocksource_mark_unstable() on unregistered clocksources

Because of how the code flips between tsc-early and tsc clocksources
it might need to mark one or both unstable. The current code in
mark_tsc_unstable() only worked because previously it registered the
tsc clocksource once and then never touched it.

Since it now unregisters the tsc-early clocksource, it needs to know
if a clocksource got unregistered and the current cs->mult test
doesn't work for that. Instead use list_empty(&cs->list) to test for
registration.
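
For illustration only, a minimal user-space sketch of that registration test
(toy list helpers, hypothetical names; the real change is in the diff below):
a clocksource counts as registered exactly when its ->list node is non-empty,
which is why the node must start out initialized to the empty state.

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add(struct list_head *n, struct list_head *head)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);              /* back to the "unregistered" state */
}

struct clocksource { struct list_head list; int rating; };

static struct list_head clocksource_list = { &clocksource_list, &clocksource_list };

static void mark_unstable(struct clocksource *cs)
{
        if (list_empty(&cs->list))      /* not registered: nothing to do */
                return;
        cs->rating = 0;                 /* de-rate the registered clocksource */
}

int main(void)
{
        struct clocksource cs = { .rating = 300 };

        INIT_LIST_HEAD(&cs.list);
        mark_unstable(&cs);                     /* safe: not registered yet */

        list_add(&cs.list, &clocksource_list);  /* register */
        mark_unstable(&cs);                     /* de-rates: rating becomes 0 */

        list_del_init(&cs.list);                /* unregister */
        mark_unstable(&cs);                     /* safe again */

        printf("rating = %d\n", cs.rating);
        return 0;
}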

Furthermore, since clocksource_mark_unstable() needs to place the cs
on the wd_list, it links the cs->list and cs->wd_list serialization.
It must not see a clocksource registered (!empty cs->list) but already
past dequeue_watchdog(). So place {en,de}queue{,_watchdog}() under the
same lock.
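
A stand-alone toy sketch of that rule, not kernel code (booleans stand in for
the two list memberships, a pthread mutex stands in for watchdog_lock; all
names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct clocksource {
        bool on_list;           /* models !list_empty(&cs->list)    */
        bool on_wd_list;        /* models !list_empty(&cs->wd_list) */
        bool unstable;
};

static pthread_mutex_t watchdog_lock = PTHREAD_MUTEX_INITIALIZER;

static void clocksource_register(struct clocksource *cs)
{
        pthread_mutex_lock(&watchdog_lock);
        cs->on_list = true;             /* enqueue */
        cs->on_wd_list = true;          /* enqueue_watchdog */
        pthread_mutex_unlock(&watchdog_lock);
}

static void clocksource_unregister(struct clocksource *cs)
{
        pthread_mutex_lock(&watchdog_lock);
        cs->on_wd_list = false;         /* dequeue_watchdog */
        cs->on_list = false;            /* list_del_init(&cs->list) */
        pthread_mutex_unlock(&watchdog_lock);
}

static void clocksource_mark_unstable(struct clocksource *cs)
{
        pthread_mutex_lock(&watchdog_lock);
        /*
         * Because registration and unregistration flip both flags under
         * the same lock, "on_list && !on_wd_list" here can only mean
         * "registered but not yet watched", never "half unregistered".
         */
        if (cs->on_list && !cs->on_wd_list)
                cs->on_wd_list = true;
        cs->unstable = true;
        pthread_mutex_unlock(&watchdog_lock);
}

int main(void)
{
        struct clocksource tsc_early = { 0 };

        clocksource_mark_unstable(&tsc_early);  /* fine: not registered */
        clocksource_register(&tsc_early);
        clocksource_mark_unstable(&tsc_early);
        clocksource_unregister(&tsc_early);
        printf("unstable=%d on_wd_list=%d\n",
               tsc_early.unstable, tsc_early.on_wd_list);
        return 0;
}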

Provided cs->list is initialized to empty, this then allows us to
unconditionally use clocksource_mark_unstable(), regardless of the
registration state.

Fixes: aa83c45762 ("x86/tsc: Introduce early tsc clocksource")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Diego Viola <diego.viola@gmail.com>
Cc: len.brown@intel.com
Cc: rjw@rjwysocki.net
Cc: diego.viola@gmail.com
Cc: rui.zhang@intel.com
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180502135312.GS12217@hirez.programming.kicks-ass.net
commit 2aae7bcfa4 (parent e9088adda1)
Author: Peter Zijlstra, 2018-04-23 17:28:55 +02:00; committed by Thomas Gleixner
1 changed file with 34 additions and 16 deletions

--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+        spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+        spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,6 +152,9 @@ static void __clocksource_unstable(struct clocksource *cs)
         cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
         cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+        if (list_empty(&cs->list))
+                return;
+
         if (cs->mark_unstable)
                 cs->mark_unstable(cs);
 
@@ -164,7 +177,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 
         spin_lock_irqsave(&watchdog_lock, flags);
         if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-                if (list_empty(&cs->wd_list))
+                if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
                         list_add(&cs->wd_list, &watchdog_list);
                 __clocksource_unstable(cs);
         }
@@ -319,9 +332,6 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&watchdog_lock, flags);
         if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                 /* cs is a clocksource to be watched. */
                 list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +341,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
                 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                         cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
         }
-        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +382,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&watchdog_lock, flags);
         if (cs != watchdog) {
                 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                         /* cs is a watched clocksource. */
@@ -384,21 +390,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
                                 clocksource_stop_watchdog();
                 }
         }
-        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
         struct clocksource *cs, *tmp;
         unsigned long flags;
-        LIST_HEAD(unstable);
         int select = 0;
 
         spin_lock_irqsave(&watchdog_lock, flags);
         list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
                 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                         list_del_init(&cs->wd_list);
-                        list_add(&cs->wd_list, &unstable);
+                        __clocksource_change_rating(cs, 0);
                         select = 1;
                 }
                 if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +414,6 @@ static int __clocksource_watchdog_kthread(void)
         clocksource_stop_watchdog();
         spin_unlock_irqrestore(&watchdog_lock, flags);
 
-        /* Needs to be done outside of watchdog lock */
-        list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-                list_del_init(&cs->wd_list);
-                __clocksource_change_rating(cs, 0);
-        }
         return select;
 }
 
@@ -447,6 +446,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +781,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+        unsigned long flags;
 
         /* Initialize mult/shift and max_idle_ns */
         __clocksource_update_freq_scale(cs, scale, freq);
 
         /* Add clocksource to the clocksource list */
         mutex_lock(&clocksource_mutex);
+
+        clocksource_watchdog_lock(&flags);
         clocksource_enqueue(cs);
         clocksource_enqueue_watchdog(cs);
+        clocksource_watchdog_unlock(&flags);
+
         clocksource_select();
         clocksource_select_watchdog(false);
         mutex_unlock(&clocksource_mutex);
@@ -808,8 +815,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+        unsigned long flags;
+
         mutex_lock(&clocksource_mutex);
+        clocksource_watchdog_lock(&flags);
         __clocksource_change_rating(cs, rating);
+        clocksource_watchdog_unlock(&flags);
+
         clocksource_select();
         clocksource_select_watchdog(false);
         mutex_unlock(&clocksource_mutex);
@@ -821,6 +833,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+        unsigned long flags;
+
         if (clocksource_is_watchdog(cs)) {
                 /* Select and try to install a replacement watchdog. */
                 clocksource_select_watchdog(true);
@@ -834,8 +848,12 @@ static int clocksource_unbind(struct clocksource *cs)
                 if (curr_clocksource == cs)
                         return -EBUSY;
         }
+
+        clocksource_watchdog_lock(&flags);
         clocksource_dequeue_watchdog(cs);
         list_del_init(&cs->list);
+        clocksource_watchdog_unlock(&flags);
+
         return 0;
 }