clocksource: fix resume logic

We need to make sure that the clocksources are resumed when timekeeping is
resumed. The current resume logic does not guarantee this.

Add a resume function pointer to the clocksource struct, so that clocksource
drivers which need to reinitialize the clocksource after suspend can provide
a resume function.
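As an illustration, a driver-side sketch of how such a callback would be
wired up; the "myhw" names and the reinit helper are hypothetical, only the
.resume member comes from this patch:

	/* Hypothetical clocksource driver using the new callback. */
	static void myhw_clocksource_resume(void)
	{
		/* Reprogram the counter hardware, which firmware may
		 * have powered down or reset during suspend. */
		myhw_counter_reinit();		/* hypothetical helper */
	}

	static struct clocksource clocksource_myhw = {
		.name	= "myhw",
		.rating	= 200,
		.read	= myhw_clocksource_read,   /* hypothetical */
		.mask	= CLOCKSOURCE_MASK(32),
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
		.resume	= myhw_clocksource_resume, /* new in this patch */
	};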

Add a resume function which invokes those per-clocksource resume callbacks,
where available, and resets the clocksource watchdog, so that a stable TSC
can be used across suspend/resume.
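For illustration, a condensed sketch of the watchdog side of the change
(control flow paraphrased from the diff below; the delta check and the
demotion step are summarized, not quoted verbatim):

	if (unlikely(resumed)) {
		/* The deltas span the suspended period and are
		 * meaningless: re-baseline and skip the check once. */
		cs->wd_last = csnow;
	} else {
		cs_nsec = cyc2ns(cs, (csnow - cs->wd_last) & cs->mask);
		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)
			/* clocksource drifted from the watchdog and
			 * would be rated down as unstable */ ;
	}

Without the re-baselining, the first post-resume delta would almost always
exceed the threshold and a perfectly stable TSC would be demoted.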

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

include/linux/clocksource.h

@@ -49,6 +49,7 @@ struct clocksource;
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
+ * @resume:		resume function for the clocksource, if necessary
  * @cycle_interval:	Used internally by timekeeping core, please ignore.
  * @xtime_interval:	Used internally by timekeeping core, please ignore.
  */
@@ -65,6 +66,7 @@ struct clocksource {
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
+	void (*resume)(void);
 
 	/* timekeeping specific data, ignore */
 	cycle_t cycle_interval;
@@ -209,6 +211,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 extern int clocksource_register(struct clocksource*);
 extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_resume(void);
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void update_vsyscall(struct timespec *ts, struct clocksource *c);

kernel/time/clocksource.c

@@ -74,6 +74,8 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
+static int watchdog_resumed;
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
@@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data)
 	struct clocksource *cs, *tmp;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
+	int resumed;
 
 	spin_lock(&watchdog_lock);
 
+	resumed = watchdog_resumed;
+	if (unlikely(resumed))
+		watchdog_resumed = 0;
+
 	wdnow = watchdog->read();
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		csnow = cs->read();
+
+		if (unlikely(resumed)) {
+			cs->wd_last = csnow;
+			continue;
+		}
+
 		/* Initialized ? */
 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
 			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
@@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data)
 	}
 	spin_unlock(&watchdog_lock);
 }
 
+static void clocksource_resume_watchdog(void)
+{
+	spin_lock(&watchdog_lock);
+	watchdog_resumed = 1;
+	spin_unlock(&watchdog_lock);
+}
+
 static void clocksource_check_watchdog(struct clocksource *cs)
 {
 	struct clocksource *cse;
@@ -182,8 +202,33 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
 
+static inline void clocksource_resume_watchdog(void) { }
+
 #endif
 
+/**
+ * clocksource_resume - resume the clocksource(s)
+ */
+void clocksource_resume(void)
+{
+	struct list_head *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clocksource_lock, flags);
+
+	list_for_each(tmp, &clocksource_list) {
+		struct clocksource *cs;
+
+		cs = list_entry(tmp, struct clocksource, list);
+
+		if (cs->resume)
+			cs->resume();
+	}
+
+	clocksource_resume_watchdog();
+
+	spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+
 /**
  * clocksource_get_next - Returns the selected clocksource
  *
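A side note on the existing arithmetic: the (wdnow - watchdog_last) &
watchdog->mask expression in the hunk above is the usual wraparound-safe
cycle-delta idiom. A small standalone sketch, with plain integer types
standing in for cycle_t:

	#include <stdint.h>
	#include <stdio.h>

	/* Wraparound-safe cycle delta: unsigned subtraction followed by
	 * masking to the counter width yields the elapsed cycles even if
	 * the hardware counter rolled over between the two reads. */
	static uint64_t cycle_delta(uint64_t now, uint64_t last, uint64_t mask)
	{
		return (now - last) & mask;
	}

	int main(void)
	{
		uint64_t mask = (1ULL << 32) - 1;	/* 32-bit counter */

		/* counter wrapped from 0xfffffff0 past zero to 0x10 */
		printf("%llu\n", (unsigned long long)
		       cycle_delta(0x10, 0xfffffff0, mask));	/* prints 32 */
		return 0;
	}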

kernel/timer.c

@@ -1499,6 +1499,8 @@ unregister_time_interpolator(struct time_interpolator *ti)
 		prev = &curr->next;
 	}
 
+	clocksource_resume();
+
 	write_seqlock_irqsave(&xtime_lock, flags);
 	if (ti == time_interpolator) {
 		/* we lost the best time-interpolator: */