author     Thomas Gleixner <tglx@linutronix.de>                    2007-05-09 02:35:15 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-09 12:30:56 -0700
commit     b52f52a093bb1e841e014c2087b5bee7162da413 (patch)
tree       7b7135897195fc9d14473d3ab824d59a4b65e5ad
parent     4037d452202e34214e8a939fa5621b2b3bbb45b7 (diff)
clocksource: fix resume logic
We need to make sure that the clocksources are resumed when timekeeping is
resumed. The current resume logic does not guarantee this.
Add a resume function pointer to the clocksource struct, so clocksource
drivers that need to reinitialize the clocksource after a suspend/resume
cycle can provide a resume function.
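
A minimal sketch of how a driver could use the new hook, assuming a
hypothetical memory-mapped "acme" timer: the device, its register offsets
and its rating are made up for illustration, only struct clocksource, its
flags and clocksource_register() come from the existing API plus this patch.

#include <linux/clocksource.h>
#include <linux/io.h>

#define ACME_CTRL               0x00    /* hypothetical control register */
#define ACME_COUNT              0x04    /* hypothetical free-running counter */
#define ACME_CTRL_ENABLE        0x01

static void __iomem *acme_timer_base;   /* assumed to be ioremap()ed at probe time */

static cycle_t acme_timer_read(void)
{
        return (cycle_t)readl(acme_timer_base + ACME_COUNT);
}

/* Called via clocksource_resume(): firmware may have powered the timer
 * block down during suspend, so put it back into free-running mode. */
static void acme_timer_resume(void)
{
        writel(ACME_CTRL_ENABLE, acme_timer_base + ACME_CTRL);
}

static struct clocksource acme_clocksource = {
        .name   = "acme-timer",
        .rating = 200,
        .read   = acme_timer_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume = acme_timer_resume,    /* new hook added by this patch */
        /* .mult/.shift setup omitted for brevity */
};

Registration still goes through clocksource_register(&acme_clocksource);
drivers that need no reinitialization simply leave .resume NULL and are
skipped by clocksource_resume().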
Add a resume function, which calls the resume callbacks of the registered
clocksources (where provided) and resets the watchdog, so a stable TSC can
be used across suspend/resume.
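
To make the watchdog part concrete, the user-space arithmetic below (not
kernel code; the 3 GHz TSC rate and the 60 s suspend time are assumed
numbers, the 0.5 s figure is the watchdog interval in the code touched
below) shows why the first comparison after resume must be skipped: the
saved reference values are stale, so the computed delta spans the whole
suspend gap and a perfectly stable TSC would look wildly off.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t tsc_hz = 3000000000ULL;  /* assumed 3 GHz TSC */
        const uint64_t suspend_s = 60;          /* assumed suspend duration */

        uint64_t wd_last = 123456789ULL;        /* reference saved before suspend */
        /* counter value seen at the first watchdog tick after resume */
        uint64_t csnow = wd_last + suspend_s * tsc_hz + tsc_hz / 2;

        /* same delta computation as clocksource_watchdog(); the TSC mask is all ones */
        uint64_t delta = csnow - wd_last;

        printf("first post-resume delta: %llu cycles (~%llu s instead of ~0.5 s)\n",
               (unsigned long long)delta, (unsigned long long)(delta / tsc_hz));
        return 0;
}

With the patch, the resumed flag makes clocksource_watchdog() refresh
watchdog_last and each cs->wd_last on its next run instead of acting on
that bogus delta.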
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   include/linux/clocksource.h |  3
-rw-r--r--   kernel/time/clocksource.c   | 45
-rw-r--r--   kernel/timer.c              |  2
3 files changed, 50 insertions, 0 deletions
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 2665ca04cf8f..bf297b03a4e4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -49,6 +49,7 @@ struct clocksource;
  * @shift:		cycle to nanosecond divisor (power of two)
  * @flags:		flags describing special properties
  * @vread:		vsyscall based read
+ * @resume:		resume function for the clocksource, if necessary
  * @cycle_interval:	Used internally by timekeeping core, please ignore.
  * @xtime_interval:	Used internally by timekeeping core, please ignore.
  */
@@ -65,6 +66,7 @@ struct clocksource {
 	u32 shift;
 	unsigned long flags;
 	cycle_t (*vread)(void);
+	void (*resume)(void);
 
 	/* timekeeping specific data, ignore */
 	cycle_t cycle_interval;
@@ -209,6 +211,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 extern int clocksource_register(struct clocksource*);
 extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_resume(void);
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index db0c725de5ea..3db5c3c460d7 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -74,6 +74,8 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
+static int watchdog_resumed;
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
@@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data)
 	struct clocksource *cs, *tmp;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
+	int resumed;
 
 	spin_lock(&watchdog_lock);
 
+	resumed = watchdog_resumed;
+	if (unlikely(resumed))
+		watchdog_resumed = 0;
+
 	wdnow = watchdog->read();
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		csnow = cs->read();
+
+		if (unlikely(resumed)) {
+			cs->wd_last = csnow;
+			continue;
+		}
+
 		/* Initialized ? */
 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
 			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
@@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data)
 	}
 	spin_unlock(&watchdog_lock);
 }
+static void clocksource_resume_watchdog(void)
+{
+	spin_lock(&watchdog_lock);
+	watchdog_resumed = 1;
+	spin_unlock(&watchdog_lock);
+}
+
 static void clocksource_check_watchdog(struct clocksource *cs)
 {
 	struct clocksource *cse;
@@ -182,9 +202,34 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
+
+static inline void clocksource_resume_watchdog(void) { }
 #endif
 
 /**
+ * clocksource_resume - resume the clocksource(s)
+ */
+void clocksource_resume(void)
+{
+	struct list_head *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clocksource_lock, flags);
+
+	list_for_each(tmp, &clocksource_list) {
+		struct clocksource *cs;
+
+		cs = list_entry(tmp, struct clocksource, list);
+		if (cs->resume)
+			cs->resume();
+	}
+
+	clocksource_resume_watchdog();
+
+	spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+
+/**
  * clocksource_get_next - Returns the selected clocksource
  *
  */
diff --git a/kernel/timer.c b/kernel/timer.c
index de85f8491c1d..59a28b1752f8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1499,6 +1499,8 @@ unregister_time_interpolator(struct time_interpolator *ti)
 		prev = &curr->next;
 	}
 
+	clocksource_resume();
+
 	write_seqlock_irqsave(&xtime_lock, flags);
 	if (ti == time_interpolator) {
 		/* we lost the best time-interpolator: */