author     Linus Torvalds <torvalds@linux-foundation.org>   2018-08-13 13:02:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-08-13 13:02:31 -0700
commit     1e45e9a95ec277272f73439629b6e3fe1f047e92 (patch)
tree       d7236a7917fe6e44a625491228fb26bd5af49d38 /kernel/time
parent     8603596a327c978534f5c45db135e6c36b4b1425 (diff)
parent     fbfa9260085b5b578a049a90135e5c51928c5f7f (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:
"The timers departement more or less proudly presents:
- More Y2038 timekeeping work mostly in the core code. The work is
slowly, but steadily targeting the actuall syscalls.
- Enhanced timekeeping suspend/resume support by utilizing
clocksources which do not stop during suspend, but are otherwise
not the main timekeeping clocksources.
- Make NTP adjustmets more accurate and immediate when the frequency
is set directly and not incrementally.
- Sanitize the overrung handing of posix timers
- A new timer driver for Mediatek SoCs
- The usual pile of fixes and updates all over the place"
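As background for the suspend/resume item above: the new suspend clocksource code converts a raw counter delta into nanoseconds with the clocksource's mult/shift pair, exactly as the diff below does with clocksource_delta() and mul_u64_u32_shr(). A minimal userspace sketch of that arithmetic — all constants here are invented for illustration, not taken from any real driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe cycle delta, modelled on the kernel's clocksource_delta(). */
    static uint64_t cycle_delta(uint64_t now, uint64_t start, uint64_t mask)
    {
            return (now - start) & mask;
    }

    /* Cycles -> ns: (delta * mult) >> shift, what mul_u64_u32_shr() computes. */
    static uint64_t cycles_to_ns(uint64_t delta, uint32_t mult, uint32_t shift)
    {
            return (uint64_t)(((unsigned __int128)delta * mult) >> shift);
    }

    int main(void)
    {
            /* Hypothetical 32.768 kHz always-on counter: mult/shift chosen so
             * one cycle is ~30517.58 ns (1e9 / 32768). Illustrative only. */
            uint64_t mask = (1ULL << 32) - 1;
            uint32_t mult = 2000000000U, shift = 16;
            uint64_t start = 0xfffffff0, now = 0x00000200; /* counter wrapped */

            uint64_t delta = cycle_delta(now, start, mask);
            printf("slept ~%llu ns\n",
                   (unsigned long long)cycles_to_ns(delta, mult, shift));
            return 0;
    }

The masked subtraction is what makes the computation safe across a counter wrap during suspend; the 128-bit intermediate avoids overflow of delta * mult.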
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
clockevents: Warn if cpu_all_mask is used as cpumask
tick/broadcast-hrtimer: Use cpu_possible_mask for ce_broadcast_hrtimer
clocksource/drivers/arm_arch_timer: Fix bogus cpu_all_mask usage
clocksource: ti-32k: Remove CLOCK_SOURCE_SUSPEND_NONSTOP flag
timers: Clear timer_base::must_forward_clk with timer_base::lock held
clocksource/drivers/sprd: Register one always-on timer to compensate suspend time
clocksource/drivers/timer-mediatek: Add support for system timer
clocksource/drivers/timer-mediatek: Convert the driver to timer-of
clocksource/drivers/timer-mediatek: Use specific prefix for GPT
clocksource/drivers/timer-mediatek: Rename mtk_timer to timer-mediatek
clocksource/drivers/timer-mediatek: Add system timer bindings
clocksource/drivers: Set clockevent device cpumask to cpu_possible_mask
time: Introduce one suspend clocksource to compensate the suspend time
time: Fix extra sleeptime injection when suspend fails
timekeeping/ntp: Constify some function arguments
ntp: Use kstrtos64 for s64 variable
ntp: Remove redundant arguments
timer: Fix coding style
ktime: Provide typesafe ktime_to_ns()
hrtimer: Improve kernel message printing
...
Diffstat (limited to 'kernel/time')
 kernel/time/alarmtimer.c             |   7
 kernel/time/clockevents.c            |   6
 kernel/time/clocksource.c            | 149
 kernel/time/hrtimer.c                |   7
 kernel/time/ntp.c                    |  23
 kernel/time/ntp_internal.h           |   4
 kernel/time/posix-cpu-timers.c       |   2
 kernel/time/posix-stubs.c            |   2
 kernel/time/posix-timers.c           |  55
 kernel/time/posix-timers.h           |   2
 kernel/time/tick-broadcast-hrtimer.c |   2
 kernel/time/time.c                   |  31
 kernel/time/timekeeping.c            | 121
 kernel/time/timekeeping_debug.c      |   2
 kernel/time/timekeeping_internal.h   |   2
 kernel/time/timer.c                  |  31
 16 files changed, 340 insertions(+), 106 deletions(-)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 639321bf2e39..fa5de5e8de61 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -581,11 +581,11 @@ static void alarm_timer_rearm(struct k_itimer *timr)
  * @timr:	Pointer to the posixtimer data struct
  * @now:	Current time to forward the timer against
  */
-static int alarm_timer_forward(struct k_itimer *timr, ktime_t now)
+static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now)
 {
 	struct alarm *alarm = &timr->it.alarm.alarmtimer;
 
-	return (int) alarm_forward(alarm, timr->it_interval, now);
+	return alarm_forward(alarm, timr->it_interval, now);
 }
 
 /**
@@ -808,7 +808,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
 	/* Convert (if necessary) to absolute time */
 	if (flags != TIMER_ABSTIME) {
 		ktime_t now = alarm_bases[type].gettime();
-		exp = ktime_add(now, exp);
+
+		exp = ktime_add_safe(now, exp);
 	}
 
 	ret = alarmtimer_do_nsleep(&alarm, exp, type);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 16c027e9cc73..8c0e4092f661 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -463,6 +463,12 @@ void clockevents_register_device(struct clock_event_device *dev)
 		dev->cpumask = cpumask_of(smp_processor_id());
 	}
 
+	if (dev->cpumask == cpu_all_mask) {
+		WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
+		     dev->name);
+		dev->cpumask = cpu_possible_mask;
+	}
+
 	raw_spin_lock_irqsave(&clockevents_lock, flags);
 
 	list_add(&dev->list, &clockevent_devices);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f89a78e2792b..f74fb00d8064 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -94,6 +94,8 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
 /*[Clocksource internal variables]---------
  * curr_clocksource:
  *	currently selected clocksource.
+ * suspend_clocksource:
+ *	used to calculate the suspend time.
  * clocksource_list:
  *	linked list with the registered clocksources
  * clocksource_mutex:
@@ -102,10 +104,12 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
  *	Name of the user-specified clocksource.
  */
 static struct clocksource *curr_clocksource;
+static struct clocksource *suspend_clocksource;
 static LIST_HEAD(clocksource_list);
 static DEFINE_MUTEX(clocksource_mutex);
 static char override_name[CS_NAME_LEN];
 static int finished_booting;
+static u64 suspend_start;
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static void clocksource_watchdog_work(struct work_struct *work);
@@ -447,6 +451,140 @@ static inline void clocksource_watchdog_unlock(unsigned long *flags) { }
 
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
+static bool clocksource_is_suspend(struct clocksource *cs)
+{
+	return cs == suspend_clocksource;
+}
+
+static void __clocksource_suspend_select(struct clocksource *cs)
+{
+	/*
+	 * Skip the clocksource which will be stopped in suspend state.
+	 */
+	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
+		return;
+
+	/*
+	 * The nonstop clocksource can be selected as the suspend clocksource to
+	 * calculate the suspend time, so it should not supply suspend/resume
+	 * interfaces to suspend the nonstop clocksource when system suspends.
+	 */
+	if (cs->suspend || cs->resume) {
+		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
+			cs->name);
+	}
+
+	/* Pick the best rating. */
+	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
+		suspend_clocksource = cs;
+}
+
+/**
+ * clocksource_suspend_select - Select the best clocksource for suspend timing
+ * @fallback: if select a fallback clocksource
+ */
+static void clocksource_suspend_select(bool fallback)
+{
+	struct clocksource *cs, *old_suspend;
+
+	old_suspend = suspend_clocksource;
+	if (fallback)
+		suspend_clocksource = NULL;
+
+	list_for_each_entry(cs, &clocksource_list, list) {
+		/* Skip current if we were requested for a fallback. */
+		if (fallback && cs == old_suspend)
+			continue;
+
+		__clocksource_suspend_select(cs);
+	}
+}
+
+/**
+ * clocksource_start_suspend_timing - Start measuring the suspend timing
+ * @cs: current clocksource from timekeeping
+ * @start_cycles: current cycles from timekeeping
+ *
+ * This function will save the start cycle values of suspend timer to calculate
+ * the suspend time when resuming system.
+ *
+ * This function is called late in the suspend process from timekeeping_suspend(),
+ * that means processes are freezed, non-boot cpus and interrupts are disabled
+ * now. It is therefore possible to start the suspend timer without taking the
+ * clocksource mutex.
+ */
+void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
+{
+	if (!suspend_clocksource)
+		return;
+
+	/*
+	 * If current clocksource is the suspend timer, we should use the
+	 * tkr_mono.cycle_last value as suspend_start to avoid same reading
+	 * from suspend timer.
+	 */
+	if (clocksource_is_suspend(cs)) {
+		suspend_start = start_cycles;
+		return;
+	}
+
+	if (suspend_clocksource->enable &&
+	    suspend_clocksource->enable(suspend_clocksource)) {
+		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
+		return;
+	}
+
+	suspend_start = suspend_clocksource->read(suspend_clocksource);
+}
+
+/**
+ * clocksource_stop_suspend_timing - Stop measuring the suspend timing
+ * @cs: current clocksource from timekeeping
+ * @cycle_now: current cycles from timekeeping
+ *
+ * This function will calculate the suspend time from suspend timer.
+ *
+ * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
+ *
+ * This function is called early in the resume process from timekeeping_resume(),
+ * that means there is only one cpu, no processes are running and the interrupts
+ * are disabled. It is therefore possible to stop the suspend timer without
+ * taking the clocksource mutex.
+ */
+u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
+{
+	u64 now, delta, nsec = 0;
+
+	if (!suspend_clocksource)
+		return 0;
+
+	/*
+	 * If current clocksource is the suspend timer, we should use the
+	 * tkr_mono.cycle_last value from timekeeping as current cycle to
+	 * avoid same reading from suspend timer.
+	 */
+	if (clocksource_is_suspend(cs))
+		now = cycle_now;
+	else
+		now = suspend_clocksource->read(suspend_clocksource);
+
+	if (now > suspend_start) {
+		delta = clocksource_delta(now, suspend_start,
+					  suspend_clocksource->mask);
+		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
+				       suspend_clocksource->shift);
+	}
+
+	/*
+	 * Disable the suspend timer to save power if current clocksource is
+	 * not the suspend timer.
+	 */
+	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
+		suspend_clocksource->disable(suspend_clocksource);
+
+	return nsec;
+}
+
 /**
  * clocksource_suspend - suspend the clocksource(s)
  */
@@ -792,6 +930,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 
 	clocksource_select();
 	clocksource_select_watchdog(false);
+	__clocksource_suspend_select(cs);
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
@@ -820,6 +959,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
 
 	clocksource_select();
 	clocksource_select_watchdog(false);
+	clocksource_suspend_select(false);
 	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -845,6 +985,15 @@ static int clocksource_unbind(struct clocksource *cs)
 		return -EBUSY;
 	}
 
+	if (clocksource_is_suspend(cs)) {
+		/*
+		 * Select and try to install a replacement suspend clocksource.
+		 * If no replacement suspend clocksource, we will just let the
+		 * clocksource go and have no suspend clocksource.
+		 */
+		clocksource_suspend_select(true);
+	}
+
 	clocksource_watchdog_lock(&flags);
 	clocksource_dequeue_watchdog(cs);
 	list_del_init(&cs->list);
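Per __clocksource_suspend_select() above, a clocksource is eligible as the suspend clocksource only if it carries CLOCK_SOURCE_SUSPEND_NONSTOP, and it should not provide suspend/resume callbacks; the highest rating wins. A hypothetical driver fragment showing the shape of an eligible clocksource — the names, register offset, and frequency are invented, and this is a sketch against the in-kernel API, not buildable on its own:

    #include <linux/clocksource.h>
    #include <linux/io.h>
    #include <linux/init.h>

    #define ALWAYSON_CNT	0x0		/* invented register offset */

    static void __iomem *alwayson_base;	/* mapped elsewhere by the driver */

    static u64 alwayson_read(struct clocksource *cs)
    {
    	return readl_relaxed(alwayson_base + ALWAYSON_CNT);
    }

    static struct clocksource alwayson_cs = {
    	.name	= "alwayson-timer",	/* invented name */
    	.rating	= 200,
    	.read	= alwayson_read,
    	.mask	= CLOCKSOURCE_MASK(32),
    	/* SUSPEND_NONSTOP makes this a suspend-clocksource candidate.
    	 * Deliberately no .suspend/.resume callbacks, per
    	 * __clocksource_suspend_select(). */
    	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
    };

    static int __init alwayson_init(void)
    {
    	/* Registration also runs __clocksource_suspend_select() on it. */
    	return clocksource_register_hz(&alwayson_cs, 32768);
    }

This is the pattern the sprd and ti-32k changes in the commit list follow: an always-on counter too slow to be the main timekeeping clocksource can still compensate suspend time.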
*/ - getnstimeofday64(&now); + ktime_get_real_ts64(&now); if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) { if (persistent_clock_is_local) adjust.tv_sec -= (sys_tz.tz_minuteswest * 60); @@ -642,7 +642,7 @@ void ntp_notify_cmos_timer(void) /* * Propagate a new txc->status value into the NTP state: */ -static inline void process_adj_status(struct timex *txc, struct timespec64 *ts) +static inline void process_adj_status(const struct timex *txc) { if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { time_state = TIME_OK; @@ -665,12 +665,10 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts) } -static inline void process_adjtimex_modes(struct timex *txc, - struct timespec64 *ts, - s32 *time_tai) +static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai) { if (txc->modes & ADJ_STATUS) - process_adj_status(txc, ts); + process_adj_status(txc); if (txc->modes & ADJ_NANO) time_status |= STA_NANO; @@ -718,7 +716,7 @@ static inline void process_adjtimex_modes(struct timex *txc, * adjtimex mainly allows reading (and writing, if superuser) of * kernel time-keeping variables. used by xntpd. */ -int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai) +int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai) { int result; @@ -735,7 +733,7 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai) /* If there are input parameters, then process them: */ if (txc->modes) - process_adjtimex_modes(txc, ts, time_tai); + process_adjtimex_modes(txc, time_tai); txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT); @@ -1022,12 +1020,11 @@ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_t static int __init ntp_tick_adj_setup(char *str) { - int rc = kstrtol(str, 0, (long *)&ntp_tick_adj); - + int rc = kstrtos64(str, 0, &ntp_tick_adj); if (rc) return rc; - ntp_tick_adj <<= NTP_SCALE_SHIFT; + ntp_tick_adj <<= NTP_SCALE_SHIFT; return 1; } diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h index 909bd1f1bfb1..c24b0e13f011 100644 --- a/kernel/time/ntp_internal.h +++ b/kernel/time/ntp_internal.h @@ -8,6 +8,6 @@ extern void ntp_clear(void); extern u64 ntp_tick_length(void); extern ktime_t ntp_get_next_leap(void); extern int second_overflow(time64_t secs); -extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); -extern void __hardpps(const struct timespec64 *, const struct timespec64 *); +extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai); +extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts); #endif /* _LINUX_NTP_INTERNAL_H */ diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 9cdf54b04ca8..294d7b65af33 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -85,7 +85,7 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now) continue; timer->it.cpu.expires += incr; - timer->it_overrun += 1 << i; + timer->it_overrun += 1LL << i; delta -= incr; } } diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c index 26aa9569e24a..2c6847d5d69b 100644 --- a/kernel/time/posix-stubs.c +++ b/kernel/time/posix-stubs.c @@ -81,7 +81,7 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp) ktime_get_ts64(tp); break; case CLOCK_BOOTTIME: - get_monotonic_boottime64(tp); + ktime_get_boottime_ts64(tp); break; default: return -EINVAL; diff --git a/kernel/time/posix-timers.c 
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index e08ce3f27447..3ac7295306dc 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -228,21 +228,21 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
 */
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
 {
-	getrawmonotonic64(tp);
+	ktime_get_raw_ts64(tp);
 	return 0;
 }
 
 static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
 {
-	*tp = current_kernel_time64();
+	ktime_get_coarse_real_ts64(tp);
 	return 0;
 }
 
 static int posix_get_monotonic_coarse(clockid_t which_clock, struct timespec64 *tp)
 {
-	*tp = get_monotonic_coarse64();
+	ktime_get_coarse_ts64(tp);
 	return 0;
 }
 
@@ -254,13 +254,13 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
 
 static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
 {
-	get_monotonic_boottime64(tp);
+	ktime_get_boottime_ts64(tp);
 	return 0;
 }
 
 static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
 {
-	timekeeping_clocktai64(tp);
+	ktime_get_clocktai_ts64(tp);
 	return 0;
 }
 
@@ -283,6 +283,17 @@ static __init int init_posix_timers(void)
 }
 __initcall(init_posix_timers);
 
+/*
+ * The siginfo si_overrun field and the return value of timer_getoverrun(2)
+ * are of type int. Clamp the overrun value to INT_MAX
+ */
+static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
+{
+	s64 sum = timr->it_overrun_last + (s64)baseval;
+
+	return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
+}
+
 static void common_hrtimer_rearm(struct k_itimer *timr)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
@@ -290,9 +301,8 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
 	if (!timr->it_interval)
 		return;
 
-	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
-						timer->base->get_time(),
-						timr->it_interval);
+	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
+					    timr->it_interval);
 	hrtimer_restart(timer);
 }
 
@@ -321,10 +331,10 @@ void posixtimer_rearm(struct siginfo *info)
 
 		timr->it_active = 1;
 		timr->it_overrun_last = timr->it_overrun;
-		timr->it_overrun = -1;
+		timr->it_overrun = -1LL;
 		++timr->it_requeue_pending;
 
-		info->si_overrun += timr->it_overrun_last;
+		info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
 	}
 
 	unlock_timer(timr, flags);
@@ -418,9 +428,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 			now = ktime_add(now, kj);
 		}
 #endif
-		timr->it_overrun += (unsigned int)
-			hrtimer_forward(timer, now,
-					timr->it_interval);
+		timr->it_overrun += hrtimer_forward(timer, now,
+						    timr->it_interval);
 		ret = HRTIMER_RESTART;
 		++timr->it_requeue_pending;
 		timr->it_active = 1;
@@ -524,7 +533,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event,
 	new_timer->it_id = (timer_t) new_timer_id;
 	new_timer->it_clock = which_clock;
 	new_timer->kclock = kc;
-	new_timer->it_overrun = -1;
+	new_timer->it_overrun = -1LL;
 
 	if (event) {
 		rcu_read_lock();
@@ -645,11 +654,11 @@ static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
 	return __hrtimer_expires_remaining_adjusted(timer, now);
 }
 
-static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
+static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
 {
 	struct hrtimer *timer = &timr->it.real.timer;
 
-	return (int)hrtimer_forward(timer, now, timr->it_interval);
+	return hrtimer_forward(timer, now, timr->it_interval);
 }
 
 /*
@@ -743,7 +752,7 @@ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
 
 /* Get the time remaining on a POSIX.1b interval timer. */
 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
-		struct itimerspec __user *, setting)
+		struct __kernel_itimerspec __user *, setting)
 {
 	struct itimerspec64 cur_setting;
@@ -755,7 +764,8 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
 	return ret;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
+
 COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
 		       struct compat_itimerspec __user *, setting)
 {
@@ -768,6 +778,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
 	}
 	return ret;
 }
+
 #endif
 
 /*
@@ -789,7 +800,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 	if (!timr)
 		return -EINVAL;
 
-	overrun = timr->it_overrun_last;
+	overrun = timer_overrun_to_int(timr, 0);
 	unlock_timer(timr, flags);
 
 	return overrun;
@@ -906,8 +917,8 @@ retry:
 
 /* Set a POSIX.1b interval timer */
 SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
-		const struct itimerspec __user *, new_setting,
-		struct itimerspec __user *, old_setting)
+		const struct __kernel_itimerspec __user *, new_setting,
+		struct __kernel_itimerspec __user *, old_setting)
 {
 	struct itimerspec64 new_spec, old_spec;
 	struct itimerspec64 *rtn = old_setting ? &old_spec : NULL;
@@ -927,7 +938,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
 	return error;
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
 COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
 		       struct compat_itimerspec __user *, new,
 		       struct compat_itimerspec __user *, old)
diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h
index 151e28f5bf30..ddb21145211a 100644
--- a/kernel/time/posix-timers.h
+++ b/kernel/time/posix-timers.h
@@ -19,7 +19,7 @@ struct k_clock {
 	void	(*timer_get)(struct k_itimer *timr,
 			     struct itimerspec64 *cur_setting);
 	void	(*timer_rearm)(struct k_itimer *timr);
-	int	(*timer_forward)(struct k_itimer *timr, ktime_t now);
+	s64	(*timer_forward)(struct k_itimer *timr, ktime_t now);
 	ktime_t	(*timer_remaining)(struct k_itimer *timr, ktime_t now);
 	int	(*timer_try_to_cancel)(struct k_itimer *timr);
 	void	(*timer_arm)(struct k_itimer *timr, ktime_t expires,
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 58045eb976c3..a59641fb88b6 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -90,7 +90,7 @@ static struct clock_event_device ce_broadcast_hrtimer = {
 	.max_delta_ticks	= ULONG_MAX,
 	.mult			= 1,
 	.shift			= 0,
-	.cpumask		= cpu_all_mask,
+	.cpumask		= cpu_possible_mask,
 };
 
 static enum hrtimer_restart bc_handler(struct hrtimer *t)
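The posix-timers hunks above widen the internal overrun accounting to s64 and clamp only at the user-visible boundary, since siginfo's si_overrun and timer_getoverrun(2) are int. A standalone model of the new timer_overrun_to_int() helper (the real one takes a struct k_itimer; this mirrors just the arithmetic):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Clamp a 64-bit overrun count to int range for the syscall/siginfo ABI. */
    static int timer_overrun_to_int(int64_t it_overrun_last, int baseval)
    {
            int64_t sum = it_overrun_last + (int64_t)baseval;

            return sum > (int64_t)INT_MAX ? INT_MAX : (int)sum;
    }

    int main(void)
    {
            /* A fast interval timer left pending long enough can exceed INT_MAX;
             * previously the cast to (unsigned int)/int made this wrap. */
            printf("%d\n", timer_overrun_to_int(5000000000LL, 0)); /* 2147483647 */
            printf("%d\n", timer_overrun_to_int(42, 1));           /* 43 */
            return 0;
    }

Keeping the accumulation in 64 bits and clamping once on readout means the kernel never loses the count internally; only the reported value saturates.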
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 2b41e8e2d31d..ccdb351277ee 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(sys_tz);
 */
 SYSCALL_DEFINE1(time, time_t __user *, tloc)
 {
-	time_t i = get_seconds();
+	time_t i = (time_t)ktime_get_real_seconds();
 
 	if (tloc) {
 		if (put_user(i,tloc))
@@ -107,11 +107,9 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
 /* compat_time_t is a 32 bit "long" and needs to get converted. */
 COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
 {
-	struct timeval tv;
 	compat_time_t i;
 
-	do_gettimeofday(&tv);
-	i = tv.tv_sec;
+	i = (compat_time_t)ktime_get_real_seconds();
 
 	if (tloc) {
 		if (put_user(i,tloc))
@@ -931,7 +929,7 @@ int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
 EXPORT_SYMBOL_GPL(compat_put_timespec64);
 
 int get_itimerspec64(struct itimerspec64 *it,
-		     const struct itimerspec __user *uit)
+		     const struct __kernel_itimerspec __user *uit)
 {
 	int ret;
 
@@ -946,7 +944,7 @@ int get_itimerspec64(struct itimerspec64 *it,
 EXPORT_SYMBOL_GPL(get_itimerspec64);
 
 int put_itimerspec64(const struct itimerspec64 *it,
-		     struct itimerspec __user *uit)
+		     struct __kernel_itimerspec __user *uit)
 {
 	int ret;
 
@@ -959,3 +957,24 @@ int put_itimerspec64(const struct itimerspec64 *it,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(put_itimerspec64);
+
+int get_compat_itimerspec64(struct itimerspec64 *its,
+			    const struct compat_itimerspec __user *uits)
+{
+
+	if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
+	    __compat_get_timespec64(&its->it_value, &uits->it_value))
+		return -EFAULT;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
+
+int put_compat_itimerspec64(const struct itimerspec64 *its,
+			    struct compat_itimerspec __user *uits)
+{
+	if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
+	    __compat_put_timespec64(&its->it_value, &uits->it_value))
+		return -EFAULT;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
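The time.c hunks above are part of the Y2038 work: the kernel now reads wall-clock seconds via ktime_get_real_seconds(), which returns a 64-bit time64_t, and truncates to time_t/compat_time_t only at the syscall boundary. On a 32-bit ABI that truncation still wraps in 2038, which is why the syscall signatures themselves are being migrated (see the __kernel_itimerspec changes). A standalone illustration of the wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t real_seconds = 0x80000000LL; /* 2038-01-19 03:14:08 UTC */

            /* What a 32-bit time_t / compat_time_t makes of it: */
            int32_t t32 = (int32_t)real_seconds;

            printf("64-bit: %lld, 32-bit view: %d\n",
                   (long long)real_seconds, t32); /* wraps to -2147483648 */
            return 0;
    }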
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4786df904c22..d9e659a12c76 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -34,6 +34,14 @@
 #define TK_MIRROR		(1 << 1)
 #define TK_CLOCK_WAS_SET	(1 << 2)
 
+enum timekeeping_adv_mode {
+	/* Update timekeeper when a tick has passed */
+	TK_ADV_TICK,
+
+	/* Update timekeeper on a direct frequency change */
+	TK_ADV_FREQ
+};
+
 /*
  * The most important data for readout fits into a single 64 byte
  * cache line.
@@ -97,7 +105,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
 	}
 }
 
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 {
 	struct timespec64 ts;
 
@@ -154,7 +162,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 * a read of the fast-timekeeper tkrs (which is protected by its own locking
 * and update logic).
 */
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
+static inline u64 tk_clock_read(const struct tk_read_base *tkr)
 {
 	struct clocksource *clock = READ_ONCE(tkr->clock);
 
@@ -203,7 +211,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
 	}
 }
 
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	u64 now, last, mask, max, delta;
@@ -247,7 +255,7 @@ static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset) { }
-static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
 {
 	u64 cycle_now, delta;
@@ -344,7 +352,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
 static inline u32 arch_gettimeoffset(void) { return 0; }
 #endif
 
-static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
+static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
 {
 	u64 nsec;
 
@@ -355,7 +363,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
 	return nsec + arch_gettimeoffset();
 }
 
-static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
+static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 {
 	u64 delta;
 
@@ -363,7 +371,7 @@ static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
 	return timekeeping_delta_to_ns(tkr, delta);
 }
 
-static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
+static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
 	u64 delta;
 
@@ -386,7 +394,8 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
-static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
+static void update_fast_timekeeper(const struct tk_read_base *tkr,
+				   struct tk_fast *tkf)
 {
 	struct tk_read_base *base = tkf->base;
 
@@ -541,10 +550,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
-static void halt_fast_timekeeper(struct timekeeper *tk)
+static void halt_fast_timekeeper(const struct timekeeper *tk)
 {
 	static struct tk_read_base tkr_dummy;
-	struct tk_read_base *tkr = &tk->tkr_mono;
+	const struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
 	cycles_at_suspend = tk_clock_read(tkr);
@@ -1269,7 +1278,7 @@ EXPORT_SYMBOL(do_settimeofday64);
 *
 * Adds or subtracts an offset value from the current time.
 */
-static int timekeeping_inject_offset(struct timespec64 *ts)
+static int timekeeping_inject_offset(const struct timespec64 *ts)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned long flags;
@@ -1510,8 +1519,20 @@ void __weak read_boot_clock64(struct timespec64 *ts)
 	ts->tv_nsec = 0;
 }
 
-/* Flag for if timekeeping_resume() has injected sleeptime */
-static bool sleeptime_injected;
+/*
+ * Flag reflecting whether timekeeping_resume() has injected sleeptime.
+ *
+ * The flag starts of false and is only set when a suspend reaches
+ * timekeeping_suspend(), timekeeping_resume() sets it to false when the
+ * timekeeper clocksource is not stopping across suspend and has been
+ * used to update sleep time. If the timekeeper clocksource has stopped
+ * then the flag stays true and is used by the RTC resume code to decide
+ * whether sleeptime must be injected and if so the flag gets false then.
+ *
+ * If a suspend fails before reaching timekeeping_resume() then the flag
+ * stays false and prevents erroneous sleeptime injection.
+ */
+static bool suspend_timing_needed;
 
 /* Flag for if there is a persistent clock on this platform */
 static bool persistent_clock_exists;
@@ -1577,7 +1598,7 @@ static struct timespec64 timekeeping_suspend_time;
 * adds the sleep offset to the timekeeping variables.
 */
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
-					   struct timespec64 *delta)
+					   const struct timespec64 *delta)
 {
 	if (!timespec64_valid_strict(delta)) {
 		printk_deferred(KERN_WARNING
@@ -1610,7 +1631,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
 */
 bool timekeeping_rtc_skipresume(void)
 {
-	return sleeptime_injected;
+	return !suspend_timing_needed;
 }
 
 /**
@@ -1638,7 +1659,7 @@ bool timekeeping_rtc_skipsuspend(void)
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
-void timekeeping_inject_sleeptime64(struct timespec64 *delta)
+void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	unsigned long flags;
@@ -1646,6 +1667,8 @@ void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 
+	suspend_timing_needed = false;
+
 	timekeeping_forward_now(tk);
 
 	__timekeeping_inject_sleeptime(tk, delta);
@@ -1669,9 +1692,9 @@ void timekeeping_resume(void)
 	struct clocksource *clock = tk->tkr_mono.clock;
 	unsigned long flags;
 	struct timespec64 ts_new, ts_delta;
-	u64 cycle_now;
+	u64 cycle_now, nsec;
+	bool inject_sleeptime = false;
 
-	sleeptime_injected = false;
 	read_persistent_clock64(&ts_new);
 
 	clockevents_resume();
@@ -1693,22 +1716,19 @@ void timekeeping_resume(void)
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
 	cycle_now = tk_clock_read(&tk->tkr_mono);
-	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-	    cycle_now > tk->tkr_mono.cycle_last) {
-		u64 nsec, cyc_delta;
-
-		cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
-					      tk->tkr_mono.mask);
-		nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
+	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
+	if (nsec > 0) {
 		ts_delta = ns_to_timespec64(nsec);
-		sleeptime_injected = true;
+		inject_sleeptime = true;
 	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
 		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
-		sleeptime_injected = true;
+		inject_sleeptime = true;
 	}
 
-	if (sleeptime_injected)
+	if (inject_sleeptime) {
+		suspend_timing_needed = false;
 		__timekeeping_inject_sleeptime(tk, &ts_delta);
+	}
 
 	/* Re-base the last cycle value */
 	tk->tkr_mono.cycle_last = cycle_now;
@@ -1732,6 +1752,8 @@ int timekeeping_suspend(void)
 	unsigned long flags;
 	struct timespec64		delta, delta_delta;
 	static struct timespec64	old_delta;
+	struct clocksource *curr_clock;
+	u64 cycle_now;
 
 	read_persistent_clock64(&timekeeping_suspend_time);
 
@@ -1743,11 +1765,22 @@ int timekeeping_suspend(void)
 	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
 		persistent_clock_exists = true;
 
+	suspend_timing_needed = true;
+
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&tk_core.seq);
 	timekeeping_forward_now(tk);
 	timekeeping_suspended = 1;
 
+	/*
+	 * Since we've called forward_now, cycle_last stores the value
+	 * just read from the current clocksource. Save this to potentially
+	 * use in suspend timing.
+	 */
+	curr_clock = tk->tkr_mono.clock;
+	cycle_now = tk->tkr_mono.cycle_last;
+	clocksource_start_suspend_timing(curr_clock, cycle_now);
+
 	if (persistent_clock_exists) {
 		/*
 		 * To avoid drift caused by repeated suspend/resumes,
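The timekeeping_resume() hunk above establishes a clear preference order for sleeptime: nanoseconds from the nonstop suspend clocksource if it produced any, else the coarse persistent-clock delta, else defer to the RTC resume path (gated by suspend_timing_needed). A compressed userspace model of that decision, with invented values and the persistent clock at one-second resolution:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the timekeeping_resume() sleeptime choice:
     * 1) fine-grained ns from the nonstop suspend clocksource, if any,
     * 2) else the (coarse) persistent-clock delta,
     * 3) else no injection here; the RTC resume code may inject later. */
    static int64_t pick_sleeptime_ns(uint64_t suspend_ns,
                                     int64_t persist_before, int64_t persist_after)
    {
            if (suspend_ns > 0)
                    return (int64_t)suspend_ns;
            if (persist_after > persist_before)
                    return (persist_after - persist_before) * 1000000000LL;
            return -1; /* defer to RTC resume */
    }

    int main(void)
    {
            printf("%lld\n", (long long)pick_sleeptime_ns(1500000000ULL, 100, 102));
            printf("%lld\n", (long long)pick_sleeptime_ns(0, 100, 102));
            printf("%lld\n", (long long)pick_sleeptime_ns(0, 100, 100));
            return 0;
    }

The "time: Fix extra sleeptime injection when suspend fails" change in the commit list is visible here too: suspend_timing_needed is only armed in timekeeping_suspend(), so an aborted suspend can no longer cause the RTC path to inject bogus sleeptime.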
@@ -2021,11 +2054,11 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 	return offset;
 }
 
-/**
- * update_wall_time - Uses the current clocksource to increment the wall time
- *
- */
-void update_wall_time(void)
+/*
+ * timekeeping_advance - Updates the timekeeper to the current time and
+ * current NTP tick length
+ */
+static void timekeeping_advance(enum timekeeping_adv_mode mode)
 {
 	struct timekeeper *real_tk = &tk_core.timekeeper;
 	struct timekeeper *tk = &shadow_timekeeper;
@@ -2042,14 +2075,17 @@ static void timekeeping_advance(enum timekeeping_adv_mode mode)
 
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
+
+	if (mode != TK_ADV_TICK)
+		goto out;
 #else
 	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
-#endif
 
 	/* Check if there's really nothing to do */
-	if (offset < real_tk->cycle_interval)
+	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
 		goto out;
+#endif
 
 	/* Do some additional sanity checking */
 	timekeeping_check_update(tk, offset);
@@ -2106,6 +2142,15 @@ out:
 }
 
 /**
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ */
+void update_wall_time(void)
+{
+	timekeeping_advance(TK_ADV_TICK);
+}
+
+/**
  * getboottime64 - Return the real time of system boot.
  * @ts:		pointer to the timespec64 to be set
 *
@@ -2220,7 +2265,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
 /**
 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
 */
-static int timekeeping_validate_timex(struct timex *txc)
+static int timekeeping_validate_timex(const struct timex *txc)
 {
 	if (txc->modes & ADJ_ADJTIME) {
 		/* singleshot must not be used with any other mode bits */
@@ -2310,7 +2355,7 @@ int do_adjtimex(struct timex *txc)
 		return ret;
 	}
 
-	getnstimeofday64(&ts);
+	ktime_get_real_ts64(&ts);
 
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
 	write_seqcount_begin(&tk_core.seq);
@@ -2327,6 +2372,10 @@ int do_adjtimex(struct timex *txc)
 	write_seqcount_end(&tk_core.seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
+	/* Update the multiplier immediately if frequency was set directly */
+	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
+		timekeeping_advance(TK_ADV_FREQ);
+
 	if (tai != orig_tai)
 		clock_was_set();
 
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 0754cadfa9e6..238e4be60229 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -70,7 +70,7 @@ static int __init tk_debug_sleep_time_init(void)
 }
 late_initcall(tk_debug_sleep_time_init);
 
-void tk_debug_account_sleep_time(struct timespec64 *t)
+void tk_debug_account_sleep_time(const struct timespec64 *t)
 {
 	/* Cap bin index so we don't overflow the array */
 	int bin = min(fls(t->tv_sec), NUM_BINS-1);
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h
index cf5c0828ee31..bcbb52db2256 100644
--- a/kernel/time/timekeeping_internal.h
+++ b/kernel/time/timekeeping_internal.h
@@ -8,7 +8,7 @@
 #include <linux/time.h>
 
 #ifdef CONFIG_DEBUG_FS
-extern void tk_debug_account_sleep_time(struct timespec64 *t);
+extern void tk_debug_account_sleep_time(const struct timespec64 *t);
 #else
 #define tk_debug_account_sleep_time(x)
 #endif
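The do_adjtimex() hunk above is what makes NTP frequency changes "immediate": a directly set frequency (ADJ_FREQUENCY/ADJ_TICK) now triggers timekeeping_advance(TK_ADV_FREQ) right away instead of waiting for the next tick, while an ordinary tick still bails out early when less than one cycle_interval has elapsed. A toy model of just that early-exit rule:

    #include <stdbool.h>
    #include <stdio.h>

    enum tk_adv_mode { TK_ADV_TICK, TK_ADV_FREQ };

    /* Model of timekeeping_advance()'s "anything to do?" check: a plain tick
     * waits for a full cycle_interval, but a direct frequency change must
     * recompute the multiplier immediately. */
    static bool should_advance(enum tk_adv_mode mode,
                               unsigned long long offset,
                               unsigned long long cycle_interval)
    {
            if (offset < cycle_interval && mode == TK_ADV_TICK)
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d\n", should_advance(TK_ADV_TICK, 10, 1000)); /* 0: wait */
            printf("%d\n", should_advance(TK_ADV_FREQ, 10, 1000)); /* 1: now  */
            return 0;
    }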
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index cc2d23e6ff61..fa49cd753dea 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -581,7 +581,7 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 	 * wheel:
 	 */
 	base->next_expiry = timer->expires;
-	wake_up_nohz_cpu(base->cpu);
+	wake_up_nohz_cpu(base->cpu);
 }
 
 static void
@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
 
 	raw_spin_lock_irq(&base->lock);
 
+	/*
+	 * timer_base::must_forward_clk must be cleared before running
+	 * timers so that any timer functions that call mod_timer() will
+	 * not try to forward the base. Idle tracking / clock forwarding
+	 * logic is only used with BASE_STD timers.
+	 *
+	 * The must_forward_clk flag is cleared unconditionally also for
+	 * the deferrable base. The deferrable base is not affected by idle
+	 * tracking and never forwarded, so clearing the flag is a NOOP.
+	 *
+	 * The fact that the deferrable base is never forwarded can cause
+	 * large variations in granularity for deferrable timers, but they
+	 * can be deferred for long periods due to idle anyway.
+	 */
+	base->must_forward_clk = false;
+
 	while (time_after_eq(jiffies, base->clk)) {
 
 		levels = collect_expired_timers(base, heads);
@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
-	/*
-	 * must_forward_clk must be cleared before running timers so that any
-	 * timer functions that call mod_timer will not try to forward the
-	 * base. idle trcking / clock forwarding logic is only used with
-	 * BASE_STD timers.
-	 *
-	 * The deferrable base does not do idle tracking at all, so we do
-	 * not forward it. This can result in very large variations in
-	 * granularity for deferrable timers, but they can be deferred for
-	 * long periods due to idle.
-	 */
-	base->must_forward_clk = false;
-
 	__run_timers(base);
 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
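The timer.c change above moves the clearing of must_forward_clk inside the base->lock critical section: previously run_timer_softirq() cleared it lock-free, so a concurrent mod_timer() on another CPU could observe the flag in an inconsistent state relative to base->clk and forward the base incorrectly. A loose userspace illustration of the required ordering, with a pthread mutex standing in for base->lock (the struct and expiry loop are simplified stand-ins, not the kernel's data structures):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Miniature timer_base: the point of the fix is that must_forward_clk is
     * only read/written under the lock, so the softirq and mod_timer() can
     * never disagree about whether base->clk may still be forwarded. */
    struct timer_base {
            pthread_mutex_t lock;
            bool must_forward_clk;
            unsigned long clk;
    };

    static void run_timers(struct timer_base *base, unsigned long jiffies)
    {
            pthread_mutex_lock(&base->lock);
            /* Cleared under the lock (the fix); previously this happened
             * before taking the lock, racing with forward_timer_base(). */
            base->must_forward_clk = false;
            while (base->clk < jiffies)
                    base->clk++; /* stand-in for expiring one level */
            pthread_mutex_unlock(&base->lock);
    }

    int main(void)
    {
            struct timer_base base = { PTHREAD_MUTEX_INITIALIZER, true, 0 };

            run_timers(&base, 5);
            printf("clk=%lu forward=%d\n", base.clk, (int)base.must_forward_clk);
            return 0;
    }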