From f5a89295e2f566d8e9f1d4f3d524d8d3c966958c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 16:08:00 +0200 Subject: time: Use ktime_get_real_seconds() in time syscall Both get_seconds() and do_gettimeofday() are deprecated. Change the time() implementation to use the replacement function instead. Obviously the system call will still overflow in 2038, but this gets us closer to removing the old helper functions. Signed-off-by: Arnd Bergmann Signed-off-by: Thomas Gleixner Cc: John Stultz Cc: y2038@lists.linaro.org Cc: Stephen Boyd Cc: Deepa Dinamani Cc: Al Viro Link: https://lkml.kernel.org/r/20180618140811.2998503-2-arnd@arndb.de --- kernel/time/time.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/time.c b/kernel/time/time.c index 6fa99213fc72..b1225db61eb2 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -63,7 +63,7 @@ EXPORT_SYMBOL(sys_tz); */ SYSCALL_DEFINE1(time, time_t __user *, tloc) { - time_t i = get_seconds(); + time_t i = (time_t)ktime_get_real_seconds(); if (tloc) { if (put_user(i,tloc)) @@ -106,11 +106,9 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr) /* compat_time_t is a 32 bit "long" and needs to get converted. */ COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc) { - struct timeval tv; compat_time_t i; - do_gettimeofday(&tv); - i = tv.tv_sec; + i = (compat_time_t)ktime_get_real_seconds(); if (tloc) { if (put_user(i,tloc)) -- cgit v1.2.3-58-ga151 From d30faff900e666f9a6395a159fdd353c02f5bed0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 16:08:01 +0200 Subject: timekeeping: Use ktime_get_real_ts64() instead of getnstimeofday64() The two do the same, this moves all users to the newer name for consistency. Signed-off-by: Arnd Bergmann Signed-off-by: Thomas Gleixner Cc: John Stultz Cc: y2038@lists.linaro.org Cc: Stephen Boyd Cc: Miroslav Lichvar Link: https://lkml.kernel.org/r/20180618140811.2998503-3-arnd@arndb.de --- kernel/time/ntp.c | 6 +++--- kernel/time/timekeeping.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index a09ded765f6c..10a79053e82f 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -502,7 +502,7 @@ static void sched_sync_hw_clock(struct timespec64 now, { struct timespec64 next; - getnstimeofday64(&next); + ktime_get_real_ts64(&next); if (!fail) next.tv_sec = 659; else { @@ -537,7 +537,7 @@ static void sync_rtc_clock(void) if (!IS_ENABLED(CONFIG_RTC_SYSTOHC)) return; - getnstimeofday64(&now); + ktime_get_real_ts64(&now); adjust = now; if (persistent_clock_is_local) @@ -591,7 +591,7 @@ static bool sync_cmos_clock(void) * Architectures are strongly encouraged to use rtclib and not * implement this legacy API. 
*/ - getnstimeofday64(&now); + ktime_get_real_ts64(&now); if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) { if (persistent_clock_is_local) adjust.tv_sec -= (sys_tz.tz_minuteswest * 60); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 4786df904c22..77c436a0070b 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2310,7 +2310,7 @@ int do_adjtimex(struct timex *txc) return ret; } - getnstimeofday64(&ts); + ktime_get_real_ts64(&ts); raw_spin_lock_irqsave(&timekeeper_lock, flags); write_seqcount_begin(&tk_core.seq); -- cgit v1.2.3-58-ga151 From 58a10456d7175fa674b951f1fd7ead3d9a550db4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 16:32:24 +0200 Subject: posix-timers: Use new ktime_get_*_ts64() helpers Some of the oddly named time accessor functions now have a more consistent naming, which should be used from now on so the aliases can be removed. Signed-off-by: Arnd Bergmann Signed-off-by: Thomas Gleixner Cc: y2038@lists.linaro.org Cc: Deepa Dinamani Cc: "Eric W. Biederman" Link: https://lkml.kernel.org/r/20180618143246.3865099-1-arnd@arndb.de --- kernel/time/posix-stubs.c | 2 +- kernel/time/posix-timers.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c index 26aa9569e24a..2c6847d5d69b 100644 --- a/kernel/time/posix-stubs.c +++ b/kernel/time/posix-stubs.c @@ -81,7 +81,7 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp) ktime_get_ts64(tp); break; case CLOCK_BOOTTIME: - get_monotonic_boottime64(tp); + ktime_get_boottime_ts64(tp); break; default: return -EINVAL; diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index e08ce3f27447..fcf90a10c43a 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -228,21 +228,21 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp) */ static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp) { - getrawmonotonic64(tp); + ktime_get_raw_ts64(tp); return 0; } static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp) { - *tp = current_kernel_time64(); + ktime_get_coarse_real_ts64(tp); return 0; } static int posix_get_monotonic_coarse(clockid_t which_clock, struct timespec64 *tp) { - *tp = get_monotonic_coarse64(); + ktime_get_coarse_ts64(tp); return 0; } @@ -254,13 +254,13 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 * static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp) { - get_monotonic_boottime64(tp); + ktime_get_boottime_ts64(tp); return 0; } static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) { - timekeeping_clocktai64(tp); + ktime_get_clocktai_ts64(tp); return 0; } -- cgit v1.2.3-58-ga151 From d0dd63a8aee1ef89f2e48e554b796b9f9e4fcadb Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Sat, 16 Jun 2018 22:11:42 -0700 Subject: time: Introduce struct __kernel_itimerspec struct itimerspec is not y2038-safe. Introduce a new struct __kernel_itimerspec based on the kernel internal y2038-safe struct itimerspec64. The definition of struct __kernel_itimerspec includes two struct __kernel_timespec. Since struct __kernel_timespec has the same representation in native and compat modes, so does struct __kernel_itimerspec. This helps have a common entry point for syscalls using struct __kernel_itimerspec. New y2038-safe syscalls will use this new type. 
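As a point of reference (a rough sketch, field types paraphrased rather than copied from the uapi headers), struct __kernel_timespec is built purely from 64-bit members, which is why its layout is identical for native and compat callers:

    struct __kernel_timespec {
            long long tv_sec;       /* 64 bits on 32-bit and 64-bit ABIs alike */
            long long tv_nsec;
    };

Embedding two of these in struct __kernel_itimerspec therefore produces one byte layout that a 32-bit compat task and a native 64-bit task can both hand to the same syscall entry point.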
Since most of the new syscalls are just an update to the native syscalls with the type update, place the new definition under CONFIG_64BIT_TIME. This helps architectures that do not support the above config to keep using the old definition of struct itimerspec. Also change the get/put_itimerspec64 to use struct__kernel_itimerspec. This will help 32 bit architectures to use the new syscalls when architectures select CONFIG_64BIT_TIME. Signed-off-by: Deepa Dinamani Signed-off-by: Thomas Gleixner Cc: arnd@arndb.de Cc: viro@zeniv.linux.org.uk Cc: linux-fsdevel@vger.kernel.org Cc: linux-api@vger.kernel.org Cc: y2038@lists.linaro.org Link: https://lkml.kernel.org/r/20180617051144.29756-2-deepa.kernel@gmail.com --- include/linux/time.h | 4 ++-- include/linux/time64.h | 1 + include/uapi/linux/time.h | 7 +++++++ kernel/time/time.c | 4 ++-- 4 files changed, 12 insertions(+), 4 deletions(-) (limited to 'kernel/time') diff --git a/include/linux/time.h b/include/linux/time.h index aed74463592d..27d83fd2ae61 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts, int put_timespec64(const struct timespec64 *ts, struct __kernel_timespec __user *uts); int get_itimerspec64(struct itimerspec64 *it, - const struct itimerspec __user *uit); + const struct __kernel_itimerspec __user *uit); int put_itimerspec64(const struct itimerspec64 *it, - struct itimerspec __user *uit); + struct __kernel_itimerspec __user *uit); extern time64_t mktime64(const unsigned int year, const unsigned int mon, const unsigned int day, const unsigned int hour, diff --git a/include/linux/time64.h b/include/linux/time64.h index 0a7b2f79cec7..05634afba0db 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -12,6 +12,7 @@ typedef __u64 timeu64_t; */ #ifndef CONFIG_64BIT_TIME #define __kernel_timespec timespec +#define __kernel_itimerspec itimerspec #endif #include diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h index fcf936656493..6b56a2208be7 100644 --- a/include/uapi/linux/time.h +++ b/include/uapi/linux/time.h @@ -49,6 +49,13 @@ struct __kernel_timespec { }; #endif +#ifndef __kernel_itimerspec +struct __kernel_itimerspec { + struct __kernel_timespec it_interval; /* timer period */ + struct __kernel_timespec it_value; /* timer expiration */ +}; +#endif + /* * legacy timeval structure, only embedded in structures that * traditionally used 'timeval' to pass time intervals (not absolute diff --git a/kernel/time/time.c b/kernel/time/time.c index b1225db61eb2..c0195225fdce 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -927,7 +927,7 @@ int compat_put_timespec64(const struct timespec64 *ts, void __user *uts) EXPORT_SYMBOL_GPL(compat_put_timespec64); int get_itimerspec64(struct itimerspec64 *it, - const struct itimerspec __user *uit) + const struct __kernel_itimerspec __user *uit) { int ret; @@ -942,7 +942,7 @@ int get_itimerspec64(struct itimerspec64 *it, EXPORT_SYMBOL_GPL(get_itimerspec64); int put_itimerspec64(const struct itimerspec64 *it, - struct itimerspec __user *uit) + struct __kernel_itimerspec __user *uit) { int ret; -- cgit v1.2.3-58-ga151 From afef05cf238cfcecdc5ea9b06f31027b13ce6214 Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Sat, 16 Jun 2018 22:11:43 -0700 Subject: time: Enable get/put_compat_itimerspec64 always This will aid in enabling the compat syscalls on 32-bit architectures later on. Also move compat_itimerspec and related defines to compat_time.h. The compat_time.h file will eventually be deleted. 
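As a condensed sketch of the caller side (not part of this patch, and the real compat syscall carries more logic), keeping these helpers available everywhere lets a compat entry point convert the 32-bit layout before handing an itimerspec64 to the common code:

    COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
                           struct compat_itimerspec __user *, new,
                           struct compat_itimerspec __user *, old)
    {
            struct itimerspec64 new_spec;

            if (get_compat_itimerspec64(&new_spec, new))
                    return -EFAULT;
            /* ... arm the timer using the 64-bit representation ... */
    }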
Signed-off-by: Deepa Dinamani Signed-off-by: Thomas Gleixner Cc: arnd@arndb.de Cc: viro@zeniv.linux.org.uk Cc: linux-fsdevel@vger.kernel.org Cc: linux-api@vger.kernel.org Cc: y2038@lists.linaro.org Link: https://lkml.kernel.org/r/20180617051144.29756-3-deepa.kernel@gmail.com --- include/linux/compat.h | 9 --------- include/linux/compat_time.h | 9 +++++++++ kernel/compat.c | 29 ----------------------------- kernel/time/time.c | 21 +++++++++++++++++++++ 4 files changed, 30 insertions(+), 38 deletions(-) (limited to 'kernel/time') diff --git a/include/linux/compat.h b/include/linux/compat.h index b1a5562b3215..18a13f2df14b 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -109,11 +109,6 @@ typedef compat_ulong_t compat_aio_context_t; struct compat_sel_arg_struct; struct rusage; -struct compat_itimerspec { - struct compat_timespec it_interval; - struct compat_timespec it_value; -}; - struct compat_utimbuf { compat_time_t actime; compat_time_t modtime; @@ -294,10 +289,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *); extern int compat_put_timespec(const struct timespec *, void __user *); extern int compat_get_timeval(struct timeval *, const void __user *); extern int compat_put_timeval(const struct timeval *, void __user *); -extern int get_compat_itimerspec64(struct itimerspec64 *its, - const struct compat_itimerspec __user *uits); -extern int put_compat_itimerspec64(const struct itimerspec64 *its, - struct compat_itimerspec __user *uits); struct compat_iovec { compat_uptr_t iov_base; diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h index 31f2774f1994..e70bfd1d2c3f 100644 --- a/include/linux/compat_time.h +++ b/include/linux/compat_time.h @@ -17,7 +17,16 @@ struct compat_timeval { s32 tv_usec; }; +struct compat_itimerspec { + struct compat_timespec it_interval; + struct compat_timespec it_value; +}; + extern int compat_get_timespec64(struct timespec64 *, const void __user *); extern int compat_put_timespec64(const struct timespec64 *, void __user *); +extern int get_compat_itimerspec64(struct itimerspec64 *its, + const struct compat_itimerspec __user *uits); +extern int put_compat_itimerspec64(const struct itimerspec64 *its, + struct compat_itimerspec __user *uits); #endif /* _LINUX_COMPAT_TIME_H */ diff --git a/kernel/compat.c b/kernel/compat.c index 702aa846ddac..8e40efc2928a 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -324,35 +324,6 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len, return ret; } -/* Todo: Delete these extern declarations when get/put_compat_itimerspec64() - * are moved to kernel/time/time.c . 
- */ -extern int __compat_get_timespec64(struct timespec64 *ts64, - const struct compat_timespec __user *cts); -extern int __compat_put_timespec64(const struct timespec64 *ts64, - struct compat_timespec __user *cts); - -int get_compat_itimerspec64(struct itimerspec64 *its, - const struct compat_itimerspec __user *uits) -{ - - if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) || - __compat_get_timespec64(&its->it_value, &uits->it_value)) - return -EFAULT; - return 0; -} -EXPORT_SYMBOL_GPL(get_compat_itimerspec64); - -int put_compat_itimerspec64(const struct itimerspec64 *its, - struct compat_itimerspec __user *uits) -{ - if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) || - __compat_put_timespec64(&its->it_value, &uits->it_value)) - return -EFAULT; - return 0; -} -EXPORT_SYMBOL_GPL(put_compat_itimerspec64); - /* * We currently only need the following fields from the sigevent * structure: sigev_value, sigev_signo, sig_notify and (sometimes diff --git a/kernel/time/time.c b/kernel/time/time.c index c0195225fdce..72caf051901c 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -955,3 +955,24 @@ int put_itimerspec64(const struct itimerspec64 *it, return ret; } EXPORT_SYMBOL_GPL(put_itimerspec64); + +int get_compat_itimerspec64(struct itimerspec64 *its, + const struct compat_itimerspec __user *uits) +{ + + if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) || + __compat_get_timespec64(&its->it_value, &uits->it_value)) + return -EFAULT; + return 0; +} +EXPORT_SYMBOL_GPL(get_compat_itimerspec64); + +int put_compat_itimerspec64(const struct itimerspec64 *its, + struct compat_itimerspec __user *uits) +{ + if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) || + __compat_put_timespec64(&its->it_value, &uits->it_value)) + return -EFAULT; + return 0; +} +EXPORT_SYMBOL_GPL(put_compat_itimerspec64); -- cgit v1.2.3-58-ga151 From 6ff84735070276d72af716e21c3214ee20d60e70 Mon Sep 17 00:00:00 2001 From: Deepa Dinamani Date: Sat, 16 Jun 2018 22:11:44 -0700 Subject: time: Change types to new y2038 safe __kernel_itimerspec timer_set/gettime and timerfd_set/get apis use struct itimerspec at the user interface layer. struct itimerspec is not y2038-safe. Change these interfaces to use y2038-safe struct __kernel_itimerspec instead. This will help define new syscalls when 32bit architectures select CONFIG_64BIT_TIME. 
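From the application's point of view the interface keeps its familiar shape; a minimal user-space sketch (plain POSIX timer API, not taken from the patch; link with -lrt on older glibc) of the struct itimerspec these syscalls consume:

    #include <signal.h>
    #include <time.h>
    #include <unistd.h>

    static void on_tick(int sig)
    {
            (void)sig;      /* expiry just wakes up pause() below */
    }

    int main(void)
    {
            timer_t tid;
            struct sigevent sev = {
                    .sigev_notify = SIGEV_SIGNAL,
                    .sigev_signo  = SIGALRM,
            };
            struct itimerspec its = {
                    .it_value    = { .tv_sec = 1, .tv_nsec = 0 },           /* first expiry after 1s */
                    .it_interval = { .tv_sec = 0, .tv_nsec = 500000000 },   /* then every 500ms */
            };

            signal(SIGALRM, on_tick);
            timer_create(CLOCK_MONOTONIC, &sev, &tid);
            timer_settime(tid, 0, &its, NULL);
            for (;;)
                    pause();
    }

On a 32-bit system built with a 64-bit time_t, the same source simply ends up passing the wider __kernel_itimerspec to the new syscall entry points.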
Signed-off-by: Deepa Dinamani Signed-off-by: Thomas Gleixner Cc: arnd@arndb.de Cc: viro@zeniv.linux.org.uk Cc: linux-fsdevel@vger.kernel.org Cc: linux-api@vger.kernel.org Cc: y2038@lists.linaro.org Link: https://lkml.kernel.org/r/20180617051144.29756-4-deepa.kernel@gmail.com --- fs/timerfd.c | 8 ++++---- include/linux/syscalls.h | 10 +++++----- kernel/time/posix-timers.c | 12 +++++++----- 3 files changed, 16 insertions(+), 14 deletions(-) (limited to 'kernel/time') diff --git a/fs/timerfd.c b/fs/timerfd.c index d84a2bee4f82..8bb926253f88 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -533,8 +533,8 @@ static int do_timerfd_gettime(int ufd, struct itimerspec64 *t) } SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags, - const struct itimerspec __user *, utmr, - struct itimerspec __user *, otmr) + const struct __kernel_itimerspec __user *, utmr, + struct __kernel_itimerspec __user *, otmr) { struct itimerspec64 new, old; int ret; @@ -550,7 +550,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags, return ret; } -SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr) +SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *, otmr) { struct itimerspec64 kotmr; int ret = do_timerfd_gettime(ufd, &kotmr); @@ -559,7 +559,7 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr) return put_itimerspec64(&kotmr, otmr) ? -EFAULT : 0; } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags, const struct compat_itimerspec __user *, utmr, struct compat_itimerspec __user *, otmr) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 73810808cdf2..38b9ec152024 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -501,9 +501,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, /* fs/timerfd.c */ asmlinkage long sys_timerfd_create(int clockid, int flags); asmlinkage long sys_timerfd_settime(int ufd, int flags, - const struct itimerspec __user *utmr, - struct itimerspec __user *otmr); -asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); + const struct __kernel_itimerspec __user *utmr, + struct __kernel_itimerspec __user *otmr); +asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr); /* fs/utimes.c */ asmlinkage long sys_utimensat(int dfd, const char __user *filename, @@ -568,10 +568,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock, struct sigevent __user *timer_event_spec, timer_t __user * created_timer_id); asmlinkage long sys_timer_gettime(timer_t timer_id, - struct itimerspec __user *setting); + struct __kernel_itimerspec __user *setting); asmlinkage long sys_timer_getoverrun(timer_t timer_id); asmlinkage long sys_timer_settime(timer_t timer_id, int flags, - const struct itimerspec __user *new_setting, + const struct __kernel_itimerspec __user *new_setting, struct itimerspec __user *old_setting); asmlinkage long sys_timer_delete(timer_t timer_id); asmlinkage long sys_clock_settime(clockid_t which_clock, diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index fcf90a10c43a..80d59333c76e 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -743,7 +743,7 @@ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting) /* Get the time remaining on a POSIX.1b interval timer. 
*/ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, - struct itimerspec __user *, setting) + struct __kernel_itimerspec __user *, setting) { struct itimerspec64 cur_setting; @@ -755,7 +755,8 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, return ret; } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_COMPAT_32BIT_TIME + COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, struct compat_itimerspec __user *, setting) { @@ -768,6 +769,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, } return ret; } + #endif /* @@ -906,8 +908,8 @@ retry: /* Set a POSIX.1b interval timer */ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, - const struct itimerspec __user *, new_setting, - struct itimerspec __user *, old_setting) + const struct __kernel_itimerspec __user *, new_setting, + struct __kernel_itimerspec __user *, old_setting) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old_setting ? &old_spec : NULL; @@ -927,7 +929,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, return error; } -#ifdef CONFIG_COMPAT +#ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, struct compat_itimerspec __user *, new, struct compat_itimerspec __user *, old) -- cgit v1.2.3-58-ga151 From 6fec64e1c92d5c715c6d0f50786daa7708266bde Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 26 Jun 2018 15:21:31 +0200 Subject: posix-timers: Make forward callback return s64 The posix timer ti_overrun handling is broken because the forwarding functions can return a huge number of overruns which does not fit in an int. As a consequence timer_getoverrun(2) and siginfo::si_overrun can turn into random number generators. As a first step to address that let the timer_forward() callbacks return the full 64 bit value. Cast it to (int) temporarily until k_itimer::ti_overrun is converted to 64bit and the conversion to user space visible values is sanitized. 
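To see why the (int) return type was a problem, a purely illustrative calculation (interval and sleep duration invented for the example):

    u64 elapsed_ns  = 6ULL * 3600 * 1000000000;  /* signal blocked for 6 hours       */
    u64 interval_ns = 10000;                     /* 10 microsecond timer period      */
    s64 overruns    = elapsed_ns / interval_ns;  /* 2160000000 missed intervals      */
    int truncated   = (int)overruns;             /* > INT_MAX, typically wraps to -2134967296 */

Anything beyond roughly 2^31 forwarded intervals turned the overrun count into garbage, which is exactly what the widened callback return type avoids.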
Reported-by: Team OWL337 Signed-off-by: Thomas Gleixner Acked-by: John Stultz Cc: Peter Zijlstra Cc: Michael Kerrisk Link: https://lkml.kernel.org/r/20180626132704.922098090@linutronix.de --- kernel/time/alarmtimer.c | 4 ++-- kernel/time/posix-timers.c | 6 +++--- kernel/time/posix-timers.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 639321bf2e39..78a3cc555823 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -581,11 +581,11 @@ static void alarm_timer_rearm(struct k_itimer *timr) * @timr: Pointer to the posixtimer data struct * @now: Current time to forward the timer against */ -static int alarm_timer_forward(struct k_itimer *timr, ktime_t now) +static s64 alarm_timer_forward(struct k_itimer *timr, ktime_t now) { struct alarm *alarm = &timr->it.alarm.alarmtimer; - return (int) alarm_forward(alarm, timr->it_interval, now); + return alarm_forward(alarm, timr->it_interval, now); } /** diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 80d59333c76e..db1d65963a57 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -645,11 +645,11 @@ static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now) return __hrtimer_expires_remaining_adjusted(timer, now); } -static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now) +static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; - return (int)hrtimer_forward(timer, now, timr->it_interval); + return hrtimer_forward(timer, now, timr->it_interval); } /* @@ -702,7 +702,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) * expiry time forward by intervals, so expiry is > now. */ if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none)) - timr->it_overrun += kc->timer_forward(timr, now); + timr->it_overrun += (int)kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* Return 0 only, when the timer is expired and not pending */ diff --git a/kernel/time/posix-timers.h b/kernel/time/posix-timers.h index 151e28f5bf30..ddb21145211a 100644 --- a/kernel/time/posix-timers.h +++ b/kernel/time/posix-timers.h @@ -19,7 +19,7 @@ struct k_clock { void (*timer_get)(struct k_itimer *timr, struct itimerspec64 *cur_setting); void (*timer_rearm)(struct k_itimer *timr); - int (*timer_forward)(struct k_itimer *timr, ktime_t now); + s64 (*timer_forward)(struct k_itimer *timr, ktime_t now); ktime_t (*timer_remaining)(struct k_itimer *timr, ktime_t now); int (*timer_try_to_cancel)(struct k_itimer *timr); void (*timer_arm)(struct k_itimer *timr, ktime_t expires, -- cgit v1.2.3-58-ga151 From 78c9c4dfbf8c04883941445a195276bb4bb92c76 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 26 Jun 2018 15:21:32 +0200 Subject: posix-timers: Sanitize overrun handling The posix timer overrun handling is broken because the forwarding functions can return a huge number of overruns which does not fit in an int. As a consequence timer_getoverrun(2) and siginfo::si_overrun can turn into random number generators. The k_clock::timer_forward() callbacks return a 64 bit value now. Make k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal accounting is correct. 3Remove the temporary (int) casts. Add a helper function which clamps the overrun value returned to user space via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value between 0 and INT_MAX. 
INT_MAX is an indicator for user space that the overrun value has been clamped. Reported-by: Team OWL337 Signed-off-by: Thomas Gleixner Acked-by: John Stultz Cc: Peter Zijlstra Cc: Michael Kerrisk Link: https://lkml.kernel.org/r/20180626132705.018623573@linutronix.de --- include/linux/posix-timers.h | 4 ++-- kernel/time/posix-cpu-timers.c | 2 +- kernel/time/posix-timers.c | 31 ++++++++++++++++++++----------- 3 files changed, 23 insertions(+), 14 deletions(-) (limited to 'kernel/time') diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index c85704fcdbd2..ee7e987ea1b4 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -95,8 +95,8 @@ struct k_itimer { clockid_t it_clock; timer_t it_id; int it_active; - int it_overrun; - int it_overrun_last; + s64 it_overrun; + s64 it_overrun_last; int it_requeue_pending; int it_sigev_notify; ktime_t it_interval; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 5a6251ac6f7a..562cc3891b57 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -85,7 +85,7 @@ static void bump_cpu_timer(struct k_itimer *timer, u64 now) continue; timer->it.cpu.expires += incr; - timer->it_overrun += 1 << i; + timer->it_overrun += 1LL << i; delta -= incr; } } diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index db1d65963a57..3ac7295306dc 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -283,6 +283,17 @@ static __init int init_posix_timers(void) } __initcall(init_posix_timers); +/* + * The siginfo si_overrun field and the return value of timer_getoverrun(2) + * are of type int. Clamp the overrun value to INT_MAX + */ +static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval) +{ + s64 sum = timr->it_overrun_last + (s64)baseval; + + return sum > (s64)INT_MAX ? INT_MAX : (int)sum; +} + static void common_hrtimer_rearm(struct k_itimer *timr) { struct hrtimer *timer = &timr->it.real.timer; @@ -290,9 +301,8 @@ static void common_hrtimer_rearm(struct k_itimer *timr) if (!timr->it_interval) return; - timr->it_overrun += (unsigned int) hrtimer_forward(timer, - timer->base->get_time(), - timr->it_interval); + timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(), + timr->it_interval); hrtimer_restart(timer); } @@ -321,10 +331,10 @@ void posixtimer_rearm(struct siginfo *info) timr->it_active = 1; timr->it_overrun_last = timr->it_overrun; - timr->it_overrun = -1; + timr->it_overrun = -1LL; ++timr->it_requeue_pending; - info->si_overrun += timr->it_overrun_last; + info->si_overrun = timer_overrun_to_int(timr, info->si_overrun); } unlock_timer(timr, flags); @@ -418,9 +428,8 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) now = ktime_add(now, kj); } #endif - timr->it_overrun += (unsigned int) - hrtimer_forward(timer, now, - timr->it_interval); + timr->it_overrun += hrtimer_forward(timer, now, + timr->it_interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; timr->it_active = 1; @@ -524,7 +533,7 @@ static int do_timer_create(clockid_t which_clock, struct sigevent *event, new_timer->it_id = (timer_t) new_timer_id; new_timer->it_clock = which_clock; new_timer->kclock = kc; - new_timer->it_overrun = -1; + new_timer->it_overrun = -1LL; if (event) { rcu_read_lock(); @@ -702,7 +711,7 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) * expiry time forward by intervals, so expiry is > now. 
*/ if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none)) - timr->it_overrun += (int)kc->timer_forward(timr, now); + timr->it_overrun += kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* Return 0 only, when the timer is expired and not pending */ @@ -791,7 +800,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) if (!timr) return -EINVAL; - overrun = timr->it_overrun_last; + overrun = timer_overrun_to_int(timr, 0); unlock_timer(timr, flags); return overrun; -- cgit v1.2.3-58-ga151 From 5f936e19cc0ef97dbe3a56e9498922ad5ba1edef Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Jul 2018 09:34:29 +0200 Subject: alarmtimer: Prevent overflow for relative nanosleep Air Icy reported: UBSAN: Undefined behaviour in kernel/time/alarmtimer.c:811:7 signed integer overflow: 1529859276030040771 + 9223372036854775807 cannot be represented in type 'long long int' Call Trace: alarm_timer_nsleep+0x44c/0x510 kernel/time/alarmtimer.c:811 __do_sys_clock_nanosleep kernel/time/posix-timers.c:1235 [inline] __se_sys_clock_nanosleep kernel/time/posix-timers.c:1213 [inline] __x64_sys_clock_nanosleep+0x326/0x4e0 kernel/time/posix-timers.c:1213 do_syscall_64+0xb8/0x3a0 arch/x86/entry/common.c:290 alarm_timer_nsleep() uses ktime_add() to add the current time and the relative expiry value. ktime_add() has no sanity checks so the addition can overflow when the relative timeout is large enough. Use ktime_add_safe() which has the necessary sanity checks in place and limits the result to the valid range. Fixes: 9a7adcf5c6de ("timers: Posix interface for alarm-timers") Reported-by: Team OWL337 Signed-off-by: Thomas Gleixner Cc: John Stultz Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1807020926360.1595@nanos.tec.linutronix.de --- kernel/time/alarmtimer.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel/time') diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 78a3cc555823..fa5de5e8de61 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -808,7 +808,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, /* Convert (if necessary) to absolute time */ if (flags != TIMER_ABSTIME) { ktime_t now = alarm_bases[type].gettime(); - exp = ktime_add(now, exp); + + exp = ktime_add_safe(now, exp); } ret = alarmtimer_do_nsleep(&alarm, exp, type); -- cgit v1.2.3-58-ga151 From b061c7a513afe14a68af41cec7c3476befc40e95 Mon Sep 17 00:00:00 2001 From: Miroslav Lichvar Date: Mon, 4 Jun 2018 15:34:21 +0200 Subject: timekeeping: Update multiplier when NTP frequency is set directly When the NTP frequency is set directly from userspace using the ADJ_FREQUENCY or ADJ_TICK timex mode, immediately update the timekeeper's multiplier instead of waiting for the next tick. This removes a hidden non-deterministic delay in setting of the frequency and allows an extremely tight control of the system clock with update rates close to or even exceeding the kernel HZ. The update is limited to archs using modern timekeeping (!ARCH_USES_GETTIMEOFFSET). 
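For context, a minimal user-space sketch of the path this affects (standard adjtimex(2) usage, not part of the patch; modifying the frequency requires CAP_SYS_TIME):

    #include <sys/timex.h>

    int main(void)
    {
            struct timex tx = {
                    .modes = ADJ_FREQUENCY,
                    .freq  = 50 << 16,   /* +50 ppm, scaled by 2^16 as adjtimex expects */
            };

            return adjtimex(&tx) < 0;
    }

With this change the new frequency is folded into the timekeeper multiplier inside do_adjtimex() itself rather than at the next tick, via the timekeeping_advance(TK_ADV_FREQ) call visible in the diff below.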
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Miroslav Lichvar Cc: Richard Cochran Cc: Prarit Bhargava Cc: Stephen Boyd Signed-off-by: Miroslav Lichvar Signed-off-by: John Stultz --- kernel/time/timekeeping.c | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 4786df904c22..67720110dc33 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -34,6 +34,14 @@ #define TK_MIRROR (1 << 1) #define TK_CLOCK_WAS_SET (1 << 2) +enum timekeeping_adv_mode { + /* Update timekeeper when a tick has passed */ + TK_ADV_TICK, + + /* Update timekeeper on a direct frequency change */ + TK_ADV_FREQ +}; + /* * The most important data for readout fits into a single 64 byte * cache line. @@ -2021,11 +2029,11 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset, return offset; } -/** - * update_wall_time - Uses the current clocksource to increment the wall time - * +/* + * timekeeping_advance - Updates the timekeeper to the current time and + * current NTP tick length */ -void update_wall_time(void) +static void timekeeping_advance(enum timekeeping_adv_mode mode) { struct timekeeper *real_tk = &tk_core.timekeeper; struct timekeeper *tk = &shadow_timekeeper; @@ -2042,14 +2050,17 @@ void update_wall_time(void) #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET offset = real_tk->cycle_interval; + + if (mode != TK_ADV_TICK) + goto out; #else offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), tk->tkr_mono.cycle_last, tk->tkr_mono.mask); -#endif /* Check if there's really nothing to do */ - if (offset < real_tk->cycle_interval) + if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK) goto out; +#endif /* Do some additional sanity checking */ timekeeping_check_update(tk, offset); @@ -2105,6 +2116,15 @@ out: clock_was_set_delayed(); } +/** + * update_wall_time - Uses the current clocksource to increment the wall time + * + */ +void update_wall_time(void) +{ + timekeeping_advance(TK_ADV_TICK); +} + /** * getboottime64 - Return the real time of system boot. * @ts: pointer to the timespec64 to be set @@ -2327,6 +2347,10 @@ int do_adjtimex(struct timex *txc) write_seqcount_end(&tk_core.seq); raw_spin_unlock_irqrestore(&timekeeper_lock, flags); + /* Update the multiplier immediately if frequency was set directly */ + if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK)) + timekeeping_advance(TK_ADV_FREQ); + if (tai != orig_tai) clock_was_set(); -- cgit v1.2.3-58-ga151 From 7a6e55375d5c584c7c11cfdcaa8ad9d6cccb419d Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 12 Jul 2018 16:41:18 +0200 Subject: hrtimer: Improve kernel message printing - Join split message for easier grepping, - Use pr_*() instead of printk*(), - Use %u to format unsigned cpu numbers. 
Signed-off-by: Geert Uytterhoeven Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20180712144118.8819-1-geert+renesas@glider.be --- kernel/time/hrtimer.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 055a4a728c00..5c2af16c7c14 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -718,8 +718,8 @@ static void hrtimer_switch_to_hres(void) struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases); if (tick_init_highres()) { - printk(KERN_WARNING "Could not switch to high resolution " - "mode on CPU %d\n", base->cpu); + pr_warn("Could not switch to high resolution mode on CPU %u\n", + base->cpu); return; } base->hres_active = 1; @@ -1573,8 +1573,7 @@ retry: else expires_next = ktime_add(now, delta); tick_program_event(expires_next, 1); - printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", - ktime_to_ns(delta)); + pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta)); } /* called with interrupts disabled */ -- cgit v1.2.3-58-ga151 From 30587589251a00974115e0815ac316980f48dbb5 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Mon, 16 Jul 2018 14:08:57 +0800 Subject: timer: Fix coding style The call to wake_up_nohz_cpu() is incorrectly indented. Remove the surplus TAB. Signed-off-by: Yi Wang Signed-off-by: Thomas Gleixner Reviewed-by: Jiang Biao Cc: john.stultz@linaro.org Cc: sboyd@kernel.org Cc: zhong.weidong@zte.com.cn CC: Anna-Maria Gleixner Link: https://lkml.kernel.org/r/1531721337-30284-1-git-send-email-wang.yi59@zte.com.cn --- kernel/time/timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/time') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index cc2d23e6ff61..baa528a24a73 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -581,7 +581,7 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) * wheel: */ base->next_expiry = timer->expires; - wake_up_nohz_cpu(base->cpu); + wake_up_nohz_cpu(base->cpu); } static void -- cgit v1.2.3-58-ga151 From 0f9987b63dcc68606b82a349bbb2016b392a2c34 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Fri, 13 Jul 2018 14:06:40 +0200 Subject: ntp: Remove redundant arguments The 'ts' argument of process_adj_status() and process_adjtimex_modes() is unused and can be safely removed. 
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Miroslav Lichvar Cc: Richard Cochran Cc: Prarit Bhargava Cc: Stephen Boyd Signed-off-by: Ondrej Mosnacek Signed-off-by: John Stultz --- kernel/time/ntp.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 10a79053e82f..3eddac25675f 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -642,7 +642,7 @@ void ntp_notify_cmos_timer(void) /* * Propagate a new txc->status value into the NTP state: */ -static inline void process_adj_status(struct timex *txc, struct timespec64 *ts) +static inline void process_adj_status(struct timex *txc) { if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { time_state = TIME_OK; @@ -665,12 +665,10 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts) } -static inline void process_adjtimex_modes(struct timex *txc, - struct timespec64 *ts, - s32 *time_tai) +static inline void process_adjtimex_modes(struct timex *txc, s32 *time_tai) { if (txc->modes & ADJ_STATUS) - process_adj_status(txc, ts); + process_adj_status(txc); if (txc->modes & ADJ_NANO) time_status |= STA_NANO; @@ -735,7 +733,7 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai) /* If there are input parameters, then process them: */ if (txc->modes) - process_adjtimex_modes(txc, ts, time_tai); + process_adjtimex_modes(txc, time_tai); txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT); -- cgit v1.2.3-58-ga151 From 86b2dcd4f02b86f6b5927fc0adcac777825f4030 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Fri, 13 Jul 2018 14:06:41 +0200 Subject: ntp: Use kstrtos64 for s64 variable ...instead of kstrtol with a dirty cast. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Miroslav Lichvar Cc: Richard Cochran Cc: Prarit Bhargava Cc: Stephen Boyd Signed-off-by: Ondrej Mosnacek Signed-off-by: John Stultz --- kernel/time/ntp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 3eddac25675f..a627cae8baab 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -1020,12 +1020,11 @@ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_t static int __init ntp_tick_adj_setup(char *str) { - int rc = kstrtol(str, 0, (long *)&ntp_tick_adj); - + int rc = kstrtos64(str, 0, &ntp_tick_adj); if (rc) return rc; - ntp_tick_adj <<= NTP_SCALE_SHIFT; + ntp_tick_adj <<= NTP_SCALE_SHIFT; return 1; } -- cgit v1.2.3-58-ga151 From 985e695074d35768cb04d65f58bca45f7bf1a99d Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Fri, 13 Jul 2018 14:06:42 +0200 Subject: timekeeping/ntp: Constify some function arguments Add 'const' to some function arguments and variables to make it easier to read the code. 
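Regarding the kstrtos64 conversion above: the cast it removes is more than a style issue. A condensed before/after sketch (error handling trimmed):

    static s64 ntp_tick_adj;

    /* before: on a 32-bit kernel, kstrtol() stores a 32-bit long through a
     * pointer that actually addresses 64 bits of storage, so half of
     * ntp_tick_adj is left unwritten (which half depends on endianness) */
    int rc = kstrtol(str, 0, (long *)&ntp_tick_adj);

    /* after: parse directly into the 64-bit variable */
    int rc = kstrtos64(str, 0, &ntp_tick_adj);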
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Miroslav Lichvar Cc: Richard Cochran Cc: Prarit Bhargava Cc: Stephen Boyd Signed-off-by: Ondrej Mosnacek [jstultz: Also fixup pre-existing checkpatch warnings for prototype arguments with no variable name] Signed-off-by: John Stultz --- include/linux/timekeeping.h | 2 +- kernel/time/ntp.c | 6 +++--- kernel/time/ntp_internal.h | 4 ++-- kernel/time/timekeeping.c | 29 +++++++++++++++-------------- kernel/time/timekeeping_debug.c | 2 +- kernel/time/timekeeping_internal.h | 2 +- 6 files changed, 23 insertions(+), 22 deletions(-) (limited to 'kernel/time') diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 86bc2026efce..edace6b656e9 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -177,7 +177,7 @@ static inline time64_t ktime_get_clocktai_seconds(void) extern bool timekeeping_rtc_skipsuspend(void); extern bool timekeeping_rtc_skipresume(void); -extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); +extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); /* * struct system_time_snapshot - simultaneous raw/real time capture with diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index a627cae8baab..c5e0cba3b39c 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -642,7 +642,7 @@ void ntp_notify_cmos_timer(void) /* * Propagate a new txc->status value into the NTP state: */ -static inline void process_adj_status(struct timex *txc) +static inline void process_adj_status(const struct timex *txc) { if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) { time_state = TIME_OK; @@ -665,7 +665,7 @@ static inline void process_adj_status(struct timex *txc) } -static inline void process_adjtimex_modes(struct timex *txc, s32 *time_tai) +static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai) { if (txc->modes & ADJ_STATUS) process_adj_status(txc); @@ -716,7 +716,7 @@ static inline void process_adjtimex_modes(struct timex *txc, s32 *time_tai) * adjtimex mainly allows reading (and writing, if superuser) of * kernel time-keeping variables. used by xntpd. 
*/ -int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai) +int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai) { int result; diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h index 909bd1f1bfb1..c24b0e13f011 100644 --- a/kernel/time/ntp_internal.h +++ b/kernel/time/ntp_internal.h @@ -8,6 +8,6 @@ extern void ntp_clear(void); extern u64 ntp_tick_length(void); extern ktime_t ntp_get_next_leap(void); extern int second_overflow(time64_t secs); -extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *); -extern void __hardpps(const struct timespec64 *, const struct timespec64 *); +extern int __do_adjtimex(struct timex *txc, const struct timespec64 *ts, s32 *time_tai); +extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts); #endif /* _LINUX_NTP_INTERNAL_H */ diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index ad779c2ec53a..7033ac1fee65 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -105,7 +105,7 @@ static inline void tk_normalize_xtime(struct timekeeper *tk) } } -static inline struct timespec64 tk_xtime(struct timekeeper *tk) +static inline struct timespec64 tk_xtime(const struct timekeeper *tk) { struct timespec64 ts; @@ -162,7 +162,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) * a read of the fast-timekeeper tkrs (which is protected by its own locking * and update logic). */ -static inline u64 tk_clock_read(struct tk_read_base *tkr) +static inline u64 tk_clock_read(const struct tk_read_base *tkr) { struct clocksource *clock = READ_ONCE(tkr->clock); @@ -211,7 +211,7 @@ static void timekeeping_check_update(struct timekeeper *tk, u64 offset) } } -static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) +static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr) { struct timekeeper *tk = &tk_core.timekeeper; u64 now, last, mask, max, delta; @@ -255,7 +255,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset) { } -static inline u64 timekeeping_get_delta(struct tk_read_base *tkr) +static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr) { u64 cycle_now, delta; @@ -352,7 +352,7 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset; static inline u32 arch_gettimeoffset(void) { return 0; } #endif -static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta) +static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta) { u64 nsec; @@ -363,7 +363,7 @@ static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta) return nsec + arch_gettimeoffset(); } -static inline u64 timekeeping_get_ns(struct tk_read_base *tkr) +static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr) { u64 delta; @@ -371,7 +371,7 @@ static inline u64 timekeeping_get_ns(struct tk_read_base *tkr) return timekeeping_delta_to_ns(tkr, delta); } -static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles) +static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles) { u64 delta; @@ -394,7 +394,8 @@ static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles) * slightly wrong timestamp (a few nanoseconds). See * @ktime_get_mono_fast_ns. 
*/ -static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf) +static void update_fast_timekeeper(const struct tk_read_base *tkr, + struct tk_fast *tkf) { struct tk_read_base *base = tkf->base; @@ -549,10 +550,10 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns); * number of cycles every time until timekeeping is resumed at which time the * proper readout base for the fast timekeeper will be restored automatically. */ -static void halt_fast_timekeeper(struct timekeeper *tk) +static void halt_fast_timekeeper(const struct timekeeper *tk) { static struct tk_read_base tkr_dummy; - struct tk_read_base *tkr = &tk->tkr_mono; + const struct tk_read_base *tkr = &tk->tkr_mono; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); cycles_at_suspend = tk_clock_read(tkr); @@ -1277,7 +1278,7 @@ EXPORT_SYMBOL(do_settimeofday64); * * Adds or subtracts an offset value from the current time. */ -static int timekeeping_inject_offset(struct timespec64 *ts) +static int timekeeping_inject_offset(const struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; unsigned long flags; @@ -1585,7 +1586,7 @@ static struct timespec64 timekeeping_suspend_time; * adds the sleep offset to the timekeeping variables. */ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, - struct timespec64 *delta) + const struct timespec64 *delta) { if (!timespec64_valid_strict(delta)) { printk_deferred(KERN_WARNING @@ -1646,7 +1647,7 @@ bool timekeeping_rtc_skipsuspend(void) * This function should only be called by rtc_resume(), and allows * a suspend offset to be injected into the timekeeping values. */ -void timekeeping_inject_sleeptime64(struct timespec64 *delta) +void timekeeping_inject_sleeptime64(const struct timespec64 *delta) { struct timekeeper *tk = &tk_core.timekeeper; unsigned long flags; @@ -2240,7 +2241,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, /** * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex */ -static int timekeeping_validate_timex(struct timex *txc) +static int timekeeping_validate_timex(const struct timex *txc) { if (txc->modes & ADJ_ADJTIME) { /* singleshot must not be used with any other mode bits */ diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index 0754cadfa9e6..238e4be60229 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c @@ -70,7 +70,7 @@ static int __init tk_debug_sleep_time_init(void) } late_initcall(tk_debug_sleep_time_init); -void tk_debug_account_sleep_time(struct timespec64 *t) +void tk_debug_account_sleep_time(const struct timespec64 *t) { /* Cap bin index so we don't overflow the array */ int bin = min(fls(t->tv_sec), NUM_BINS-1); diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h index cf5c0828ee31..bcbb52db2256 100644 --- a/kernel/time/timekeeping_internal.h +++ b/kernel/time/timekeeping_internal.h @@ -8,7 +8,7 @@ #include #ifdef CONFIG_DEBUG_FS -extern void tk_debug_account_sleep_time(struct timespec64 *t); +extern void tk_debug_account_sleep_time(const struct timespec64 *t); #else #define tk_debug_account_sleep_time(x) #endif -- cgit v1.2.3-58-ga151 From f473e5f467f6049370575390b08dc42131315d60 Mon Sep 17 00:00:00 2001 From: Mukesh Ojha Date: Tue, 17 Jul 2018 12:01:29 +0530 Subject: time: Fix extra sleeptime injection when suspend fails Currently, there exists a corner case assuming when there is only one clocksource e.g RTC, and system failed to go to suspend mode. 
While resume rtc_resume() injects the sleeptime as timekeeping_rtc_skipresume() returned 'false' (default value of sleeptime_injected) due to which we can see mismatch in timestamps. This issue can also come in a system where more than one clocksource are present and very first suspend fails. Success case: ------------ {sleeptime_injected=false} rtc_suspend() => timekeeping_suspend() => timekeeping_resume() => (sleeptime injected) rtc_resume() Failure case: ------------ {failure in sleep path} {sleeptime_injected=false} rtc_suspend() => rtc_resume() {sleeptime injected again which was not required as the suspend failed} Fix this by handling the boolean logic properly. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Miroslav Lichvar Cc: Richard Cochran Cc: Prarit Bhargava Cc: Stephen Boyd Originally-by: Thomas Gleixner Signed-off-by: Mukesh Ojha Signed-off-by: John Stultz --- kernel/time/timekeeping.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 7033ac1fee65..19414b16cf9e 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1519,8 +1519,20 @@ void __weak read_boot_clock64(struct timespec64 *ts) ts->tv_nsec = 0; } -/* Flag for if timekeeping_resume() has injected sleeptime */ -static bool sleeptime_injected; +/* + * Flag reflecting whether timekeeping_resume() has injected sleeptime. + * + * The flag starts of false and is only set when a suspend reaches + * timekeeping_suspend(), timekeeping_resume() sets it to false when the + * timekeeper clocksource is not stopping across suspend and has been + * used to update sleep time. If the timekeeper clocksource has stopped + * then the flag stays true and is used by the RTC resume code to decide + * whether sleeptime must be injected and if so the flag gets false then. + * + * If a suspend fails before reaching timekeeping_resume() then the flag + * stays false and prevents erroneous sleeptime injection. 
+ */ +static bool suspend_timing_needed; /* Flag for if there is a persistent clock on this platform */ static bool persistent_clock_exists; @@ -1619,7 +1631,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, */ bool timekeeping_rtc_skipresume(void) { - return sleeptime_injected; + return !suspend_timing_needed; } /** @@ -1655,6 +1667,8 @@ void timekeeping_inject_sleeptime64(const struct timespec64 *delta) raw_spin_lock_irqsave(&timekeeper_lock, flags); write_seqcount_begin(&tk_core.seq); + suspend_timing_needed = false; + timekeeping_forward_now(tk); __timekeeping_inject_sleeptime(tk, delta); @@ -1679,8 +1693,8 @@ void timekeeping_resume(void) unsigned long flags; struct timespec64 ts_new, ts_delta; u64 cycle_now; + bool inject_sleeptime = false; - sleeptime_injected = false; read_persistent_clock64(&ts_new); clockevents_resume(); @@ -1710,14 +1724,16 @@ void timekeeping_resume(void) tk->tkr_mono.mask); nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift); ts_delta = ns_to_timespec64(nsec); - sleeptime_injected = true; + inject_sleeptime = true; } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) { ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time); - sleeptime_injected = true; + inject_sleeptime = true; } - if (sleeptime_injected) + if (inject_sleeptime) { + suspend_timing_needed = false; __timekeeping_inject_sleeptime(tk, &ts_delta); + } /* Re-base the last cycle value */ tk->tkr_mono.cycle_last = cycle_now; @@ -1752,6 +1768,8 @@ int timekeeping_suspend(void) if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec) persistent_clock_exists = true; + suspend_timing_needed = true; + raw_spin_lock_irqsave(&timekeeper_lock, flags); write_seqcount_begin(&tk_core.seq); timekeeping_forward_now(tk); -- cgit v1.2.3-58-ga151 From 39232ed5a1793f67b11430c43ed8a9ed6e96c6eb Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 17 Jul 2018 15:55:16 +0800 Subject: time: Introduce one suspend clocksource to compensate the suspend time On some hardware with multiple clocksources, we have coarse grained clocksources that support the CLOCK_SOURCE_SUSPEND_NONSTOP flag, but which are less than ideal for timekeeping whereas other clocksources can be better candidates but halt on suspend. Currently, the timekeeping core only supports timing suspend using CLOCK_SOURCE_SUSPEND_NONSTOP clocksources if that clocksource is the current clocksource for timekeeping. As a result, some architectures try to implement read_persistent_clock64() using those non-stop clocksources, but isn't really ideal, which will introduce more duplicate code. To fix this, provide logic to allow a registered SUSPEND_NONSTOP clocksource, which isn't the current clocksource, to be used to calculate the suspend time. 
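A hypothetical driver-side sketch (device, register and rating invented) of the kind of clocksource this enables for suspend timing: an always-on counter that is too coarse to win normal clocksource selection but keeps counting across suspend:

    static u64 alwayson_read(struct clocksource *cs)
    {
            return readl_relaxed(alwayson_base);   /* hypothetical MMIO counter */
    }

    static struct clocksource alwayson_clocksource = {
            .name   = "alwayson",
            .rating = 100,                         /* too coarse for timekeeping */
            .read   = alwayson_read,
            .mask   = CLOCKSOURCE_MASK(32),
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
    };

    /* in the driver's probe(), e.g. for a 32 kHz always-on counter */
    clocksource_register_hz(&alwayson_clocksource, 32768);

Note that, per the selection logic in the patch below, such a clocksource should not provide suspend/resume callbacks, since it must keep running while the system sleeps.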
Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Miroslav Lichvar Cc: Richard Cochran Cc: Prarit Bhargava Cc: Stephen Boyd Cc: Daniel Lezcano Reviewed-by: Thomas Gleixner Reviewed-by: Daniel Lezcano Suggested-by: Thomas Gleixner Signed-off-by: Baolin Wang [jstultz: minor tweaks to merge with previous resume changes] Signed-off-by: John Stultz --- include/linux/clocksource.h | 3 + kernel/time/clocksource.c | 149 ++++++++++++++++++++++++++++++++++++++++++++ kernel/time/timekeeping.c | 22 ++++--- 3 files changed, 166 insertions(+), 8 deletions(-) (limited to 'kernel/time') diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 7dff1963c185..308918928767 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -194,6 +194,9 @@ extern void clocksource_suspend(void); extern void clocksource_resume(void); extern struct clocksource * __init clocksource_default_clock(void); extern void clocksource_mark_unstable(struct clocksource *cs); +extern void +clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles); +extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now); extern u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f89a78e2792b..f74fb00d8064 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -94,6 +94,8 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift); /*[Clocksource internal variables]--------- * curr_clocksource: * currently selected clocksource. + * suspend_clocksource: + * used to calculate the suspend time. * clocksource_list: * linked list with the registered clocksources * clocksource_mutex: @@ -102,10 +104,12 @@ EXPORT_SYMBOL_GPL(clocks_calc_mult_shift); * Name of the user-specified clocksource. */ static struct clocksource *curr_clocksource; +static struct clocksource *suspend_clocksource; static LIST_HEAD(clocksource_list); static DEFINE_MUTEX(clocksource_mutex); static char override_name[CS_NAME_LEN]; static int finished_booting; +static u64 suspend_start; #ifdef CONFIG_CLOCKSOURCE_WATCHDOG static void clocksource_watchdog_work(struct work_struct *work); @@ -447,6 +451,140 @@ static inline void clocksource_watchdog_unlock(unsigned long *flags) { } #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ +static bool clocksource_is_suspend(struct clocksource *cs) +{ + return cs == suspend_clocksource; +} + +static void __clocksource_suspend_select(struct clocksource *cs) +{ + /* + * Skip the clocksource which will be stopped in suspend state. + */ + if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP)) + return; + + /* + * The nonstop clocksource can be selected as the suspend clocksource to + * calculate the suspend time, so it should not supply suspend/resume + * interfaces to suspend the nonstop clocksource when system suspends. + */ + if (cs->suspend || cs->resume) { + pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n", + cs->name); + } + + /* Pick the best rating. 
*/ + if (!suspend_clocksource || cs->rating > suspend_clocksource->rating) + suspend_clocksource = cs; +} + +/** + * clocksource_suspend_select - Select the best clocksource for suspend timing + * @fallback: if select a fallback clocksource + */ +static void clocksource_suspend_select(bool fallback) +{ + struct clocksource *cs, *old_suspend; + + old_suspend = suspend_clocksource; + if (fallback) + suspend_clocksource = NULL; + + list_for_each_entry(cs, &clocksource_list, list) { + /* Skip current if we were requested for a fallback. */ + if (fallback && cs == old_suspend) + continue; + + __clocksource_suspend_select(cs); + } +} + +/** + * clocksource_start_suspend_timing - Start measuring the suspend timing + * @cs: current clocksource from timekeeping + * @start_cycles: current cycles from timekeeping + * + * This function will save the start cycle values of suspend timer to calculate + * the suspend time when resuming system. + * + * This function is called late in the suspend process from timekeeping_suspend(), + * that means processes are freezed, non-boot cpus and interrupts are disabled + * now. It is therefore possible to start the suspend timer without taking the + * clocksource mutex. + */ +void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles) +{ + if (!suspend_clocksource) + return; + + /* + * If current clocksource is the suspend timer, we should use the + * tkr_mono.cycle_last value as suspend_start to avoid same reading + * from suspend timer. + */ + if (clocksource_is_suspend(cs)) { + suspend_start = start_cycles; + return; + } + + if (suspend_clocksource->enable && + suspend_clocksource->enable(suspend_clocksource)) { + pr_warn_once("Failed to enable the non-suspend-able clocksource.\n"); + return; + } + + suspend_start = suspend_clocksource->read(suspend_clocksource); +} + +/** + * clocksource_stop_suspend_timing - Stop measuring the suspend timing + * @cs: current clocksource from timekeeping + * @cycle_now: current cycles from timekeeping + * + * This function will calculate the suspend time from suspend timer. + * + * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource. + * + * This function is called early in the resume process from timekeeping_resume(), + * that means there is only one cpu, no processes are running and the interrupts + * are disabled. It is therefore possible to stop the suspend timer without + * taking the clocksource mutex. + */ +u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now) +{ + u64 now, delta, nsec = 0; + + if (!suspend_clocksource) + return 0; + + /* + * If current clocksource is the suspend timer, we should use the + * tkr_mono.cycle_last value from timekeeping as current cycle to + * avoid same reading from suspend timer. + */ + if (clocksource_is_suspend(cs)) + now = cycle_now; + else + now = suspend_clocksource->read(suspend_clocksource); + + if (now > suspend_start) { + delta = clocksource_delta(now, suspend_start, + suspend_clocksource->mask); + nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult, + suspend_clocksource->shift); + } + + /* + * Disable the suspend timer to save power if current clocksource is + * not the suspend timer. 
+ */ + if (!clocksource_is_suspend(cs) && suspend_clocksource->disable) + suspend_clocksource->disable(suspend_clocksource); + + return nsec; +} + /** * clocksource_suspend - suspend the clocksource(s) */ @@ -792,6 +930,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) clocksource_select(); clocksource_select_watchdog(false); + __clocksource_suspend_select(cs); mutex_unlock(&clocksource_mutex); return 0; } @@ -820,6 +959,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating) clocksource_select(); clocksource_select_watchdog(false); + clocksource_suspend_select(false); mutex_unlock(&clocksource_mutex); } EXPORT_SYMBOL(clocksource_change_rating); @@ -845,6 +985,15 @@ static int clocksource_unbind(struct clocksource *cs) return -EBUSY; } + if (clocksource_is_suspend(cs)) { + /* + * Select and try to install a replacement suspend clocksource. + * If no replacement suspend clocksource, we will just let the + * clocksource go and have no suspend clocksource. + */ + clocksource_suspend_select(true); + } + clocksource_watchdog_lock(&flags); clocksource_dequeue_watchdog(cs); list_del_init(&cs->list); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 19414b16cf9e..d9e659a12c76 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1692,7 +1692,7 @@ void timekeeping_resume(void) struct clocksource *clock = tk->tkr_mono.clock; unsigned long flags; struct timespec64 ts_new, ts_delta; - u64 cycle_now; + u64 cycle_now, nsec; bool inject_sleeptime = false; read_persistent_clock64(&ts_new); @@ -1716,13 +1716,8 @@ void timekeeping_resume(void) * usable source. The rtc part is handled separately in rtc core code. */ cycle_now = tk_clock_read(&tk->tkr_mono); - if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && - cycle_now > tk->tkr_mono.cycle_last) { - u64 nsec, cyc_delta; - - cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, - tk->tkr_mono.mask); - nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift); + nsec = clocksource_stop_suspend_timing(clock, cycle_now); + if (nsec > 0) { ts_delta = ns_to_timespec64(nsec); inject_sleeptime = true; } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) { @@ -1757,6 +1752,8 @@ int timekeeping_suspend(void) unsigned long flags; struct timespec64 delta, delta_delta; static struct timespec64 old_delta; + struct clocksource *curr_clock; + u64 cycle_now; read_persistent_clock64(&timekeeping_suspend_time); @@ -1775,6 +1772,15 @@ int timekeeping_suspend(void) timekeeping_forward_now(tk); timekeeping_suspended = 1; + /* + * Since we've called forward_now, cycle_last stores the value + * just read from the current clocksource. Save this to potentially + * use in suspend timing. + */ + curr_clock = tk->tkr_mono.clock; + cycle_now = tk->tkr_mono.cycle_last; + clocksource_start_suspend_timing(curr_clock, cycle_now); + if (persistent_clock_exists) { /* * To avoid drift caused by repeated suspend/resumes, -- cgit v1.2.3-58-ga151 From 363e934d8811d799c88faffc5bfca782fd728334 Mon Sep 17 00:00:00 2001 From: Gaurav Kohli Date: Thu, 2 Aug 2018 14:21:03 +0530 Subject: timers: Clear timer_base::must_forward_clk with timer_base::lock held timer_base::must_forward_clk indicates that the base clock might be stale due to a long idle sleep. The forwarding of the base clock takes place in the timer softirq or when a timer is enqueued to a base which is idle.
If the enqueue of timer to an idle base happens from a remote CPU, then the following race can happen: CPU0 CPU1 run_timer_softirq mod_timer base = lock_timer_base(timer); base->must_forward_clk = false if (base->must_forward_clk) forward(base); -> skipped enqueue_timer(base, timer, idx); -> idx is calculated high due to stale base unlock_timer_base(timer); base = lock_timer_base(timer); forward(base); The root cause is that timer_base::must_forward_clk is cleared outside the timer_base::lock held region, so the remote queuing CPU observes it as cleared, but the base clock is still stale. This can cause large granularity values for timers, i.e. the accuracy of the expiry time suffers. Prevent this by clearing the flag with timer_base::lock held, so that the forwarding takes place before the cleared flag is observable by a remote CPU. Signed-off-by: Gaurav Kohli Signed-off-by: Thomas Gleixner Cc: john.stultz@linaro.org Cc: sboyd@kernel.org Cc: linux-arm-msm@vger.kernel.org Link: https://lkml.kernel.org/r/1533199863-22748-1-git-send-email-gkohli@codeaurora.org --- kernel/time/timer.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'kernel/time') diff --git a/kernel/time/timer.c b/kernel/time/timer.c index baa528a24a73..fa49cd753dea 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base) raw_spin_lock_irq(&base->lock); + /* + * timer_base::must_forward_clk must be cleared before running + * timers so that any timer functions that call mod_timer() will + * not try to forward the base. Idle tracking / clock forwarding + * logic is only used with BASE_STD timers. + * + * The must_forward_clk flag is cleared unconditionally also for + * the deferrable base. The deferrable base is not affected by idle + * tracking and never forwarded, so clearing the flag is a NOOP. + * + * The fact that the deferrable base is never forwarded can cause + * large variations in granularity for deferrable timers, but they + * can be deferred for long periods due to idle anyway. + */ + base->must_forward_clk = false; + while (time_after_eq(jiffies, base->clk)) { levels = collect_expired_timers(base, heads); @@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); - /* - * must_forward_clk must be cleared before running timers so that any - * timer functions that call mod_timer will not try to forward the - * base. idle trcking / clock forwarding logic is only used with - * BASE_STD timers. - * - * The deferrable base does not do idle tracking at all, so we do - * not forward it. This can result in very large variations in - * granularity for deferrable timers, but they can be deferred for - * long periods due to idle. - */ - base->must_forward_clk = false; - __run_timers(base); if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); -- cgit v1.2.3-58-ga151 From 234b3840d73430564a03f53973a311b7a83a95a9 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 11 Jul 2018 12:24:23 +0100 Subject: tick/broadcast-hrtimer: Use cpu_possible_mask for ce_broadcast_hrtimer This is the last instance of cpu_all_mask usage in the core framework. Replace it with cpu_possible_mask like all other instances in the clockevent drivers. 
This makes it possible to add a warning in the core clockevents_register_device on usage of cpu_all_mask from any clockevent driver in the future. Signed-off-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Link: https://lkml.kernel.org/r/1531308264-24220-2-git-send-email-sudeep.holla@arm.com --- kernel/time/tick-broadcast-hrtimer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/time') diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index 58045eb976c3..a59641fb88b6 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c @@ -90,7 +90,7 @@ static struct clock_event_device ce_broadcast_hrtimer = { .max_delta_ticks = ULONG_MAX, .mult = 1, .shift = 0, - .cpumask = cpu_all_mask, + .cpumask = cpu_possible_mask, }; static enum hrtimer_restart bc_handler(struct hrtimer *t) -- cgit v1.2.3-58-ga151 From fbfa9260085b5b578a049a90135e5c51928c5f7f Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 11 Jul 2018 12:24:24 +0100 Subject: clockevents: Warn if cpu_all_mask is used as cpumask Using cpu_all_mask in clockevents cpumask may result in issues while comparing multiple clockevent devices to choose the preferred one. On one of the platforms with 2 system (i.e. non per-CPU) timers with different ratings, having cpu_all_mask for one of the devices resulted in a boot hang due to an endless loop in clockevents_notify_released() as both clocksources were selected as preferred. In order to prevent such issues in the future, warn if any clockevent driver sets cpu_all_mask as its cpumask and just override it to use cpu_possible_mask. All the existing occurrences of cpu_all_mask are already replaced with cpu_possible_mask. Signed-off-by: Sudeep Holla Signed-off-by: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Link: https://lkml.kernel.org/r/1531308264-24220-3-git-send-email-sudeep.holla@arm.com --- kernel/time/clockevents.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel/time') diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 16c027e9cc73..8c0e4092f661 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -463,6 +463,12 @@ void clockevents_register_device(struct clock_event_device *dev) dev->cpumask = cpumask_of(smp_processor_id()); } + if (dev->cpumask == cpu_all_mask) { + WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n", + dev->name); + dev->cpumask = cpu_possible_mask; + } + raw_spin_lock_irqsave(&clockevents_lock, flags); list_add(&dev->list, &clockevent_devices); -- cgit v1.2.3-58-ga151
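Editor's worked example: the suspend-time accounting added by clocksource_stop_suspend_timing() earlier in this log reconstructs the sleep length as a wrap-safe cycle delta converted to nanoseconds with the clocksource's mult/shift pair. The standalone C sketch below mirrors those two steps, clocksource_delta() followed by mul_u64_u32_shr(); the mask, mult, shift and counter values are hypothetical and chosen only for illustration, so this is not kernel code.

/*
 * Minimal standalone sketch of the cycle-delta to nanosecond conversion
 * used when injecting sleep time on resume. All numeric values below are
 * made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t mask  = UINT64_MAX;  /* hypothetical full-width 64-bit free-running counter */
	const uint32_t mult  = 4194304;     /* hypothetical 1 GHz clock: 1 ns per cycle, mult = 1 << shift */
	const uint32_t shift = 22;

	uint64_t suspend_start = 100000;         /* counter value saved at suspend entry */
	uint64_t now           = 3000100000ULL;  /* counter value read at resume */

	/* clocksource_delta(): subtraction masked to the counter width */
	uint64_t delta = (now - suspend_start) & mask;

	/* mul_u64_u32_shr(): widen the product to 128 bits, then shift back down
	 * (unsigned __int128 is a GCC/Clang extension) */
	uint64_t nsec = (uint64_t)(((unsigned __int128)delta * mult) >> shift);

	printf("slept for %llu ns\n", (unsigned long long)nsec);
	return 0;
}

With these made-up numbers, a delta of 3,000,000,000 cycles on a 1 GHz counter comes out as 3,000,000,000 ns, i.e. three seconds of injected sleep time.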