-rw-r--r--  Documentation/devicetree/bindings/timer/renesas,tmu.txt   49
-rw-r--r--  Documentation/devicetree/bindings/timer/renesas,tmu.yaml  99
-rw-r--r--  drivers/clocksource/Kconfig                               12
-rw-r--r--  drivers/clocksource/Makefile                               1
-rw-r--r--  drivers/clocksource/arm_arch_timer.c                      27
-rw-r--r--  drivers/clocksource/dw_apb_timer_of.c                     57
-rw-r--r--  drivers/clocksource/ingenic-timer.c                        2
-rw-r--r--  drivers/clocksource/sh_cmt.c                              18
-rw-r--r--  drivers/clocksource/timer-cadence-ttc.c                   18
-rw-r--r--  drivers/clocksource/timer-nps.c                          284
-rw-r--r--  drivers/clocksource/timer-orion.c                         11
-rw-r--r--  drivers/clocksource/timer-sp804.c                         49
-rw-r--r--  drivers/rtc/Makefile                                       1
-rw-r--r--  drivers/rtc/class.c                                        9
-rw-r--r--  drivers/rtc/rtc-cmos.c                                     3
-rw-r--r--  drivers/rtc/rtc-mc146818-lib.c                            70
-rw-r--r--  drivers/rtc/systohc.c                                     61
-rw-r--r--  include/dt-bindings/clock/ingenic,sysost.h                10
-rw-r--r--  include/linux/hrtimer.h                                    6
-rw-r--r--  include/linux/rtc.h                                       69
-rw-r--r--  include/linux/timekeeping.h                                2
-rw-r--r--  include/linux/timer.h                                      1
-rw-r--r--  include/linux/timex.h                                      1
-rw-r--r--  kernel/time/hrtimer.c                                      2
-rw-r--r--  kernel/time/jiffies.c                                      3
-rw-r--r--  kernel/time/ntp.c                                        229
-rw-r--r--  kernel/time/ntp_internal.h                                 7
-rw-r--r--  kernel/time/tick-broadcast.c                              25
-rw-r--r--  kernel/time/tick-common.c                                 12
-rw-r--r--  kernel/time/tick-internal.h                                1
-rw-r--r--  kernel/time/tick-sched.c                                 122
-rw-r--r--  kernel/time/timeconv.c                                     6
-rw-r--r--  kernel/time/timekeeping.c                                 85
-rw-r--r--  kernel/time/timekeeping.h                                  2
-rw-r--r--  kernel/time/timer.c                                       57
-rw-r--r--  kernel/time/timer_list.c                                  66
36 files changed, 666 insertions, 811 deletions
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.txt b/Documentation/devicetree/bindings/timer/renesas,tmu.txt
deleted file mode 100644
index 29159f4e65ab..000000000000
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-* Renesas R-Mobile/R-Car Timer Unit (TMU)
-
-The TMU is a 32-bit timer/counter with configurable clock inputs and
-programmable compare match.
-
-Channels share hardware resources but their counter and compare match value
-are independent. The TMU hardware supports up to three channels.
-
-Required Properties:
-
- - compatible: must contain one or more of the following:
- - "renesas,tmu-r8a7740" for the r8a7740 TMU
- - "renesas,tmu-r8a774a1" for the r8a774A1 TMU
- - "renesas,tmu-r8a774b1" for the r8a774B1 TMU
- - "renesas,tmu-r8a774c0" for the r8a774C0 TMU
- - "renesas,tmu-r8a7778" for the r8a7778 TMU
- - "renesas,tmu-r8a7779" for the r8a7779 TMU
- - "renesas,tmu-r8a77970" for the r8a77970 TMU
- - "renesas,tmu-r8a77980" for the r8a77980 TMU
- - "renesas,tmu" for any TMU.
- This is a fallback for the above renesas,tmu-* entries
-
- - reg: base address and length of the registers block for the timer module.
-
- - interrupts: interrupt-specifier for the timer, one per channel.
-
- - clocks: a list of phandle + clock-specifier pairs, one for each entry
- in clock-names.
- - clock-names: must contain "fck" for the functional clock.
-
-Optional Properties:
-
- - #renesas,channels: number of channels implemented by the timer, must be 2
- or 3 (if not specified the value defaults to 3).
-
-
-Example: R8A7779 (R-Car H1) TMU0 node
-
- tmu0: timer@ffd80000 {
- compatible = "renesas,tmu-r8a7779", "renesas,tmu";
- reg = <0xffd80000 0x30>;
- interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
- <0 33 IRQ_TYPE_LEVEL_HIGH>,
- <0 34 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
- clock-names = "fck";
-
- #renesas,channels = <3>;
- };
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
new file mode 100644
index 000000000000..c54188731a1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/renesas,tmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Mobile/R-Car Timer Unit (TMU)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description:
+ The TMU is a 32-bit timer/counter with configurable clock inputs and
+ programmable compare match.
+
+ Channels share hardware resources but their counter and compare match value
+ are independent. The TMU hardware supports up to three channels.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,tmu-r8a7740 # R-Mobile A1
+ - renesas,tmu-r8a774a1 # RZ/G2M
+ - renesas,tmu-r8a774b1 # RZ/G2N
+ - renesas,tmu-r8a774c0 # RZ/G2E
+ - renesas,tmu-r8a774e1 # RZ/G2H
+ - renesas,tmu-r8a7778 # R-Car M1A
+ - renesas,tmu-r8a7779 # R-Car H1
+ - renesas,tmu-r8a77970 # R-Car V3M
+ - renesas,tmu-r8a77980 # R-Car V3H
+ - const: renesas,tmu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 2
+ maxItems: 3
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: fck
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ '#renesas,channels':
+ description:
+ Number of channels implemented by the timer.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 2, 3 ]
+ default: 3
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+
+if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,tmu-r8a7740
+ - renesas,tmu-r8a7778
+ - renesas,tmu-r8a7779
+then:
+ required:
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7779-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7779-sysc.h>
+ tmu0: timer@ffd80000 {
+ compatible = "renesas,tmu-r8a7779", "renesas,tmu";
+ reg = <0xffd80000 0x30>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
+ #renesas,channels = <3>;
+ };
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 68b087bff59c..9f00b8385fd4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -275,16 +275,6 @@ config CLKSRC_TI_32K
This option enables support for Texas Instruments 32.768 Hz clocksource
available on many OMAP-like platforms.
-config CLKSRC_NPS
- bool "NPS400 clocksource driver" if COMPILE_TEST
- depends on !PHYS_ADDR_T_64BIT
- select CLKSRC_MMIO
- select TIMER_OF if OF
- help
- NPS400 clocksource support.
- It has a 64-bit counter with update rate up to 1000MHz.
- This counter is accessed via couple of 32-bit memory-mapped registers.
-
config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
@@ -654,7 +644,7 @@ config ATCPIT100_TIMER
config RISCV_TIMER
bool "Timer for the RISC-V platform" if COMPILE_TEST
- depends on GENERIC_SCHED_CLOCK && RISCV
+ depends on GENERIC_SCHED_CLOCK && RISCV && RISCV_SBI
select TIMER_PROBE
select TIMER_OF
help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 1c444cc3bb44..3c75cbbf8533 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -56,7 +56,6 @@ obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
-obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += timer-owl.o
obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 6c3e84180146..d0177824c518 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -396,10 +396,10 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
if (access == ARCH_TIMER_PHYS_ACCESS) {
- cval = evt + arch_counter_get_cntpct();
+ cval = evt + arch_counter_get_cntpct_stable();
write_sysreg(cval, cntp_cval_el0);
} else {
- cval = evt + arch_counter_get_cntvct();
+ cval = evt + arch_counter_get_cntvct_stable();
write_sysreg(cval, cntv_cval_el0);
}
@@ -822,15 +822,24 @@ static void arch_timer_evtstrm_enable(int divider)
static void arch_timer_configure_evtstream(void)
{
- int evt_stream_div, pos;
+ int evt_stream_div, lsb;
+
+ /*
+ * As the event stream can at most be generated at half the frequency
+ * of the counter, use half the frequency when computing the divider.
+ */
+ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
+
+ /*
+ * Find the closest power of two to the divisor. If the adjacent bit
+ * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
+ */
+ lsb = fls(evt_stream_div) - 1;
+ if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
+ lsb++;
- /* Find the closest power of two to the divisor */
- evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
- pos = fls(evt_stream_div);
- if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
- pos--;
/* enable event stream */
- arch_timer_evtstrm_enable(min(pos, 15));
+ arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
}
static void arch_counter_set_user_access(void)
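
The new rounding in arch_timer_configure_evtstream() can be sanity-checked outside the kernel. A minimal user-space sketch, with fls() re-implemented via __builtin_clz(); the 19.2 MHz counter rate and 10 kHz event stream frequency are illustrative assumptions, not values taken from this patch:

#include <stdio.h>

/* user-space stand-in for the kernel's fls(): last set bit, 1-based */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int arch_timer_rate = 19200000;  /* assumed counter rate */
	unsigned int evt_stream_freq = 10000;     /* assumed target frequency */
	unsigned int evt_stream_div;
	int lsb;

	/* the event stream runs at most at half the counter frequency */
	evt_stream_div = arch_timer_rate / evt_stream_freq / 2;  /* 960 */

	/* round to the nearest power of two: 960 -> 2^10 = 1024 */
	lsb = fls(evt_stream_div) - 1;                           /* 9 */
	if (lsb > 0 && (evt_stream_div & (1U << (lsb - 1))))
		lsb++;               /* bit 8 is set in 960, so round up */

	printf("divider exponent: %d\n", lsb);
	return 0;
}

For 960 the last set bit is bit 9 and bit 8 is also set, so the code rounds up to 2^10 = 1024 instead of truncating down to 512.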
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index ab3ddebe8344..42e7e43b8fcd 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -14,12 +14,13 @@
#include <linux/reset.h>
#include <linux/sched_clock.h>
-static void __init timer_get_base_and_rate(struct device_node *np,
+static int __init timer_get_base_and_rate(struct device_node *np,
void __iomem **base, u32 *rate)
{
struct clk *timer_clk;
struct clk *pclk;
struct reset_control *rstc;
+ int ret;
*base = of_iomap(np, 0);
@@ -46,55 +47,67 @@ static void __init timer_get_base_and_rate(struct device_node *np,
pr_warn("pclk for %pOFn is present, but could not be activated\n",
np);
+ if (!of_property_read_u32(np, "clock-freq", rate) &&
+ !of_property_read_u32(np, "clock-frequency", rate))
+ return 0;
+
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk))
- goto try_clock_freq;
+ return PTR_ERR(timer_clk);
- if (!clk_prepare_enable(timer_clk)) {
- *rate = clk_get_rate(timer_clk);
- return;
- }
+ ret = clk_prepare_enable(timer_clk);
+ if (ret)
+ return ret;
+
+ *rate = clk_get_rate(timer_clk);
+ if (!(*rate))
+ return -EINVAL;
-try_clock_freq:
- if (of_property_read_u32(np, "clock-freq", rate) &&
- of_property_read_u32(np, "clock-frequency", rate))
- panic("No clock nor clock-frequency property for %pOFn", np);
+ return 0;
}
-static void __init add_clockevent(struct device_node *event_timer)
+static int __init add_clockevent(struct device_node *event_timer)
{
void __iomem *iobase;
struct dw_apb_clock_event_device *ced;
u32 irq, rate;
+ int ret = 0;
irq = irq_of_parse_and_map(event_timer, 0);
if (irq == 0)
panic("No IRQ for clock event timer");
- timer_get_base_and_rate(event_timer, &iobase, &rate);
+ ret = timer_get_base_and_rate(event_timer, &iobase, &rate);
+ if (ret)
+ return ret;
ced = dw_apb_clockevent_init(-1, event_timer->name, 300, iobase, irq,
rate);
if (!ced)
- panic("Unable to initialise clockevent device");
+ return -EINVAL;
dw_apb_clockevent_register(ced);
+
+ return 0;
}
static void __iomem *sched_io_base;
static u32 sched_rate;
-static void __init add_clocksource(struct device_node *source_timer)
+static int __init add_clocksource(struct device_node *source_timer)
{
void __iomem *iobase;
struct dw_apb_clocksource *cs;
u32 rate;
+ int ret;
- timer_get_base_and_rate(source_timer, &iobase, &rate);
+ ret = timer_get_base_and_rate(source_timer, &iobase, &rate);
+ if (ret)
+ return ret;
cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate);
if (!cs)
- panic("Unable to initialise clocksource device");
+ return -EINVAL;
dw_apb_clocksource_start(cs);
dw_apb_clocksource_register(cs);
@@ -106,6 +119,8 @@ static void __init add_clocksource(struct device_node *source_timer)
*/
sched_io_base = iobase + 0x04;
sched_rate = rate;
+
+ return 0;
}
static u64 notrace read_sched_clock(void)
@@ -146,10 +161,14 @@ static struct delay_timer dw_apb_delay_timer = {
static int num_called;
static int __init dw_apb_timer_init(struct device_node *timer)
{
+ int ret = 0;
+
switch (num_called) {
case 1:
pr_debug("%s: found clocksource timer\n", __func__);
- add_clocksource(timer);
+ ret = add_clocksource(timer);
+ if (ret)
+ return ret;
init_sched_clock();
#ifdef CONFIG_ARM
dw_apb_delay_timer.freq = sched_rate;
@@ -158,7 +177,9 @@ static int __init dw_apb_timer_init(struct device_node *timer)
break;
default:
pr_debug("%s: found clockevent timer\n", __func__);
- add_clockevent(timer);
+ ret = add_clockevent(timer);
+ if (ret)
+ return ret;
break;
}
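
Every helper in this file now follows the same shape: return a negative errno instead of calling panic(), and let dw_apb_timer_init() hand the failure back to the timer-probe core. (Note the property check must use ||: a node supplies either "clock-freq" or the preferred "clock-frequency", not both.) A toy user-space model of that flow; the names and the simulated 0 Hz clock are illustrative, not the driver's API:

#include <errno.h>
#include <stdio.h>

/* helper with an out parameter that reports failure via -errno */
static int get_rate(unsigned int *rate)
{
	*rate = 0;                     /* pretend the clock reported 0 Hz */
	return *rate ? 0 : -EINVAL;    /* mirrors the new !(*rate) check */
}

static int timer_init(void)
{
	unsigned int rate;
	int ret = get_rate(&rate);

	if (ret)
		return ret;            /* propagated, not panic() */
	printf("rate = %u Hz\n", rate);
	return 0;
}

int main(void)
{
	printf("timer_init() = %d\n", timer_init());  /* prints -22 */
	return 0;
}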
diff --git a/drivers/clocksource/ingenic-timer.c b/drivers/clocksource/ingenic-timer.c
index 58fd9189fab7..905fd6b163a8 100644
--- a/drivers/clocksource/ingenic-timer.c
+++ b/drivers/clocksource/ingenic-timer.c
@@ -127,7 +127,7 @@ static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct clk * __init ingenic_tcu_get_clock(struct device_node *np, int id)
+static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
{
struct of_phandle_args args;
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 760777458a90..19fa3ef75e3b 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -319,7 +319,6 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
int k, ret;
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
/* enable clock */
@@ -394,7 +393,6 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
clk_disable(ch->cmt->clk);
dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
- pm_runtime_put(&ch->cmt->pdev->dev);
}
/* private flags */
@@ -562,10 +560,16 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
int ret = 0;
unsigned long flags;
+ if (flag & FLAG_CLOCKSOURCE)
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
+
raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+ if (flag & FLAG_CLOCKEVENT)
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
ret = sh_cmt_enable(ch);
+ }
if (ret)
goto out;
@@ -590,14 +594,20 @@ static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
ch->flags &= ~flag;
- if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
sh_cmt_disable(ch);
+ if (flag & FLAG_CLOCKEVENT)
+ pm_runtime_put(&ch->cmt->pdev->dev);
+ }
/* adjust the timeout to maximum if only clocksource left */
if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
__sh_cmt_set_next(ch, ch->max_match_value);
raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (flag & FLAG_CLOCKSOURCE)
+ pm_runtime_put(&ch->cmt->pdev->dev);
}
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
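
The pm_runtime calls move out of sh_cmt_enable()/sh_cmt_disable() and into sh_cmt_start()/sh_cmt_stop() with per-role lifetimes: a clocksource user pins the device for its whole start/stop window, while a clockevent user only holds a reference across the channel's idle/in-use transitions. A toy user-space refcount model of that pairing (not the driver's API; the flag logic mirrors the patch):

#include <assert.h>
#include <stdio.h>

#define FLAG_CLOCKEVENT  (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)

static int usage;          /* stands in for the PM runtime usage count */
static unsigned int flags; /* stands in for ch->flags */

static void start(unsigned int flag)
{
	if (flag & FLAG_CLOCKSOURCE)
		usage++;                        /* pm_runtime_get_sync() */
	if (!(flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)) &&
	    (flag & FLAG_CLOCKEVENT))
		usage++;                        /* get on first enable */
	flags |= flag;
}

static void stop(unsigned int flag)
{
	unsigned int f = flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);

	flags &= ~flag;
	if (f && !(flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)) &&
	    (flag & FLAG_CLOCKEVENT))
		usage--;                        /* put on last disable */
	if (flag & FLAG_CLOCKSOURCE)
		usage--;                        /* pm_runtime_put() */
}

int main(void)
{
	start(FLAG_CLOCKSOURCE);
	start(FLAG_CLOCKEVENT);
	stop(FLAG_CLOCKEVENT);
	stop(FLAG_CLOCKSOURCE);
	assert(usage == 0);                     /* gets and puts balance */
	printf("balanced\n");
	return 0;
}

Whichever order the two roles come and go, every get is matched by exactly one put.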
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index 80e960602030..4efd0cf3b602 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -413,10 +413,8 @@ static int __init ttc_setup_clockevent(struct clk *clk,
ttcce->ttc.clk = clk;
err = clk_prepare_enable(ttcce->ttc.clk);
- if (err) {
- kfree(ttcce);
- return err;
- }
+ if (err)
+ goto out_kfree;
ttcce->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clockevent_cb;
@@ -426,7 +424,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
&ttcce->ttc.clk_rate_change_nb);
if (err) {
pr_warn("Unable to register clock notifier.\n");
- return err;
+ goto out_kfree;
}
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
@@ -455,15 +453,17 @@ static int __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
- if (err) {
- kfree(ttcce);
- return err;
- }
+ if (err)
+ goto out_kfree;
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
return 0;
+
+out_kfree:
+ kfree(ttcce);
+ return err;
}
static int __init ttc_timer_probe(struct platform_device *pdev)
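
The ttc change above is the standard kernel unwind idiom: once an allocation has succeeded, every later failure path funnels through a single exit label rather than duplicating kfree(). A minimal generic sketch of the pattern, user space with illustrative names:

#include <errno.h>
#include <stdlib.h>

struct thing { int irq; };

static int setup_thing(void)
{
	struct thing *t = malloc(sizeof(*t));
	int err;

	if (!t)
		return -ENOMEM;

	err = -ENXIO;           /* pretend a later setup step failed */
	if (err)
		goto out_kfree;

	return 0;

out_kfree:                      /* one place frees, all paths share it */
	free(t);
	return err;
}

int main(void)
{
	return setup_thing() ? 1 : 0;
}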
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
deleted file mode 100644
index 7b6bb0df96ae..000000000000
--- a/drivers/clocksource/timer-nps.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/cpu.h>
-#include <soc/nps/common.h>
-
-#define NPS_MSU_TICK_LOW 0xC8
-#define NPS_CLUSTER_OFFSET 8
-#define NPS_CLUSTER_NUM 16
-
-/* This array is per cluster of CPUs (Each NPS400 cluster got 256 CPUs) */
-static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
-
-static int __init nps_get_timer_clk(struct device_node *node,
- unsigned long *timer_freq,
- struct clk **clk)
-{
- int ret;
-
- *clk = of_clk_get(node, 0);
- ret = PTR_ERR_OR_ZERO(*clk);
- if (ret) {
- pr_err("timer missing clk\n");
- return ret;
- }
-
- ret = clk_prepare_enable(*clk);
- if (ret) {
- pr_err("Couldn't enable parent clk\n");
- clk_put(*clk);
- return ret;
- }
-
- *timer_freq = clk_get_rate(*clk);
- if (!(*timer_freq)) {
- pr_err("Couldn't get clk rate\n");
- clk_disable_unprepare(*clk);
- clk_put(*clk);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static u64 nps_clksrc_read(struct clocksource *clksrc)
-{
- int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;
-
- return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
-}
-
-static int __init nps_setup_clocksource(struct device_node *node)
-{
- int ret, cluster;
- struct clk *clk;
- unsigned long nps_timer1_freq;
-
-
- for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
- nps_msu_reg_low_addr[cluster] =
- nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
- NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
-
- ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk);
- if (ret)
- return ret;
-
- ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick",
- nps_timer1_freq, 300, 32, nps_clksrc_read);
- if (ret) {
- pr_err("Couldn't register clock source.\n");
- clk_disable_unprepare(clk);
- }
-
- return ret;
-}
-
-TIMER_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
- nps_setup_clocksource);
-TIMER_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
- nps_setup_clocksource);
-
-#ifdef CONFIG_EZNPS_MTM_EXT
-#include <soc/nps/mtm.h>
-
-/* Timer related Aux registers */
-#define NPS_REG_TIMER0_TSI 0xFFFFF850
-#define NPS_REG_TIMER0_LIMIT 0x23
-#define NPS_REG_TIMER0_CTRL 0x22
-#define NPS_REG_TIMER0_CNT 0x21
-
-/*
- * Interrupt Enabled (IE) - re-arm the timer
- * Not Halted (NH) - is cleared when working with JTAG (for debug)
- */
-#define TIMER0_CTRL_IE BIT(0)
-#define TIMER0_CTRL_NH BIT(1)
-
-static unsigned long nps_timer0_freq;
-static unsigned long nps_timer0_irq;
-
-static void nps_clkevent_rm_thread(void)
-{
- int thread;
- unsigned int cflags, enabled_threads;
-
- hw_schd_save(&cflags);
-
- enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
-
- /* remove thread from TSI1 */
- thread = read_aux_reg(CTOP_AUX_THREAD_ID);
- enabled_threads &= ~(1 << thread);
- write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
-
- /* Acknowledge and if needed re-arm the timer */
- if (!enabled_threads)
- write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH);
- else
- write_aux_reg(NPS_REG_TIMER0_CTRL,
- TIMER0_CTRL_IE | TIMER0_CTRL_NH);
-
- hw_schd_restore(cflags);
-}
-
-static void nps_clkevent_add_thread(unsigned long delta)
-{
- int thread;
- unsigned int cflags, enabled_threads;
-
- hw_schd_save(&cflags);
-
- /* add thread to TSI1 */
- thread = read_aux_reg(CTOP_AUX_THREAD_ID);
- enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
- enabled_threads |= (1 << thread);
- write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
-
- /* set next timer event */
- write_aux_reg(NPS_REG_TIMER0_LIMIT, delta);
- write_aux_reg(NPS_REG_TIMER0_CNT, 0);
- write_aux_reg(NPS_REG_TIMER0_CTRL,
- TIMER0_CTRL_IE | TIMER0_CTRL_NH);
-
- hw_schd_restore(cflags);
-}
-
-/*
- * Whenever anyone tries to change modes, we just mask interrupts
- * and wait for the next event to get set.
- */
-static int nps_clkevent_set_state(struct clock_event_device *dev)
-{
- nps_clkevent_rm_thread();
- disable_percpu_irq(nps_timer0_irq);
-
- return 0;
-}
-
-static int nps_clkevent_set_next_event(unsigned long delta,
- struct clock_event_device *dev)
-{
- nps_clkevent_add_thread(delta);
- enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = {
- .name = "NPS Timer0",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 300,
- .set_next_event = nps_clkevent_set_next_event,
- .set_state_oneshot = nps_clkevent_set_state,
- .set_state_oneshot_stopped = nps_clkevent_set_state,
- .set_state_shutdown = nps_clkevent_set_state,
- .tick_resume = nps_clkevent_set_state,
-};
-
-static irqreturn_t timer_irq_handler(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- nps_clkevent_rm_thread();
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static int nps_timer_starting_cpu(unsigned int cpu)
-{
- struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device);
-
- evt->cpumask = cpumask_of(smp_processor_id());
-
- clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX);
- enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static int nps_timer_dying_cpu(unsigned int cpu)
-{
- disable_percpu_irq(nps_timer0_irq);
- return 0;
-}
-
-static int __init nps_setup_clockevent(struct device_node *node)
-{
- struct clk *clk;
- int ret;
-
- nps_timer0_irq = irq_of_parse_and_map(node, 0);
- if (nps_timer0_irq <= 0) {
- pr_err("clockevent: missing irq\n");
- return -EINVAL;
- }
-
- ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
- if (ret)
- return ret;
-
- /* Needs apriori irq_set_percpu_devid() done in intc map function */
- ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
- "Timer0 (per-cpu-tick)",
- &nps_clockevent_device);
- if (ret) {
- pr_err("Couldn't request irq\n");
- clk_disable_unprepare(clk);
- return ret;
- }
-
- ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
- "clockevents/nps:starting",
- nps_timer_starting_cpu,
- nps_timer_dying_cpu);
- if (ret) {
- pr_err("Failed to setup hotplug state\n");
- clk_disable_unprepare(clk);
- free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
- return ret;
- }
-
- return 0;
-}
-
-TIMER_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
- nps_setup_clockevent);
-#endif /* CONFIG_EZNPS_MTM_EXT */
diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c
index d01ff4181867..5101e834d78f 100644
--- a/drivers/clocksource/timer-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -143,7 +143,8 @@ static int __init orion_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
pr_err("%pOFn: unable to parse timer1 irq\n", np);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unprep_clk;
}
rate = clk_get_rate(clk);
@@ -160,7 +161,7 @@ static int __init orion_timer_init(struct device_node *np)
clocksource_mmio_readl_down);
if (ret) {
pr_err("Failed to initialize mmio timer\n");
- return ret;
+ goto out_unprep_clk;
}
sched_clock_register(orion_read_sched_clock, 32, rate);
@@ -170,7 +171,7 @@ static int __init orion_timer_init(struct device_node *np)
"orion_event", NULL);
if (ret) {
pr_err("%pOFn: unable to setup irq\n", np);
- return ret;
+ goto out_unprep_clk;
}
ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
@@ -183,5 +184,9 @@ static int __init orion_timer_init(struct device_node *np)
orion_delay_timer_init(rate);
return 0;
+
+out_unprep_clk:
+ clk_disable_unprepare(clk);
+ return ret;
}
TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 6e8ad4a4ea3c..401d592e85f5 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -5,6 +5,9 @@
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -34,8 +37,7 @@
#define HISI_TIMER_BGLOAD 0x20
#define HISI_TIMER_BGLOAD_H 0x24
-
-struct sp804_timer __initdata arm_sp804_timer = {
+static struct sp804_timer arm_sp804_timer __initdata = {
.load = TIMER_LOAD,
.value = TIMER_VALUE,
.ctrl = TIMER_CTRL,
@@ -44,7 +46,7 @@ struct sp804_timer __initdata arm_sp804_timer = {
.width = 32,
};
-struct sp804_timer __initdata hisi_sp804_timer = {
+static struct sp804_timer hisi_sp804_timer __initdata = {
.load = HISI_TIMER_LOAD,
.load_h = HISI_TIMER_LOAD_H,
.value = HISI_TIMER_VALUE,
@@ -59,40 +61,23 @@ static struct sp804_clkevt sp804_clkevt[NR_TIMERS];
static long __init sp804_get_clock_rate(struct clk *clk, const char *name)
{
- long rate;
int err;
if (!clk)
clk = clk_get_sys("sp804", name);
if (IS_ERR(clk)) {
- pr_err("sp804: %s clock not found: %ld\n", name, PTR_ERR(clk));
+ pr_err("%s clock not found: %ld\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
- err = clk_prepare(clk);
- if (err) {
- pr_err("sp804: clock failed to prepare: %d\n", err);
- clk_put(clk);
- return err;
- }
-
- err = clk_enable(clk);
+ err = clk_prepare_enable(clk);
if (err) {
- pr_err("sp804: clock failed to enable: %d\n", err);
- clk_unprepare(clk);
+ pr_err("clock failed to enable: %d\n", err);
clk_put(clk);
return err;
}
- rate = clk_get_rate(clk);
- if (rate < 0) {
- pr_err("sp804: clock failed to get rate: %ld\n", rate);
- clk_disable(clk);
- clk_unprepare(clk);
- clk_put(clk);
- }
-
- return rate;
+ return clk_get_rate(clk);
}
static struct sp804_clkevt * __init sp804_clkevt_get(void __iomem *base)
@@ -117,10 +102,10 @@ static u64 notrace sp804_read(void)
return ~readl_relaxed(sched_clkevt->value);
}
-int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
- const char *name,
- struct clk *clk,
- int use_sched_clock)
+static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
+ const char *name,
+ struct clk *clk,
+ int use_sched_clock)
{
long rate;
struct sp804_clkevt *clkevt;
@@ -216,8 +201,8 @@ static struct clock_event_device sp804_clockevent = {
.rating = 300,
};
-int __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
- struct clk *clk, const char *name)
+static int __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
+ struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
@@ -236,7 +221,7 @@ int __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
if (request_irq(irq, sp804_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"timer", &sp804_clockevent))
- pr_err("%s: request_irq() failed\n", "timer");
+ pr_err("request_irq() failed\n");
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
return 0;
@@ -298,7 +283,7 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
if (of_clk_get_parent_count(np) == 3) {
clk2 = of_clk_get(np, 1);
if (IS_ERR(clk2)) {
- pr_err("sp804: %pOFn clock not found: %d\n", np,
+ pr_err("%pOFn clock not found: %d\n", np,
(int)PTR_ERR(clk2));
clk2 = NULL;
}
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index bfb57464118d..bb8f319b09fb 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -6,7 +6,6 @@
ccflags-$(CONFIG_RTC_DEBUG) := -DDEBUG
obj-$(CONFIG_RTC_LIB) += lib.o
-obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o
rtc-core-y := class.o interface.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 7c88d190c51f..5855aa2eef62 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -200,8 +200,13 @@ static struct rtc_device *rtc_allocate_device(void)
device_initialize(&rtc->dev);
- /* Drivers can revise this default after allocating the device. */
- rtc->set_offset_nsec = NSEC_PER_SEC / 2;
+ /*
+ * Drivers can revise this default after allocating the device.
+ * The default is what most RTCs do: Increment seconds exactly one
+ * second after the write happened. This adds a default transport
+ * time of 5ms which is at least halfway close to reality.
+ */
+ rtc->set_offset_nsec = NSEC_PER_SEC + 5 * NSEC_PER_MSEC;
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c633319cdb91..c5bcd2adc9fe 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -868,6 +868,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
if (retval)
goto cleanup2;
+ /* Set the correct sync offset for the periodic 11 min update */
+ cmos_rtc.rtc->set_offset_nsec = NSEC_PER_SEC / 2;
+
/* export at least the first block of NVRAM */
nvmem_cfg.size = address_space - NVRAM_OFFSET;
if (rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg))
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 2ecd8752b088..972a5b9a629d 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -8,41 +8,41 @@
#include <linux/acpi.h>
#endif
-/*
- * Returns true if a clock update is in progress
- */
-static inline unsigned char mc146818_is_updating(void)
-{
- unsigned char uip;
- unsigned long flags;
-
- spin_lock_irqsave(&rtc_lock, flags);
- uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
- spin_unlock_irqrestore(&rtc_lock, flags);
- return uip;
-}
-
unsigned int mc146818_get_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
unsigned char century = 0;
+ bool retry;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
#endif
+again:
+ spin_lock_irqsave(&rtc_lock, flags);
/*
- * read RTC once any update in progress is done. The update
- * can take just over 2ms. We wait 20ms. There is no need to
- * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
- * If you need to know *exactly* when a second has started, enable
- * periodic update complete interrupts, (via ioctl) and then
- * immediately read /dev/rtc which will block until you get the IRQ.
- * Once the read clears, read the RTC time (again via ioctl). Easy.
+ * Check whether there is an update in progress during which the
+ * readout is unspecified. The maximum update time is ~2ms. Poll
+ * every msec for completion.
+ *
+ * Store the second value before checking UIP so a long lasting NMI
+ * which happens to hit after the UIP check cannot make an update
+ * cycle invisible.
*/
- if (mc146818_is_updating())
- mdelay(20);
+ time->tm_sec = CMOS_READ(RTC_SECONDS);
+
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ mdelay(1);
+ goto again;
+ }
+
+ /* Revalidate the above readout */
+ if (time->tm_sec != CMOS_READ(RTC_SECONDS)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ goto again;
+ }
/*
* Only the values that we read from the RTC are set. We leave
@@ -50,8 +50,6 @@ unsigned int mc146818_get_time(struct rtc_time *time)
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
- spin_lock_irqsave(&rtc_lock, flags);
- time->tm_sec = CMOS_READ(RTC_SECONDS);
time->tm_min = CMOS_READ(RTC_MINUTES);
time->tm_hour = CMOS_READ(RTC_HOURS);
time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
@@ -66,8 +64,24 @@ unsigned int mc146818_get_time(struct rtc_time *time)
century = CMOS_READ(acpi_gbl_FADT.century);
#endif
ctrl = CMOS_READ(RTC_CONTROL);
+ /*
+ * Check for the UIP bit again. If it is set now then
+ * the above values may contain garbage.
+ */
+ retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
+ /*
+ * An NMI might have interrupted the above sequence, so check whether
+ * the seconds value has changed, which indicates that the NMI took
+ * longer than the UIP bit was set. Unlikely, but possible and
+ * there is also virt...
+ */
+ retry |= time->tm_sec != CMOS_READ(RTC_SECONDS);
+
spin_unlock_irqrestore(&rtc_lock, flags);
+ if (retry)
+ goto again;
+
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
time->tm_sec = bcd2bin(time->tm_sec);
@@ -121,7 +135,6 @@ int mc146818_set_time(struct rtc_time *time)
if (yrs > 255) /* They are unsigned */
return -EINVAL;
- spin_lock_irqsave(&rtc_lock, flags);
#ifdef CONFIG_MACH_DECSTATION
real_yrs = yrs;
leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
@@ -150,10 +163,8 @@ int mc146818_set_time(struct rtc_time *time)
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
- if (yrs > 169) {
- spin_unlock_irqrestore(&rtc_lock, flags);
+ if (yrs > 169)
return -EINVAL;
- }
if (yrs >= 100)
yrs -= 100;
@@ -169,6 +180,7 @@ int mc146818_set_time(struct rtc_time *time)
century = bin2bcd(century);
}
+ spin_lock_irqsave(&rtc_lock, flags);
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
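
The mc146818_get_time() rework replaces the blanket mdelay(20) with a read-validate-retry scheme: sample the seconds register, check UIP, read the remaining registers, then re-check UIP and re-read seconds so that an update cycle (or a long NMI) that slipped in forces a retry. A condensed user-space model of that validation loop; the register accesses are stubbed with plain variables:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for CMOS_READ(); the real registers sit behind port I/O */
static volatile unsigned char rtc_seconds, rtc_uip;

static bool read_time(unsigned char *sec)
{
	unsigned char first = rtc_seconds; /* sample before the UIP check */

	if (rtc_uip)                       /* update in progress: retry */
		return false;

	/* ... read minutes/hours/etc. here ... */

	if (rtc_uip)                       /* update began mid-readout */
		return false;
	if (first != rtc_seconds)          /* a long NMI hid an update */
		return false;

	*sec = first;
	return true;
}

int main(void)
{
	unsigned char sec;

	rtc_seconds = 42;
	while (!read_time(&sec))
		;                          /* the driver mdelay(1)s here */
	printf("sec = %u\n", (unsigned)sec);
	return 0;
}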
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
deleted file mode 100644
index 8b70f0520e13..000000000000
--- a/drivers/rtc/systohc.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/rtc.h>
-#include <linux/time.h>
-
-/**
- * rtc_set_ntp_time - Save NTP synchronized time to the RTC
- * @now: Current time of day
- * @target_nsec: pointer for desired now->tv_nsec value
- *
- * Replacement for the NTP platform function update_persistent_clock64
- * that stores time for later retrieval by rtc_hctosys.
- *
- * Returns 0 on successful RTC update, -ENODEV if a RTC update is not
- * possible at all, and various other -errno for specific temporary failure
- * cases.
- *
- * -EPROTO is returned if now.tv_nsec is not close enough to *target_nsec.
- *
- * If temporary failure is indicated the caller should try again 'soon'
- */
-int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec)
-{
- struct rtc_device *rtc;
- struct rtc_time tm;
- struct timespec64 to_set;
- int err = -ENODEV;
- bool ok;
-
- rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
- if (!rtc)
- goto out_err;
-
- if (!rtc->ops || !rtc->ops->set_time)
- goto out_close;
-
- /* Compute the value of tv_nsec we require the caller to supply in
- * now.tv_nsec. This is the value such that (now +
- * set_offset_nsec).tv_nsec == 0.
- */
- set_normalized_timespec64(&to_set, 0, -rtc->set_offset_nsec);
- *target_nsec = to_set.tv_nsec;
-
- /* The ntp code must call this with the correct value in tv_nsec, if
- * it does not we update target_nsec and return EPROTO to make the ntp
- * code try again later.
- */
- ok = rtc_tv_nsec_ok(rtc->set_offset_nsec, &to_set, &now);
- if (!ok) {
- err = -EPROTO;
- goto out_close;
- }
-
- rtc_time64_to_tm(to_set.tv_sec, &tm);
-
- err = rtc_set_time(rtc, &tm);
-
-out_close:
- rtc_class_close(rtc);
-out_err:
- return err;
-}
diff --git a/include/dt-bindings/clock/ingenic,sysost.h b/include/dt-bindings/clock/ingenic,sysost.h
index 9ac88e90babf..063791b01ab3 100644
--- a/include/dt-bindings/clock/ingenic,sysost.h
+++ b/include/dt-bindings/clock/ingenic,sysost.h
@@ -1,12 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * This header provides clock numbers for the ingenic,tcu DT binding.
+ * This header provides clock numbers for the Ingenic OST DT binding.
*/
#ifndef __DT_BINDINGS_CLOCK_INGENIC_OST_H__
#define __DT_BINDINGS_CLOCK_INGENIC_OST_H__
-#define OST_CLK_PERCPU_TIMER 0
-#define OST_CLK_GLOBAL_TIMER 1
+#define OST_CLK_PERCPU_TIMER 1
+#define OST_CLK_GLOBAL_TIMER 0
+#define OST_CLK_PERCPU_TIMER0 1
+#define OST_CLK_PERCPU_TIMER1 2
+#define OST_CLK_PERCPU_TIMER2 3
+#define OST_CLK_PERCPU_TIMER3 4
#endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 107cedd7019a..bb5e7b0a4274 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -447,6 +447,10 @@ static inline void hrtimer_restart(struct hrtimer *timer)
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ * @timer: the timer to read
+ */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
return __hrtimer_get_remaining(timer, false);
@@ -458,7 +462,7 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
/**
- * hrtimer_is_queued = check, whether the timer is on one of the queues
+ * hrtimer_is_queued - check whether the timer is on one of the queues
* @timer: Timer to check
*
* Returns: True if the timer is queued, false otherwise
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 22d1575e4991..b829382de6c3 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -110,13 +110,36 @@ struct rtc_device {
/* Some hardware can't support UIE mode */
int uie_unsupported;
- /* Number of nsec it takes to set the RTC clock. This influences when
- * the set ops are called. An offset:
- * - of 0.5 s will call RTC set for wall clock time 10.0 s at 9.5 s
- * - of 1.5 s will call RTC set for wall clock time 10.0 s at 8.5 s
- * - of -0.5 s will call RTC set for wall clock time 10.0 s at 10.5 s
+ /*
+ * This offset specifies the update timing of the RTC.
+ *
+ * tsched t1 write(t2.tv_sec - 1sec) t2 RTC increments seconds
+ *
+ * The offset defines how tsched is computed so that the write to
+ * the RTC (t2.tv_sec - 1sec) is correct versus the time required
+ * for the transport of the write and the time which the RTC needs
+ * to increment seconds the first time after the write (t2).
+ *
+ * For direct accessible RTCs tsched ~= t1 because the write time
+ * is negligible. For RTCs behind slow busses the transport time is
+ * significant and has to be taken into account.
+ *
+ * The time between the write (t1) and the first increment after
+ * the write (t2) is RTC specific. For an MC146818 RTC it's 500ms,
+ * for many others it's exactly 1 second. Consult the datasheet.
+ *
+ * The value of this offset is also used to calculate the value to
+ * be written (t2.tv_sec - 1sec) at tsched.
+ *
+ * The default is NSEC_PER_SEC plus a 5 msec default transport time,
+ * matching what rtc_allocate_device() sets. The offset can be
+ * adjusted by drivers so the calculation of the value to be written
+ * at tsched becomes correct:
+ *
+ * newval = tsched + set_offset_nsec - NSEC_PER_SEC
+ * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0
*/
- long set_offset_nsec;
+ unsigned long set_offset_nsec;
bool registered;
@@ -165,7 +188,6 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
-extern int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec);
int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
@@ -205,39 +227,6 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
-/* Determine if we can call to driver to set the time. Drivers can only be
- * called to set a second aligned time value, and the field set_offset_nsec
- * specifies how far away from the second aligned time to call the driver.
- *
- * This also computes 'to_set' which is the time we are trying to set, and has
- * a zero in tv_nsecs, such that:
- * to_set - set_delay_nsec == now +/- FUZZ
- *
- */
-static inline bool rtc_tv_nsec_ok(s64 set_offset_nsec,
- struct timespec64 *to_set,
- const struct timespec64 *now)
-{
- /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
- const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
- struct timespec64 delay = {.tv_sec = 0,
- .tv_nsec = set_offset_nsec};
-
- *to_set = timespec64_add(*now, delay);
-
- if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
- to_set->tv_nsec = 0;
- return true;
- }
-
- if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
- to_set->tv_sec++;
- to_set->tv_nsec = 0;
- return true;
- }
- return false;
-}
-
#define rtc_register_device(device) \
__rtc_register_device(THIS_MODULE, device)
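
The timing diagram in the new set_offset_nsec comment is easiest to verify with concrete numbers. A sketch assuming an RTC that increments one second after the write plus a 5 ms transport time (set_offset_nsec = 1.005 s); the t2 = 100 s instant is arbitrary:

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	int64_t t2 = 100 * NSEC_PER_SEC;   /* next RTC seconds increment */
	int64_t set_offset_nsec = NSEC_PER_SEC + 5000000;

	int64_t tsched = t2 - set_offset_nsec; /* 98.995 s: when to write */
	int64_t newval = tsched + set_offset_nsec - NSEC_PER_SEC;

	/* newval = t2 - 1 s = 99 s, and tsched + offset is second aligned */
	printf("tsched = %" PRId64 " ns, newval = %" PRId64 " ns, aligned = %d\n",
	       tsched, newval,
	       (tsched + set_offset_nsec) % NSEC_PER_SEC == 0);
	return 0;
}

Writing 99 s at 98.995 s means the RTC's next seconds increment (to 100 s) lands exactly when clock REALTIME crosses 100 s.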
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 7f7e4a3f4394..929d3f3937c0 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -303,6 +303,8 @@ extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
extern int update_persistent_clock64(struct timespec64 now);
+#endif
#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index d10bc7e73b41..fda13c9d1256 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -193,7 +193,6 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
#define del_singleshot_timer_sync(t) del_timer_sync(t)
extern void init_timers(void);
-extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
diff --git a/include/linux/timex.h b/include/linux/timex.h
index ce0859763670..9c2e54faf9b7 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -157,7 +157,6 @@ extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex *
extern void hardpps(const struct timespec64 *, const struct timespec64 *);
int read_current_timer(unsigned long *timer_val);
-void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 387b4bef7dd1..743c852e10f2 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1284,7 +1284,7 @@ int hrtimer_cancel(struct hrtimer *timer)
EXPORT_SYMBOL_GPL(hrtimer_cancel);
/**
- * hrtimer_get_remaining - get remaining time for the timer
+ * __hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
* @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index eddcf4970444..a5cffe2a1770 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -59,7 +59,8 @@ static struct clocksource clocksource_jiffies = {
};
__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
-__cacheline_aligned_in_smp seqcount_t jiffies_seq;
+__cacheline_aligned_in_smp seqcount_raw_spinlock_t jiffies_seq =
+ SEQCNT_RAW_SPINLOCK_ZERO(jiffies_seq, &jiffies_lock);
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
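
Backing jiffies_seq with jiffies_lock turns it into a seqcount_raw_spinlock_t, so lockdep can verify that writers hold the lock; readers such as the 32-bit get_jiffies_64() above are unchanged. A toy user-space model of why the reader retry loop exists at all on 32-bit, where the 64-bit value is two separate stores; the kernel's seqcount adds the required memory barriers, which this sketch papers over with sequentially consistent atomics:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* toy seqcount: even = stable, odd = writer in progress */
static atomic_uint seq;
static uint32_t jiffies_hi, jiffies_lo; /* 64-bit value in two halves */

static uint64_t read_jiffies64(void)
{
	unsigned int s;
	uint64_t val;

	do {
		while ((s = atomic_load(&seq)) & 1)
			;                      /* writer active: spin */
		val = ((uint64_t)jiffies_hi << 32) | jiffies_lo;
	} while (atomic_load(&seq) != s);      /* torn read: retry */

	return val;
}

int main(void)
{
	jiffies_lo = 42;
	printf("%llu\n", (unsigned long long)read_jiffies64());
	return 0;
}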
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 069ca78fb0bf..7404d3831527 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -494,65 +494,74 @@ out:
return leap;
}
+#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
-static DECLARE_DELAYED_WORK(sync_work, sync_hw_clock);
-
-static void sched_sync_hw_clock(struct timespec64 now,
- unsigned long target_nsec, bool fail)
+static DECLARE_WORK(sync_work, sync_hw_clock);
+static struct hrtimer sync_hrtimer;
+#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)
+static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
- struct timespec64 next;
-
- ktime_get_real_ts64(&next);
- if (!fail)
- next.tv_sec = 659;
- else {
- /*
- * Try again as soon as possible. Delaying long periods
- * decreases the accuracy of the work queue timer. Due to this
- * the algorithm is very likely to require a short-sleep retry
- * after the above long sleep to synchronize ts_nsec.
- */
- next.tv_sec = 0;
- }
-
- /* Compute the needed delay that will get to tv_nsec == target_nsec */
- next.tv_nsec = target_nsec - next.tv_nsec;
- if (next.tv_nsec <= 0)
- next.tv_nsec += NSEC_PER_SEC;
- if (next.tv_nsec >= NSEC_PER_SEC) {
- next.tv_sec++;
- next.tv_nsec -= NSEC_PER_SEC;
- }
+ queue_work(system_power_efficient_wq, &sync_work);
- queue_delayed_work(system_power_efficient_wq, &sync_work,
- timespec64_to_jiffies(&next));
+ return HRTIMER_NORESTART;
}
-static void sync_rtc_clock(void)
+static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
- unsigned long target_nsec;
- struct timespec64 adjust, now;
- int rc;
+ ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
- if (!IS_ENABLED(CONFIG_RTC_SYSTOHC))
- return;
+ if (retry)
+ exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
+ else
+ exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
- ktime_get_real_ts64(&now);
+ hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
+}
- adjust = now;
- if (persistent_clock_is_local)
- adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
+/*
+ * Check whether @now is correct versus the required time to update the RTC
+ * and calculate the value which needs to be written to the RTC so that the
+ * next seconds increment of the RTC after the write is aligned with the next
+ * seconds increment of clock REALTIME.
+ *
+ * tsched t1 write(t2.tv_sec - 1sec) t2 RTC increments seconds
+ *
+ * t2.tv_nsec == 0
+ * tsched = t2 - set_offset_nsec
+ * newval = t2 - NSEC_PER_SEC
+ *
+ * ==> newval = tsched + set_offset_nsec - NSEC_PER_SEC
+ *
+ * As the execution of this code is not guaranteed to happen exactly at
+ * tsched this allows it to happen within a fuzzy region:
+ *
+ * abs(now - tsched) < FUZZ
+ *
+ * If @now is not inside the allowed window the function returns false.
+ */
+static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
+ struct timespec64 *to_set,
+ const struct timespec64 *now)
+{
+ /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
+ const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
+ struct timespec64 delay = {.tv_sec = -1,
+ .tv_nsec = set_offset_nsec};
- /*
- * The current RTC in use will provide the target_nsec it wants to be
- * called at, and does rtc_tv_nsec_ok internally.
- */
- rc = rtc_set_ntp_time(adjust, &target_nsec);
- if (rc == -ENODEV)
- return;
+ *to_set = timespec64_add(*now, delay);
+
+ if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
+ to_set->tv_nsec = 0;
+ return true;
+ }
- sched_sync_hw_clock(now, target_nsec, rc);
+ if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
+ to_set->tv_sec++;
+ to_set->tv_nsec = 0;
+ return true;
+ }
+ return false;
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -560,48 +569,47 @@ int __weak update_persistent_clock64(struct timespec64 now64)
{
return -ENODEV;
}
+#else
+static inline int update_persistent_clock64(struct timespec64 now64)
+{
+ return -ENODEV;
+}
#endif
-static bool sync_cmos_clock(void)
+#ifdef CONFIG_RTC_SYSTOHC
+/* Save NTP synchronized time to the RTC */
+static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
- static bool no_cmos;
- struct timespec64 now;
- struct timespec64 adjust;
- int rc = -EPROTO;
- long target_nsec = NSEC_PER_SEC / 2;
+ struct rtc_device *rtc;
+ struct rtc_time tm;
+ int err = -ENODEV;
- if (!IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE))
- return false;
+ rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
+ if (!rtc)
+ return -ENODEV;
- if (no_cmos)
- return false;
+ if (!rtc->ops || !rtc->ops->set_time)
+ goto out_close;
- /*
- * Historically update_persistent_clock64() has followed x86
- * semantics, which match the MC146818A/etc RTC. This RTC will store
- * 'adjust' and then in .5s it will advance once second.
- *
- * Architectures are strongly encouraged to use rtclib and not
- * implement this legacy API.
- */
- ktime_get_real_ts64(&now);
- if (rtc_tv_nsec_ok(-1 * target_nsec, &adjust, &now)) {
- if (persistent_clock_is_local)
- adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
- rc = update_persistent_clock64(adjust);
- /*
- * The machine does not support update_persistent_clock64 even
- * though it defines CONFIG_GENERIC_CMOS_UPDATE.
- */
- if (rc == -ENODEV) {
- no_cmos = true;
- return false;
- }
+ /* First call might not have the correct offset */
+ if (*offset_nsec == rtc->set_offset_nsec) {
+ rtc_time64_to_tm(to_set->tv_sec, &tm);
+ err = rtc_set_time(rtc, &tm);
+ } else {
+ /* Store the update offset and let the caller try again */
+ *offset_nsec = rtc->set_offset_nsec;
+ err = -EAGAIN;
}
-
- sched_sync_hw_clock(now, target_nsec, rc);
- return true;
+out_close:
+ rtc_class_close(rtc);
+ return err;
+}
+#else
+static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
+{
+ return -ENODEV;
}
+#endif
/*
* If we have an externally synchronized Linux clock, then update RTC clock
@@ -613,24 +621,64 @@ static bool sync_cmos_clock(void)
*/
static void sync_hw_clock(struct work_struct *work)
{
- if (!ntp_synced())
- return;
+ /*
+ * The default synchronization offset is 500ms for the deprecated
+ * update_persistent_clock64() under the assumption that it uses
+ * the infamous CMOS clock (MC146818).
+ */
+ static unsigned long offset_nsec = NSEC_PER_SEC / 2;
+ struct timespec64 now, to_set;
+ int res = -EAGAIN;
- if (sync_cmos_clock())
+ /*
+ * Don't update if STA_UNSYNC is set, or if ntp_notify_cmos_timer()
+ * managed to schedule the work between the timer firing and the
+ * work being able to rearm the timer. Wait for the timer to expire.
+ */
+ if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
return;
- sync_rtc_clock();
+ ktime_get_real_ts64(&now);
+ /* If @now is not in the allowed window, try again */
+ if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
+ goto rearm;
+
+ /* Take timezone adjusted RTCs into account */
+ if (persistent_clock_is_local)
+ to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);
+
+ /* Try the legacy RTC first. */
+ res = update_persistent_clock64(to_set);
+ if (res != -ENODEV)
+ goto rearm;
+
+ /* Try the RTC class */
+ res = update_rtc(&to_set, &offset_nsec);
+ if (res == -ENODEV)
+ return;
+rearm:
+ sched_sync_hw_clock(offset_nsec, res != 0);
}
void ntp_notify_cmos_timer(void)
{
- if (!ntp_synced())
- return;
+ /*
+ * If the work is currently executing but has not yet rearmed the
+ * timer, this queues the work again immediately. No big issue, just
+ * a pointless extra work item.
+ */
+ if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
+ queue_work(system_power_efficient_wq, &sync_work);
+}
- if (IS_ENABLED(CONFIG_GENERIC_CMOS_UPDATE) ||
- IS_ENABLED(CONFIG_RTC_SYSTOHC))
- queue_delayed_work(system_power_efficient_wq, &sync_work, 0);
+static void __init ntp_init_cmos_sync(void)
+{
+ hrtimer_init(&sync_hrtimer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ sync_hrtimer.function = sync_timer_callback;
}
+#else /* !(CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC) */
+static inline void __init ntp_init_cmos_sync(void) { }
+#endif /* CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC */
/*
* Propagate a new txc->status value into the NTP state:
@@ -1044,4 +1092,5 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
void __init ntp_init(void)
{
ntp_clear();
+ ntp_init_cmos_sync();
}
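
The window check that replaces the old rtc_set_ntp_time() round trip can be exercised standalone. A user-space sketch of the rtc_tv_nsec_ok() logic; HZ=250 and the 1.005 s offset are illustrative assumptions, and the kernel uses timespec64 plus set_normalized_timespec64() rather than the open-coded normalization here:

#include <stdbool.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL
#define FUZZ (5 * (NSEC_PER_SEC / 250)) /* 5 jiffies at an assumed HZ=250 */

struct ts { long long sec, nsec; };

/* to_set = now + {-1 s, offset}; accept only if the result lands
 * within FUZZ of a second boundary, truncating it to the boundary */
static bool nsec_ok(long long offset_nsec, struct ts *to_set,
		    const struct ts *now)
{
	long long nsec = now->nsec + offset_nsec;

	to_set->sec = now->sec - 1 + nsec / NSEC_PER_SEC;
	to_set->nsec = nsec % NSEC_PER_SEC;

	if (to_set->nsec < FUZZ) {
		to_set->nsec = 0;
		return true;
	}
	if (to_set->nsec > NSEC_PER_SEC - FUZZ) {
		to_set->sec++;
		to_set->nsec = 0;
		return true;
	}
	return false;
}

int main(void)
{
	long long offset = NSEC_PER_SEC + 5000000; /* assumed 1.005 s */
	struct ts now = { 98, 994000000 };         /* ~1 ms before tsched */
	struct ts to_set;

	if (nsec_ok(offset, &to_set, &now))
		printf("write %lld s to the RTC now\n", to_set.sec); /* 99 */
	return 0;
}

Outside the roughly 20 ms window the check fails and sync_hw_clock() rearms the hrtimer to retry near the next opportunity.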
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 908ecaa65fc3..23d1b74c3065 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -12,4 +12,11 @@ extern int __do_adjtimex(struct __kernel_timex *txc,
const struct timespec64 *ts,
s32 *time_tai, struct audit_ntp_data *ad);
extern void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts);
+
+#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
+extern void ntp_notify_cmos_timer(void);
+#else
+static inline void ntp_notify_cmos_timer(void) { }
+#endif
+
#endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 36d7464c8962..5a23829372c7 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -331,7 +331,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
bc_local = tick_do_periodic_broadcast();
if (clockevent_state_oneshot(dev)) {
- ktime_t next = ktime_add(dev->next_event, tick_period);
+ ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);
clockevents_program_event(dev, next, true);
}
@@ -877,6 +877,22 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
}
}
+static inline ktime_t tick_get_next_period(void)
+{
+ ktime_t next;
+
+ /*
+ * Protect against concurrent updates (store/load tearing on
+ * 32bit). It does not matter if the time is already in the
+ * past. The broadcast device which is about to be programmed will
+ * fire in any case.
+ */
+ raw_spin_lock(&jiffies_lock);
+ next = tick_next_period;
+ raw_spin_unlock(&jiffies_lock);
+ return next;
+}
+
/**
* tick_broadcast_setup_oneshot - setup the broadcast device
*/
@@ -905,10 +921,11 @@ static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
tick_broadcast_oneshot_mask, tmpmask);
if (was_periodic && !cpumask_empty(tmpmask)) {
+ ktime_t nextevt = tick_get_next_period();
+
clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
- tick_broadcast_init_next_event(tmpmask,
- tick_next_period);
- tick_broadcast_set_event(bc, cpu, tick_next_period);
+ tick_broadcast_init_next_event(tmpmask, nextevt);
+ tick_broadcast_set_event(bc, cpu, nextevt);
} else
bc->next_event = KTIME_MAX;
} else {
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 6c9c342dd0e5..a03764df5366 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -27,10 +27,11 @@
*/
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
- * Tick next event: keeps track of the tick time
+ * Tick next event: keeps track of the tick time. It's updated by the
+ * CPU which handles the tick and is protected by jiffies_lock. There
+ * is no requirement to hold the jiffies seqcount write side for it.
*/
ktime_t tick_next_period;
-ktime_t tick_period;
/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -88,7 +89,7 @@ static void tick_periodic(int cpu)
write_seqcount_begin(&jiffies_seq);
/* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
+ tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);
do_timer(1);
write_seqcount_end(&jiffies_seq);
@@ -127,7 +128,7 @@ void tick_handle_periodic(struct clock_event_device *dev)
* Setup the next period for devices, which do not have
* periodic mode:
*/
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
if (!clockevents_program_event(dev, next, false))
return;
@@ -173,7 +174,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
}
}
}
@@ -220,7 +221,6 @@ static void tick_setup_device(struct tick_device *td,
tick_do_timer_cpu = cpu;
tick_next_period = ktime_get();
- tick_period = NSEC_PER_SEC / HZ;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set
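
tick_period only ever held NSEC_PER_SEC / HZ, which the compile-time constant TICK_NSEC already encodes (TICK_NSEC additionally rounds, which makes no difference for HZ values that divide NSEC_PER_SEC evenly), so the variable and its cache traffic can go away. A trivial sketch of the replacement arithmetic; HZ=250 is an illustrative config value:

#include <stdio.h>

#define HZ 250                 /* illustrative CONFIG_HZ */
#define NSEC_PER_SEC 1000000000LL
/* the kernel defines TICK_NSEC as ((NSEC_PER_SEC + HZ / 2) / HZ) */
#define TICK_NSEC ((NSEC_PER_SEC + HZ / 2) / HZ)

int main(void)
{
	long long next = 0;
	int i;

	/* what ktime_add_ns(next, TICK_NSEC) now computes on each tick */
	for (i = 0; i < 3; i++)
		next += TICK_NSEC;

	printf("tick = %lld ns, third tick at %lld ns\n",
	       (long long)TICK_NSEC, next);
	return 0;
}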
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7b2496136729..7a981c9e87a4 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -15,7 +15,6 @@
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
-extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 81632cd5e3b7..a9e68936822d 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
+#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
@@ -44,7 +45,9 @@ struct tick_sched *tick_get_tick_sched(int cpu)
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
- * The time, when the last jiffy update happened. Protected by jiffies_lock.
+ * The time when the last jiffy update happened. Write access must hold
+ * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
+ * consistent view of jiffies and last_jiffies_update.
*/
static ktime_t last_jiffies_update;
@@ -53,50 +56,97 @@ static ktime_t last_jiffies_update;
*/
static void tick_do_update_jiffies64(ktime_t now)
{
- unsigned long ticks = 0;
- ktime_t delta;
+ unsigned long ticks = 1;
+ ktime_t delta, nextp;
/*
- * Do a quick check without holding jiffies_lock:
- * The READ_ONCE() pairs with two updates done later in this function.
+ * 64bit can do a quick check without holding the jiffies lock and
+ * without looking at the sequence count. The smp_load_acquire()
+ * pairs with the update done later in this function.
+ *
+ * 32bit cannot do that because the store of tick_next_period
+ * consists of two 32bit stores and the first store could move it
+ * to a random point in the future.
*/
- delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
- if (delta < tick_period)
- return;
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ if (ktime_before(now, smp_load_acquire(&tick_next_period)))
+ return;
+ } else {
+ unsigned int seq;
- /* Reevaluate with jiffies_lock held */
+ /*
+ * Avoid contention on jiffies_lock and protect the quick
+ * check with the sequence count.
+ */
+ do {
+ seq = read_seqcount_begin(&jiffies_seq);
+ nextp = tick_next_period;
+ } while (read_seqcount_retry(&jiffies_seq, seq));
+
+ if (ktime_before(now, nextp))
+ return;
+ }
+
+ /* Quick check failed, i.e. update is required. */
raw_spin_lock(&jiffies_lock);
+ /*
+ * Reevaluate with the lock held. Another CPU might have done the
+ * update already.
+ */
+ if (ktime_before(now, tick_next_period)) {
+ raw_spin_unlock(&jiffies_lock);
+ return;
+ }
+
write_seqcount_begin(&jiffies_seq);
- delta = ktime_sub(now, last_jiffies_update);
- if (delta >= tick_period) {
+ delta = ktime_sub(now, tick_next_period);
+ if (unlikely(delta >= TICK_NSEC)) {
+ /* Slow path for long idle sleep times */
+ s64 incr = TICK_NSEC;
- delta = ktime_sub(delta, tick_period);
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add(last_jiffies_update, tick_period));
+ ticks += ktime_divns(delta, incr);
- /* Slow path for long timeouts */
- if (unlikely(delta >= tick_period)) {
- s64 incr = ktime_to_ns(tick_period);
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ incr * ticks);
+ } else {
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ TICK_NSEC);
+ }
- ticks = ktime_divns(delta, incr);
+ /* Advance jiffies to complete the jiffies_seq protected job */
+ jiffies_64 += ticks;
- /* Pairs with the lockless read in this function. */
- WRITE_ONCE(last_jiffies_update,
- ktime_add_ns(last_jiffies_update,
- incr * ticks));
- }
- do_timer(++ticks);
+ /*
+ * Keep the tick_next_period variable up to date.
+ */
+ nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);
- /* Keep the tick_next_period variable up to date */
- tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ /*
+ * Pairs with smp_load_acquire() in the lockless quick
+ * check above and ensures that the update to jiffies_64 is
+ * not reordered vs. the store to tick_next_period, neither
+ * by the compiler nor by the CPU.
+ */
+ smp_store_release(&tick_next_period, nextp);
} else {
- write_seqcount_end(&jiffies_seq);
- raw_spin_unlock(&jiffies_lock);
- return;
+ /*
+ * A plain store is good enough on 32bit as the quick check
+ * above is protected by the sequence count.
+ */
+ tick_next_period = nextp;
}
+
+ /*
+ * Release the sequence count. calc_global_load() below is not
+ * protected by it, but jiffies_lock needs to be held to prevent
+ * concurrent invocations.
+ */
write_seqcount_end(&jiffies_seq);
+
+ calc_global_load();
+
raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
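
The rewritten tick_do_update_jiffies64() relies on a publish pattern: jiffies_64 is advanced inside the seqcount, then tick_next_period is store-released, so a reader that load-acquires tick_next_period is guaranteed to see the matching jiffies_64. A stand-alone model using C11 atomics in place of the kernel's smp_store_release()/smp_load_acquire() (illustrative mapping only):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic int64_t next_period;	/* stand-in for tick_next_period */
    static int64_t jiffies64;		/* stand-in for jiffies_64 */

    /* Reader quick check: the acquire guarantees that a reader which
     * observes the new period also observes the preceding jiffies64
     * update. */
    bool before_next_period(int64_t now)
    {
    	return now < atomic_load_explicit(&next_period,
    					  memory_order_acquire);
    }

    /* Update side: advance jiffies64 first, then publish the period. */
    void publish_update(int64_t nextp)
    {
    	jiffies64++;
    	atomic_store_explicit(&next_period, nextp, memory_order_release);
    }
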
@@ -661,7 +711,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
/* Forward the time to expire in the future */
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
@@ -1230,7 +1280,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
if (unlikely(ts->tick_stopped))
return;
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}
@@ -1267,7 +1317,7 @@ static void tick_nohz_switch_to_nohz(void)
next = tick_init_jiffy_update();
hrtimer_set_expires(&ts->sched_timer, next);
- hrtimer_forward_now(&ts->sched_timer, tick_period);
+ hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
@@ -1333,7 +1383,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;
- hrtimer_forward(timer, now, tick_period);
+ hrtimer_forward(timer, now, TICK_NSEC);
return HRTIMER_RESTART;
}
@@ -1367,13 +1417,13 @@ void tick_setup_sched_timer(void)
/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
- u64 offset = ktime_to_ns(tick_period) >> 1;
+ u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}
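
The sched_skew_tick block above staggers each CPU's tick within half a tick period to spread out jiffies_lock contention. A worked example of the arithmetic, assuming HZ=1000 (TICK_NSEC = 1000000) and four possible CPUs:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint64_t tick_nsec = 1000000;	/* TICK_NSEC for HZ=1000 */
    	const unsigned int cpus = 4;		/* num_possible_cpus() */

    	for (unsigned int cpu = 0; cpu < cpus; cpu++)
    		printf("cpu%u offset: %llu ns\n", cpu,
    		       (unsigned long long)((tick_nsec >> 1) / cpus * cpu));
    	/* cpu0: 0, cpu1: 125000, cpu2: 250000, cpu3: 375000 */
    	return 0;
    }
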
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
index 589e0a552129..62e3b46717a6 100644
--- a/kernel/time/timeconv.c
+++ b/kernel/time/timeconv.c
@@ -70,10 +70,10 @@ static const unsigned short __mon_yday[2][13] = {
/**
* time64_to_tm - converts the calendar time to local broken-down time
*
- * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
+ * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
* Coordinated Universal Time (UTC).
- * @offset offset seconds adding to totalsecs.
- * @result pointer to struct tm variable to receive broken-down time
+ * @offset: offset in seconds to add to totalsecs.
+ * @result: pointer to struct tm variable to receive broken-down time
*/
void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
{
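
A usage sketch for the function whose kernel-doc is fixed above, assuming a caller that wants local time at UTC+01:00 (the offset is simply added to totalsecs before the breakdown):

    /* Sketch: break down a UNIX timestamp at UTC+01:00 */
    struct tm tm;

    time64_to_tm(1000000000, 3600, &tm);
    /* 1000000000 is 2001-09-09 01:46:40 UTC, so with +1h:
     * tm.tm_year == 101 (years since 1900), tm.tm_mon == 8 (0-based),
     * tm.tm_mday == 9, tm.tm_hour == 2, tm.tm_min == 46, tm.tm_sec == 40 */
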
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6858a31364b6..74503c0151e5 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -407,6 +407,7 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 c
/**
* update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
* @tkr: Timekeeping readout base from which we take the update
+ * @tkf: Pointer to NMI safe timekeeper
*
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
@@ -436,6 +437,27 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
memcpy(base + 1, base, sizeof(*base));
}
+static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+{
+ struct tk_read_base *tkr;
+ unsigned int seq;
+ u64 now;
+
+ do {
+ seq = raw_read_seqcount_latch(&tkf->seq);
+ tkr = tkf->base + (seq & 0x01);
+ now = ktime_to_ns(tkr->base);
+
+ now += timekeeping_delta_to_ns(tkr,
+ clocksource_delta(
+ tk_clock_read(tkr),
+ tkr->cycle_last,
+ tkr->mask));
+ } while (read_seqcount_latch_retry(&tkf->seq, seq));
+
+ return now;
+}
+
/**
* ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
*
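
The reader relocated above pairs with the latch write side in update_fast_timekeeper(), whose tail is visible at the top of this hunk: seq selects which tk_read_base copy readers use (base[seq & 0x01]), and the writer only touches the copy readers are steered away from. That write side, condensed and annotated:

    /* Latch write side of update_fast_timekeeper(), condensed */
    raw_write_seqcount_latch(&tkf->seq);	/* seq odd: readers -> base[1] */
    memcpy(tkf->base, tkr, sizeof(*tkr));	/* base[0] now unused: update it */
    raw_write_seqcount_latch(&tkf->seq);	/* seq even: readers -> base[0] */
    memcpy(tkf->base + 1, tkf->base, sizeof(*tkr));	/* update base[1] */
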
@@ -462,39 +484,24 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
*
* So reader 6 will observe time going backwards versus reader 5.
*
- * While other CPUs are likely to be able observe that, the only way
+ * While other CPUs are likely to be able to observe that, the only way
* for a CPU local observation is when an NMI hits in the middle of
* the update. Timestamps taken from that NMI context might be ahead
* of the following timestamps. Callers need to be aware of that and
* deal with it.
*/
-static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
-{
- struct tk_read_base *tkr;
- unsigned int seq;
- u64 now;
-
- do {
- seq = raw_read_seqcount_latch(&tkf->seq);
- tkr = tkf->base + (seq & 0x01);
- now = ktime_to_ns(tkr->base);
-
- now += timekeeping_delta_to_ns(tkr,
- clocksource_delta(
- tk_clock_read(tkr),
- tkr->cycle_last,
- tkr->mask));
- } while (read_seqcount_latch_retry(&tkf->seq, seq));
-
- return now;
-}
-
u64 ktime_get_mono_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
+/**
+ * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
+ *
+ * Contrary to ktime_get_mono_fast_ns() this is always correct because the
+ * conversion factor is not affected by NTP/PTP correction.
+ */
u64 ktime_get_raw_fast_ns(void)
{
return __ktime_get_fast_ns(&tk_fast_raw);
@@ -521,6 +528,9 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
* (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
+ *
+ * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
+ * apply as well.
*/
u64 notrace ktime_get_boot_fast_ns(void)
{
@@ -530,9 +540,6 @@ u64 notrace ktime_get_boot_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
-/*
- * See comment for __ktime_get_fast_ns() vs. timestamp ordering
- */
static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
{
struct tk_read_base *tkr;
@@ -557,6 +564,8 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
/**
* ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+ *
+ * See ktime_get_mono_fast_ns() for documentation of the timestamp ordering.
*/
u64 ktime_get_real_fast_ns(void)
{
@@ -654,6 +663,7 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
/**
* pvclock_gtod_register_notifier - register a pvclock timedata update listener
+ * @nb: Pointer to the notifier block to register
*/
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
@@ -673,6 +683,7 @@ EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
* pvclock_gtod_unregister_notifier - unregister a pvclock
* timedata update listener
+ * @nb: Pointer to the notifier block to unregister
*/
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
@@ -763,6 +774,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
/**
* timekeeping_forward_now - update clock to the current time
+ * @tk: Pointer to the timekeeper to update
*
* Forward the current clock to update its state since the last call to
* update_wall_time(). This is useful before significant clock changes,
@@ -1339,7 +1351,7 @@ EXPORT_SYMBOL(do_settimeofday64);
/**
* timekeeping_inject_offset - Adds or subtracts from the current time.
- * @tv: pointer to the timespec variable containing the offset
+ * @ts: Pointer to the timespec variable containing the offset
*
* Adds or subtracts an offset value from the current time.
*/
@@ -1415,9 +1427,8 @@ void timekeeping_warp_clock(void)
}
}
-/**
+/*
* __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
- *
*/
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
@@ -1425,7 +1436,7 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
-/**
+/*
* change_clocksource - Swaps clocksources if a new one is available
*
* Accumulates current time interval and initializes new clocksource
@@ -1548,6 +1559,7 @@ u64 timekeeping_max_deferment(void)
/**
* read_persistent_clock64 - Return time from the persistent clock.
+ * @ts: Pointer to the storage for the readout value
*
* Weak dummy function for arches that do not yet support it.
* Reads the time from the battery backed persistent clock.
@@ -1566,8 +1578,9 @@ void __weak read_persistent_clock64(struct timespec64 *ts)
* from the boot.
*
* Weak dummy function for arches that do not yet support it.
- * wall_time - current time as returned by persistent clock
- * boot_offset - offset that is defined as wall_time - boot_time
+ * @wall_time: current time as returned by persistent clock
+ * @boot_offset: offset that is defined as wall_time - boot_time
+ *
* The default function calculates offset based on the current value of
* local_clock(). This way architectures that support sched_clock() but don't
* support dedicated boot time clock will provide the best estimate of the
@@ -1652,7 +1665,8 @@ static struct timespec64 timekeeping_suspend_time;
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
- * @delta: pointer to a timespec delta value
+ * @tk: Pointer to the timekeeper to be updated
+ * @delta: Pointer to the delta value in timespec64 format
*
* Takes a timespec offset measuring a suspend interval and properly
* adds the sleep offset to the timekeeping variables.
@@ -2023,13 +2037,12 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
}
}
-/**
+/*
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
* Helper function that accumulates the nsecs greater than a second
* from the xtime_nsec field to the xtime_secs field.
* It also calls into the NTP code to handle leapsecond processing.
- *
*/
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
@@ -2071,7 +2084,7 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
return clock_set;
}
-/**
+/*
* logarithmic_accumulation - shifted accumulation of cycles
*
 * This function accumulates a shifted interval of cycles into
@@ -2314,7 +2327,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
return base;
}
-/**
+/*
* timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
*/
static int timekeeping_validate_timex(const struct __kernel_timex *txc)
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index 099737f6f10c..6c2cbd9ef999 100644
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -26,7 +26,7 @@ extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
extern raw_spinlock_t jiffies_lock;
-extern seqcount_t jiffies_seq;
+extern seqcount_raw_spinlock_t jiffies_seq;
#define CS_NAME_LEN 32
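
Changing jiffies_seq to seqcount_raw_spinlock_t ties the seqcount to jiffies_lock, letting lockdep verify that the lock is held around every write section. A declaration sketch following the seqlock.h API (the demo_* names are placeholders):

    /* Sketch: seqcount associated with a raw spinlock (seqlock.h API) */
    static DEFINE_RAW_SPINLOCK(demo_lock);
    static seqcount_raw_spinlock_t demo_seq =
    		SEQCNT_RAW_SPINLOCK_ZERO(demo_seq, &demo_lock);

    /* Write side: lockdep now warns if demo_lock is not held here */
    raw_spin_lock(&demo_lock);
    write_seqcount_begin(&demo_seq);
    /* ... update the protected data ... */
    write_seqcount_end(&demo_seq);
    raw_spin_unlock(&demo_lock);
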
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index c3ad64fb9d8b..8dbc008f8942 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1283,7 +1283,7 @@ static void del_timer_wait_running(struct timer_list *timer)
u32 tf;
tf = READ_ONCE(timer->flags);
- if (!(tf & TIMER_MIGRATING)) {
+ if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
struct timer_base *base = get_timer_base(tf);
/*
@@ -1367,6 +1367,13 @@ int del_timer_sync(struct timer_list *timer)
*/
WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
+ /*
+ * Must be able to sleep on PREEMPT_RT because of the slowpath in
+ * del_timer_wait_running().
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
+ lockdep_assert_preemption_enabled();
+
do {
ret = try_to_del_timer_sync(timer);
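
On PREEMPT_RT the del_timer_wait_running() slowpath can sleep, so calling del_timer_sync() on a non-irqsafe timer with preemption disabled is a bug; the lockdep_assert_preemption_enabled() added above makes that bug visible. A hypothetical caller that the new assertion would flag (not from the patch):

    /* Hypothetical misuse that the new check catches on PREEMPT_RT */
    static void bad_caller(struct timer_list *t)
    {
    	preempt_disable();
    	del_timer_sync(t);	/* may sleep on RT -> lockdep warning */
    	preempt_enable();
    }
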
@@ -1693,29 +1700,6 @@ void timer_clear_idle(void)
}
#endif
-/*
- * Called from the timer interrupt handler to charge one tick to the current
- * process. user_tick is 1 if the tick is user time, 0 for system.
- */
-void update_process_times(int user_tick)
-{
- struct task_struct *p = current;
-
- PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);
-
- /* Note: this timer irq context must be accounted for as well. */
- account_process_tick(p, user_tick);
- run_local_timers();
- rcu_sched_clock_irq(user_tick);
-#ifdef CONFIG_IRQ_WORK
- if (in_irq())
- irq_work_tick();
-#endif
- scheduler_tick();
- if (IS_ENABLED(CONFIG_POSIX_TIMERS))
- run_posix_cpu_timers();
-}
-
/**
* __run_timers - run all expired timers (if any) on this CPU.
* @base: the timer vector to be processed.
@@ -1765,7 +1749,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
/*
* Called by the local, per-CPU timer interrupt on SMP.
*/
-void run_local_timers(void)
+static void run_local_timers(void)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
@@ -1783,6 +1767,29 @@ void run_local_timers(void)
}
/*
+ * Called from the timer interrupt handler to charge one tick to the current
+ * process. user_tick is 1 if the tick is user time, 0 for system.
+ */
+void update_process_times(int user_tick)
+{
+ struct task_struct *p = current;
+
+ PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);
+
+ /* Note: this timer irq context must be accounted for as well. */
+ account_process_tick(p, user_tick);
+ run_local_timers();
+ rcu_sched_clock_irq(user_tick);
+#ifdef CONFIG_IRQ_WORK
+ if (in_irq())
+ irq_work_tick();
+#endif
+ scheduler_tick();
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ run_posix_cpu_timers();
+}
+
+/*
* Since schedule_timeout()'s timer is defined on the stack, it must store
* the target task on the stack as well.
*/
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index acb326f5f50a..6939140ab7c5 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -42,24 +42,11 @@ static void SEQ_printf(struct seq_file *m, const char *fmt, ...)
va_end(args);
}
-static void print_name_offset(struct seq_file *m, void *sym)
-{
- char symname[KSYM_NAME_LEN];
-
- if (lookup_symbol_name((unsigned long)sym, symname) < 0)
- SEQ_printf(m, "<%pK>", sym);
- else
- SEQ_printf(m, "%s", symname);
-}
-
static void
print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
int idx, u64 now)
{
- SEQ_printf(m, " #%d: ", idx);
- print_name_offset(m, taddr);
- SEQ_printf(m, ", ");
- print_name_offset(m, timer->function);
+ SEQ_printf(m, " #%d: <%pK>, %ps", idx, taddr, timer->function);
SEQ_printf(m, ", S:%02x", timer->state);
SEQ_printf(m, "\n");
SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n",
@@ -116,9 +103,7 @@ print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
SEQ_printf(m, " .resolution: %u nsecs\n", hrtimer_resolution);
- SEQ_printf(m, " .get_time: ");
- print_name_offset(m, base->get_time);
- SEQ_printf(m, "\n");
+ SEQ_printf(m, " .get_time: %ps\n", base->get_time);
#ifdef CONFIG_HIGH_RES_TIMERS
SEQ_printf(m, " .offset: %Lu nsecs\n",
(unsigned long long) ktime_to_ns(base->offset));
@@ -218,42 +203,29 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
SEQ_printf(m, " next_event: %Ld nsecs\n",
(unsigned long long) ktime_to_ns(dev->next_event));
- SEQ_printf(m, " set_next_event: ");
- print_name_offset(m, dev->set_next_event);
- SEQ_printf(m, "\n");
+ SEQ_printf(m, " set_next_event: %ps\n", dev->set_next_event);
- if (dev->set_state_shutdown) {
- SEQ_printf(m, " shutdown: ");
- print_name_offset(m, dev->set_state_shutdown);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_shutdown)
+ SEQ_printf(m, " shutdown: %ps\n",
+ dev->set_state_shutdown);
- if (dev->set_state_periodic) {
- SEQ_printf(m, " periodic: ");
- print_name_offset(m, dev->set_state_periodic);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_periodic)
+ SEQ_printf(m, " periodic: %ps\n",
+ dev->set_state_periodic);
- if (dev->set_state_oneshot) {
- SEQ_printf(m, " oneshot: ");
- print_name_offset(m, dev->set_state_oneshot);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_oneshot)
+ SEQ_printf(m, " oneshot: %ps\n",
+ dev->set_state_oneshot);
- if (dev->set_state_oneshot_stopped) {
- SEQ_printf(m, " oneshot stopped: ");
- print_name_offset(m, dev->set_state_oneshot_stopped);
- SEQ_printf(m, "\n");
- }
+ if (dev->set_state_oneshot_stopped)
+ SEQ_printf(m, " oneshot stopped: %ps\n",
+ dev->set_state_oneshot_stopped);
- if (dev->tick_resume) {
- SEQ_printf(m, " resume: ");
- print_name_offset(m, dev->tick_resume);
- SEQ_printf(m, "\n");
- }
+ if (dev->tick_resume)
+ SEQ_printf(m, " resume: %ps\n",
+ dev->tick_resume);
- SEQ_printf(m, " event_handler: ");
- print_name_offset(m, dev->event_handler);
+ SEQ_printf(m, " event_handler: %ps\n", dev->event_handler);
SEQ_printf(m, "\n");
SEQ_printf(m, " retries: %lu\n", dev->retries);
SEQ_printf(m, "\n");
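
The timer_list.c rewrites above drop the open-coded symbol lookup in favor of the vsprintf pointer extensions: %ps resolves a function pointer to its symbol name, while %pK prints the raw address only when kptr_restrict allows it (hashed or zeroed otherwise). A minimal usage sketch:

    /* Sketch of the printk pointer extensions used above */
    pr_info("handler: %ps\n", dev->event_handler);	/* e.g. "tick_handle_periodic" */
    pr_info("timer:   %pK\n", taddr);	/* raw address, subject to kptr_restrict */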