author     Will Deacon <will.deacon@arm.com>    2019-05-01 15:45:36 +0100
committer  Will Deacon <will.deacon@arm.com>    2019-05-01 15:45:36 +0100
commit     24cf262da1ad303fc940c798aab0bd1bd50e3fc2 (patch)
tree       da38a3ae51128dfb959fb53d633201ab4ffb4209 /drivers
parent     50abbe19623e08e2aa34e0e526bd6115569f3dc3 (diff)
parent     0ea415390cd345b7d09e8c9ebd4b68adfe873043 (diff)
Merge branch 'for-next/timers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into for-next/core
Conflicts:
	arch/arm64/Kconfig
	arch/arm64/include/asm/arch_timer.h
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 130
-rw-r--r--  drivers/watchdog/sbsa_gwdt.c         |   2
2 files changed, 58 insertions(+), 74 deletions(-)
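The arm_arch_timer.c changes below drop the static-key-guarded erratum paths in favour of per-CPU workaround lookups: the new arch_counter_get_cnt{p,v}ct[_stable]() wrappers in the first hunk call helpers supplied by the architecture, and set_next_event selection now goes through erratum_handler()/has_erratum_handler(). Those helpers and macros live in arch/arm64/include/asm/arch_timer.h, which is outside this drivers-only diff; the following is a minimal sketch of what they are assumed to look like (the macro bodies and arch_timer_reg_read_stable() are assumptions, not part of this commit):

/*
 * Sketch only: plausible arch-header definitions behind the call sites
 * seen in the diff below.
 */
#define has_erratum_handler(h)						\
({									\
	const struct arch_timer_erratum_workaround *__wa;		\
	__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
	(__wa && __wa->h);						\
})

#define erratum_handler(h)						\
({									\
	const struct arch_timer_erratum_workaround *__wa;		\
	__wa = __this_cpu_read(timer_unstable_counter_workaround);	\
	/* fall back to the default driver callback when no workaround */ \
	(__wa && __wa->h) ? __wa->h : arch_timer_##h;			\
})

static inline u64 __arch_counter_get_cntvct_stable(void)
{
	isb();						/* order the counter read */
	return arch_timer_reg_read_stable(cntvct_el0);	/* erratum-aware read */
}

static inline u64 __arch_counter_get_cntvct(void)
{
	isb();
	return read_sysreg(cntvct_el0);			/* plain sysreg read */
}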
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 6cc8aff83805..9a2f23df7610 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -152,6 +152,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
+static u64 arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct_stable();
+}
+
+static u64 arch_counter_get_cntpct(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static u64 arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct_stable();
+}
+
+static u64 arch_counter_get_cntvct(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
/*
* Default to cp15 based access because arm64 uses this function for
* sched_clock() before DT is probed and the cp15 method is guaranteed
@@ -319,13 +339,6 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static u64 notrace arm64_1188873_read_cntvct_el0(void)
-{
- return read_sysreg(cntvct_el0);
-}
-#endif
-
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
* The low bits of the counter registers are indeterminate while bit 10 or
@@ -372,8 +385,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
@@ -457,14 +469,6 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
- {
- .match_type = ate_match_local_cap_id,
- .id = (void *)ARM64_WORKAROUND_1188873,
- .desc = "ARM erratum 1188873",
- .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
- },
-#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,
@@ -552,11 +556,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- /*
- * Use the locked version, as we're called from the CPU
- * hotplug framework. Otherwise, we end-up in deadlock-land.
- */
- static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
+ if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+ atomic_set(&timer_unstable_counter_workaround_in_use, 1);
/*
* Don't use the vdso fastpath if errata require using the
@@ -573,7 +574,7 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
void *arg)
{
- const struct arch_timer_erratum_workaround *wa;
+ const struct arch_timer_erratum_workaround *wa, *__wa;
ate_match_fn_t match_fn = NULL;
bool local = false;
@@ -597,53 +598,32 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
if (!wa)
return;
- if (needs_unstable_timer_counter_workaround()) {
- const struct arch_timer_erratum_workaround *__wa;
- __wa = __this_cpu_read(timer_unstable_counter_workaround);
- if (__wa && wa != __wa)
- pr_warn("Can't enable workaround for %s (clashes with %s\n)",
- wa->desc, __wa->desc);
+ __wa = __this_cpu_read(timer_unstable_counter_workaround);
+ if (__wa && wa != __wa)
+ pr_warn("Can't enable workaround for %s (clashes with %s\n)",
+ wa->desc, __wa->desc);
- if (__wa)
- return;
- }
+ if (__wa)
+ return;
arch_timer_enable_workaround(wa, local);
pr_info("Enabling %s workaround for %s\n",
local ? "local" : "global", wa->desc);
}
-#define erratum_handler(fn, r, ...) \
-({ \
- bool __val; \
- if (needs_unstable_timer_counter_workaround()) { \
- const struct arch_timer_erratum_workaround *__wa; \
- __wa = __this_cpu_read(timer_unstable_counter_workaround); \
- if (__wa && __wa->fn) { \
- r = __wa->fn(__VA_ARGS__); \
- __val = true; \
- } else { \
- __val = false; \
- } \
- } else { \
- __val = false; \
- } \
- __val; \
-})
-
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
- const struct arch_timer_erratum_workaround *wa;
+ return has_erratum_handler(read_cntvct_el0);
+}
- wa = __this_cpu_read(timer_unstable_counter_workaround);
- return wa && wa->read_cntvct_el0;
+static bool arch_timer_counter_has_wa(void)
+{
+ return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
-#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
-#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
-#define erratum_handler(fn, r, ...) ({false;})
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
+#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
@@ -736,11 +716,6 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_virt, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
@@ -748,11 +723,6 @@ static int arch_timer_set_next_event_virt(unsigned long evt,
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_phys, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -777,6 +747,10 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
+ typeof(clk->set_next_event) sne;
+
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+
if (arch_timer_c3stop)
clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
@@ -787,20 +761,20 @@ static void __arch_timer_setup(unsigned type,
case ARCH_TIMER_VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- clk->set_next_event = arch_timer_set_next_event_virt;
+ sne = erratum_handler(set_next_event_virt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
case ARCH_TIMER_HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- clk->set_next_event = arch_timer_set_next_event_phys;
+ sne = erratum_handler(set_next_event_phys);
break;
default:
BUG();
}
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+ clk->set_next_event = sne;
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -1002,12 +976,22 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
+ u64 (*rd)(void);
+
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
- arch_timer_read_counter = arch_counter_get_cntvct;
- else
- arch_timer_read_counter = arch_counter_get_cntpct;
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntvct_stable;
+ else
+ rd = arch_counter_get_cntvct;
+ } else {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntpct_stable;
+ else
+ rd = arch_counter_get_cntpct;
+ }
+ arch_timer_read_counter = rd;
clocksource_counter.archdata.vdso_direct = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
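The sbsa_gwdt.c hunk below stops calling arch_counter_get_cntvct() directly (that symbol becomes a static helper inside the clocksource driver above) and instead goes through the exported arch_timer_read_counter() function pointer, so the watchdog automatically picks up the _stable accessor when a counter workaround is registered. A hedged usage sketch; example_read_counter() is a hypothetical helper, not part of the commit:

#include <clocksource/arm_arch_timer.h>

/* Hypothetical illustration: drivers should read the system counter through
 * the arch_timer_read_counter function pointer, which arch_counter_register()
 * retargets at an erratum-safe accessor when a workaround is in force. */
static u64 example_read_counter(void)
{
	return arch_timer_read_counter();
}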
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index e8bd9887c566..e221e47396ab 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -161,7 +161,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
- arch_counter_get_cntvct();
+ arch_timer_read_counter();
do_div(timeleft, gwdt->clk);