author    | Daniel Lezcano <daniel.lezcano@linaro.org> | 2017-04-07 16:23:29 +0200
committer | Daniel Lezcano <daniel.lezcano@linaro.org> | 2017-04-07 16:23:29 +0200
commit    | 6f9c89000c3f771c47adacaca2df775a25f27205
tree      | 862f3df0421466dcdc2c7795f5c5d38d21862323
parent    | 28e71e2fe8fe6cdbd1bdc61601ea50d6423d3cf0
parent    | d003d029cea8a28139b4f9b88a36b8fac864f45b
Merge tag 'arch-timer-errata' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into clockevents/4.12
arm64 arch timer workaround series, including the base patches
that will also go via the arm64 tree.
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
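
[Editor's note] As background for the diff below: the Cortex-A73 858921 workaround added to arm_arch_timer.c reads CNTVCT_EL0 twice back to back and keeps the first sample when bit 32 differs between the two reads, i.e. when the reads straddle a 2^32 boundary, which is where the erratum can return a corrupted value. The following standalone C program is a minimal illustration of that selection test; the counter values are invented for demonstration, and nothing here is part of the patch itself.

/* Illustration of the 858921 double-read selection logic (not kernel code). */
#include <stdint.h>
#include <stdio.h>

static uint64_t pick_stable(uint64_t old, uint64_t new)
{
	/* Bit 32 of old^new is set iff the two reads straddle a 2^32 boundary. */
	return (((old ^ new) >> 32) & 1) ? old : new;
}

int main(void)
{
	/* Both reads on the same side of a 2^32 boundary: trust the newer one. */
	printf("%llx\n", (unsigned long long)pick_stable(0x1fffffff0ULL, 0x1fffffff8ULL));
	/* Reads straddling a 2^32 boundary: fall back to the older one. */
	printf("%llx\n", (unsigned long long)pick_stable(0x1fffffffeULL, 0x200000002ULL));
	return 0;
}

The XOR isolates the bits that changed between the two samples; shifting by 32 and masking with 1 asks specifically whether bit 32 flipped.
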
-rw-r--r-- | Documentation/arm64/silicon-errata.txt |   1
-rw-r--r-- | arch/arm64/include/asm/arch_timer.h    |  43
-rw-r--r-- | arch/arm64/include/asm/cpucaps.h       |   3
-rw-r--r-- | arch/arm64/include/asm/cputype.h       |   2
-rw-r--r-- | arch/arm64/include/asm/esr.h           |   2
-rw-r--r-- | arch/arm64/kernel/cpu_errata.c         |  15
-rw-r--r-- | arch/arm64/kernel/cpufeature.c         |  13
-rw-r--r-- | arch/arm64/kernel/traps.c              |  14
-rw-r--r-- | drivers/clocksource/Kconfig            |  11
-rw-r--r-- | drivers/clocksource/arm_arch_timer.c   | 535
10 files changed, 466 insertions, 173 deletions
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 2f66683500b8..10f2dddbf449 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -54,6 +54,7 @@ stable kernels.
 | ARM | Cortex-A57 | #852523 | N/A |
 | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
 | ARM | Cortex-A72 | #853709 | N/A |
+| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
 | ARM | MMU-500 | #841119,#826419 | N/A |
 | | | | |
 | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b4b34004a21e..74d08e44a651 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -25,6 +25,7 @@
 #include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/smp.h>
 #include <linux/types.h>
 
 #include <clocksource/arm_arch_timer.h>
@@ -37,24 +38,44 @@ extern struct static_key_false arch_timer_read_ool_enabled;
 #define needs_unstable_timer_counter_workaround()  false
 #endif
 
+enum arch_timer_erratum_match_type {
+	ate_match_dt,
+	ate_match_local_cap_id,
+	ate_match_acpi_oem_info,
+};
+
+struct clock_event_device;
+
 struct arch_timer_erratum_workaround {
-	const char *id;		/* Indicate the Erratum ID */
+	enum arch_timer_erratum_match_type match_type;
+	const void *id;
+	const char *desc;
 	u32 (*read_cntp_tval_el0)(void);
 	u32 (*read_cntv_tval_el0)(void);
 	u64 (*read_cntvct_el0)(void);
+	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
+	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
 };
 
-extern const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround;
-
-#define arch_timer_reg_read_stable(reg)		\
-({						\
-	u64 _val;				\
-	if (needs_unstable_timer_counter_workaround())	\
-		_val = timer_unstable_counter_workaround->read_##reg();	\
-	else					\
-		_val = read_sysreg(reg);	\
-	_val;					\
+DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
+		timer_unstable_counter_workaround);
+
+#define arch_timer_reg_read_stable(reg)					\
+({									\
+	u64 _val;							\
+	if (needs_unstable_timer_counter_workaround()) {		\
+		const struct arch_timer_erratum_workaround *wa;		\
+		preempt_disable();					\
+		wa = __this_cpu_read(timer_unstable_counter_workaround); \
+		if (wa && wa->read_##reg)				\
+			_val = wa->read_##reg();			\
+		else							\
+			_val = read_sysreg(reg);			\
+		preempt_enable();					\
+	} else {							\
+		_val = read_sysreg(reg);				\
+	}								\
+	_val;								\
 })
 
 /*
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index fb78a5d3b60b..b3aab8a17868 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -37,7 +37,8 @@
 #define ARM64_HAS_NO_FPSIMD			16
 #define ARM64_WORKAROUND_REPEAT_TLBI		17
 #define ARM64_WORKAROUND_QCOM_FALKOR_E1003	18
+#define ARM64_WORKAROUND_858921			19
 
-#define ARM64_NCAPS				19
+#define ARM64_NCAPS				20
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index fc502713ab37..0984d1b3a8f2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -80,6 +80,7 @@
 #define ARM_CPU_PART_FOUNDATION		0xD00
 #define ARM_CPU_PART_CORTEX_A57		0xD07
 #define ARM_CPU_PART_CORTEX_A53		0xD03
+#define ARM_CPU_PART_CORTEX_A73		0xD09
 
 #define APM_CPU_PART_POTENZA		0x000
 
@@ -92,6 +93,7 @@
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d14c478976d0..ad42e79a5d4d 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -175,6 +175,8 @@
 #define ESR_ELx_SYS64_ISS_SYS_CTR_READ	(ESR_ELx_SYS64_ISS_SYS_CTR | \
 					 ESR_ELx_SYS64_ISS_DIR_READ)
 
+#define ESR_ELx_SYS64_ISS_SYS_CNTVCT	(ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 2, 14, 0) | \
+					 ESR_ELx_SYS64_ISS_DIR_READ)
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index f6cc67e7626e..2ed2a7657711 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -53,6 +53,13 @@ static int cpu_enable_trap_ctr_access(void *__unused)
 	.midr_range_min = min, \
 	.midr_range_max = max
 
+#define MIDR_ALL_VERSIONS(model) \
+	.def_scope = SCOPE_LOCAL_CPU, \
+	.matches = is_affected_midr_range, \
+	.midr_model = model, \
+	.midr_range_min = 0, \
+	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -151,6 +158,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   MIDR_CPU_VAR_REV(0, 0)),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_858921
+	{
+	/* Cortex-A73 all versions */
+		.desc = "ARM erratum 858921",
+		.capability = ARM64_WORKAROUND_858921,
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+	},
+#endif
 	{
 	}
 };
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index abda8e861865..6eb77ae99b79 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1090,20 +1090,29 @@ static void __init setup_feature_capabilities(void)
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
  */
-bool this_cpu_has_cap(unsigned int cap)
+static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+			       unsigned int cap)
 {
 	const struct arm64_cpu_capabilities *caps;
 
 	if (WARN_ON(preemptible()))
 		return false;
 
-	for (caps = arm64_features; caps->desc; caps++)
+	for (caps = cap_array; caps->desc; caps++)
 		if (caps->capability == cap && caps->matches)
 			return caps->matches(caps, SCOPE_LOCAL_CPU);
 
 	return false;
 }
 
+extern const struct arm64_cpu_capabilities arm64_errata[];
+
+bool this_cpu_has_cap(unsigned int cap)
+{
+	return (__this_cpu_has_cap(arm64_features, cap) ||
+		__this_cpu_has_cap(arm64_errata, cap));
+}
+
 void __init setup_cpu_features(void)
 {
 	u32 cwg;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index e52be6aa44ee..1de444e6c669 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -505,6 +505,14 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
 	regs->pc += 4;
 }
 
+static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+
+	pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
+	regs->pc += 4;
+}
+
 struct sys64_hook {
 	unsigned int esr_mask;
 	unsigned int esr_val;
@@ -523,6 +531,12 @@ static struct sys64_hook sys64_hooks[] = {
 		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
 		.handler = ctr_read_handler,
 	},
+	{
+		/* Trap read access to CNTVCT_EL0 */
+		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
+		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
+		.handler = cntvct_read_handler,
+	},
 	{},
 };
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7ca2c2461c9c..545d541ae20e 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -368,6 +368,17 @@ config HISILICON_ERRATUM_161010101
 	  161010101. The workaround will be active if the hisilicon,erratum-161010101
 	  property is found in the timer node.
 
+config ARM64_ERRATUM_858921
+	bool "Workaround for Cortex-A73 erratum 858921"
+	default y
+	select ARM_ARCH_TIMER_OOL_WORKAROUND
+	depends on ARM_ARCH_TIMER && ARM64
+	help
+	  This option enables a workaround applicable to Cortex-A73
+	  (all versions), whose counter may return incorrect values.
+	  The workaround will be dynamically enabled when an affected
+	  core is detected.
+
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
 	select CLKSRC_OF if OF
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 56c3e25f4e1a..74183a7315ac 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -83,6 +83,7 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
 static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 static bool arch_counter_suspend_stop;
+static bool vdso_default = true;
 
 static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
 
@@ -96,6 +97,105 @@ early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
  * Architected system timer support.
  */
 
+static __always_inline
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+			  struct clock_event_device *clk)
+{
+	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
+		struct arch_timer *timer = to_arch_timer(clk);
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			writel_relaxed(val, timer->base + CNTP_CTL);
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			writel_relaxed(val, timer->base + CNTP_TVAL);
+			break;
+		}
+	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
+		struct arch_timer *timer = to_arch_timer(clk);
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			writel_relaxed(val, timer->base + CNTV_CTL);
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			writel_relaxed(val, timer->base + CNTV_TVAL);
+			break;
+		}
+	} else {
+		arch_timer_reg_write_cp15(access, reg, val);
+	}
+}
+
+static __always_inline
+u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
+			struct clock_event_device *clk)
+{
+	u32 val;
+
+	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
+		struct arch_timer *timer = to_arch_timer(clk);
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			val = readl_relaxed(timer->base + CNTP_CTL);
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			val = readl_relaxed(timer->base + CNTP_TVAL);
+			break;
+		}
+	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
+		struct arch_timer *timer = to_arch_timer(clk);
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			val = readl_relaxed(timer->base + CNTV_CTL);
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			val = readl_relaxed(timer->base + CNTV_TVAL);
+			break;
+		}
+	} else {
+		val = arch_timer_reg_read_cp15(access, reg);
+	}
+
+	return val;
+}
+
+/*
+ * Default to cp15 based access because arm64 uses this function for
+ * sched_clock() before DT is probed and the cp15 method is guaranteed
+ * to exist on arm64. arm doesn't use this before DT is probed so even
+ * if we don't have the cp15 accessors we won't have a problem.
+ */
+u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+
+static u64 arch_counter_read(struct clocksource *cs)
+{
+	return arch_timer_read_counter();
+}
+
+static u64 arch_counter_read_cc(const struct cyclecounter *cc)
+{
+	return arch_timer_read_counter();
+}
+
+static struct clocksource clocksource_counter = {
+	.name	= "arch_sys_counter",
+	.rating	= 400,
+	.read	= arch_counter_read,
+	.mask	= CLOCKSOURCE_MASK(56),
+	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct cyclecounter cyclecounter __ro_after_init = {
+	.read	= arch_counter_read_cc,
+	.mask	= CLOCKSOURCE_MASK(56),
+};
+
+struct ate_acpi_oem_info {
+	char oem_id[ACPI_OEM_ID_SIZE + 1];
+	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+	u32 oem_revision;
+};
+
 #ifdef CONFIG_FSL_ERRATUM_A008585
 /*
  * The number of retries is an arbitrary value well beyond the highest number
@@ -170,97 +270,289 @@ static u64 notrace hisi_161010101_read_cntvct_el0(void)
 {
 	return __hisi_161010101_read_reg(cntvct_el0);
 }
+
+static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
+	/*
+	 * Note that trailing spaces are required to properly match
+	 * the OEM table information.
+	 */
+	{
+		.oem_id		= "HISI  ",
+		.oem_table_id	= "HIP05   ",
+		.oem_revision	= 0,
+	},
+	{
+		.oem_id		= "HISI  ",
+		.oem_table_id	= "HIP06   ",
+		.oem_revision	= 0,
+	},
+	{
+		.oem_id		= "HISI  ",
+		.oem_table_id	= "HIP07   ",
+		.oem_revision	= 0,
+	},
+	{ /* Sentinel indicating the end of the OEM array */ },
+};
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_858921
+static u64 notrace arm64_858921_read_cntvct_el0(void)
+{
+	u64 old, new;
+
+	old = read_sysreg(cntvct_el0);
+	new = read_sysreg(cntvct_el0);
+	return (((old ^ new) >> 32) & 1) ? old : new;
+}
+#endif
+
 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
-const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
+DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
+	       timer_unstable_counter_workaround);
 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
 
 DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
 EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
 
+static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
+						struct clock_event_device *clk)
+{
+	unsigned long ctrl;
+	u64 cval = evt + arch_counter_get_cntvct();
+
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+	ctrl |= ARCH_TIMER_CTRL_ENABLE;
+	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS)
+		write_sysreg(cval, cntp_cval_el0);
+	else
+		write_sysreg(cval, cntv_cval_el0);
+
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+}
+
+static int erratum_set_next_event_tval_virt(unsigned long evt,
+					    struct clock_event_device *clk)
+{
+	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+	return 0;
+}
+
+static int erratum_set_next_event_tval_phys(unsigned long evt,
+					    struct clock_event_device *clk)
+{
+	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+	return 0;
+}
+
 static const struct arch_timer_erratum_workaround ool_workarounds[] = {
 #ifdef CONFIG_FSL_ERRATUM_A008585
 	{
+		.match_type = ate_match_dt,
 		.id = "fsl,erratum-a008585",
+		.desc = "Freescale erratum a005858",
 		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
 		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
 		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
+		.set_next_event_phys = erratum_set_next_event_tval_phys,
+		.set_next_event_virt = erratum_set_next_event_tval_virt,
 	},
 #endif
 #ifdef CONFIG_HISILICON_ERRATUM_161010101
 	{
+		.match_type = ate_match_dt,
 		.id = "hisilicon,erratum-161010101",
+		.desc = "HiSilicon erratum 161010101",
 		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
 		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
 		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
+		.set_next_event_phys = erratum_set_next_event_tval_phys,
+		.set_next_event_virt = erratum_set_next_event_tval_virt,
+	},
+	{
+		.match_type = ate_match_acpi_oem_info,
+		.id = hisi_161010101_oem_info,
+		.desc = "HiSilicon erratum 161010101",
+		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
+		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
+		.set_next_event_phys = erratum_set_next_event_tval_phys,
+		.set_next_event_virt = erratum_set_next_event_tval_virt,
+	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_858921
+	{
+		.match_type = ate_match_local_cap_id,
+		.id = (void *)ARM64_WORKAROUND_858921,
+		.desc = "ARM erratum 858921",
+		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
 	},
 #endif
 };
-#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
 
-static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
-			  struct clock_event_device *clk)
+typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
+			       const void *);
+
+static
+bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
+				 const void *arg)
 {
-	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
-		struct arch_timer *timer = to_arch_timer(clk);
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTP_CTL);
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTP_TVAL);
-			break;
-		}
-	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
-		struct arch_timer *timer = to_arch_timer(clk);
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTV_CTL);
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTV_TVAL);
-			break;
-		}
-	} else {
-		arch_timer_reg_write_cp15(access, reg, val);
+	const struct device_node *np = arg;
+
+	return of_property_read_bool(np, wa->id);
+}
+
+static
+bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
+					const void *arg)
+{
+	return this_cpu_has_cap((uintptr_t)wa->id);
+}
+
+
+static
+bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
+				       const void *arg)
+{
+	static const struct ate_acpi_oem_info empty_oem_info = {};
+	const struct ate_acpi_oem_info *info = wa->id;
+	const struct acpi_table_header *table = arg;
+
+	/* Iterate over the ACPI OEM info array, looking for a match */
+	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
+		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
+		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+		    info->oem_revision == table->oem_revision)
+			return true;
+
+		info++;
 	}
+
+	return false;
 }
 
-static __always_inline
-u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
-			struct clock_event_device *clk)
+static const struct arch_timer_erratum_workaround *
+arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
+			  ate_match_fn_t match_fn,
+			  void *arg)
 {
-	u32 val;
+	int i;
 
-	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
-		struct arch_timer *timer = to_arch_timer(clk);
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			val = readl_relaxed(timer->base + CNTP_CTL);
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTP_TVAL);
-			break;
-		}
-	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
-		struct arch_timer *timer = to_arch_timer(clk);
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			val = readl_relaxed(timer->base + CNTV_CTL);
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTV_TVAL);
-			break;
-		}
+	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
+		if (ool_workarounds[i].match_type != type)
+			continue;
+
+		if (match_fn(&ool_workarounds[i], arg))
+			return &ool_workarounds[i];
+	}
+
+	return NULL;
+}
+
+static
+void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
+				  bool local)
+{
+	int i;
+
+	if (local) {
+		__this_cpu_write(timer_unstable_counter_workaround, wa);
 	} else {
-		val = arch_timer_reg_read_cp15(access, reg);
+		for_each_possible_cpu(i)
+			per_cpu(timer_unstable_counter_workaround, i) = wa;
 	}
 
-	return val;
+	static_branch_enable(&arch_timer_read_ool_enabled);
+
+	/*
+	 * Don't use the vdso fastpath if errata require using the
+	 * out-of-line counter accessor. We may change our mind pretty
+	 * late in the game (with a per-CPU erratum, for example), so
+	 * change both the default value and the vdso itself.
+	 */
+	if (wa->read_cntvct_el0) {
+		clocksource_counter.archdata.vdso_direct = false;
+		vdso_default = false;
+	}
 }
 
+static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
+					    void *arg)
+{
+	const struct arch_timer_erratum_workaround *wa;
+	ate_match_fn_t match_fn = NULL;
+	bool local = false;
+
+	switch (type) {
+	case ate_match_dt:
+		match_fn = arch_timer_check_dt_erratum;
+		break;
+	case ate_match_local_cap_id:
+		match_fn = arch_timer_check_local_cap_erratum;
+		local = true;
+		break;
+	case ate_match_acpi_oem_info:
+		match_fn = arch_timer_check_acpi_oem_erratum;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	wa = arch_timer_iterate_errata(type, match_fn, arg);
+	if (!wa)
+		return;
+
+	if (needs_unstable_timer_counter_workaround()) {
+		const struct arch_timer_erratum_workaround *__wa;
+		__wa = __this_cpu_read(timer_unstable_counter_workaround);
+		if (__wa && wa != __wa)
+			pr_warn("Can't enable workaround for %s (clashes with %s\n)",
+				wa->desc, __wa->desc);
+
+		if (__wa)
+			return;
+	}
+
+	arch_timer_enable_workaround(wa, local);
+	pr_info("Enabling %s workaround for %s\n",
+		local ? "local" : "global", wa->desc);
+}
+
+#define erratum_handler(fn, r, ...)					\
+({									\
+	bool __val;							\
+	if (needs_unstable_timer_counter_workaround()) {		\
+		const struct arch_timer_erratum_workaround *__wa;	\
+		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
+		if (__wa && __wa->fn) {					\
+			r = __wa->fn(__VA_ARGS__);			\
+			__val = true;					\
+		} else {						\
+			__val = false;					\
+		}							\
+	} else {							\
+		__val = false;						\
+	}								\
+	__val;								\
+})
+
+static bool arch_timer_this_cpu_has_cntvct_wa(void)
+{
+	const struct arch_timer_erratum_workaround *wa;
+
+	wa = __this_cpu_read(timer_unstable_counter_workaround);
+	return wa && wa->read_cntvct_el0;
+}
+#else
+#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
+#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
+#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
+#define erratum_handler(fn, r, ...)			({false;})
+#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
+#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
+
 static __always_inline irqreturn_t timer_handler(const int access,
 					struct clock_event_device *evt)
 {
@@ -348,43 +640,14 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
 	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
 }
 
-#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
-static __always_inline void erratum_set_next_event_generic(const int access,
-		unsigned long evt, struct clock_event_device *clk)
-{
-	unsigned long ctrl;
-	u64 cval = evt + arch_counter_get_cntvct();
-
-	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
-	ctrl |= ARCH_TIMER_CTRL_ENABLE;
-	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-
-	if (access == ARCH_TIMER_PHYS_ACCESS)
-		write_sysreg(cval, cntp_cval_el0);
-	else if (access == ARCH_TIMER_VIRT_ACCESS)
-		write_sysreg(cval, cntv_cval_el0);
-
-	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
-}
-
-static int erratum_set_next_event_virt(unsigned long evt,
-					   struct clock_event_device *clk)
-{
-	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
-	return 0;
-}
-
-static int erratum_set_next_event_phys(unsigned long evt,
-					   struct clock_event_device *clk)
-{
-	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
-	return 0;
-}
-#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
-
 static int arch_timer_set_next_event_virt(unsigned long evt,
 					   struct clock_event_device *clk)
 {
+	int ret;
+
+	if (erratum_handler(set_next_event_virt, ret, evt, clk))
+		return ret;
+
 	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
 	return 0;
 }
@@ -392,6 +655,11 @@ static int arch_timer_set_next_event_virt(unsigned long evt,
 static int arch_timer_set_next_event_phys(unsigned long evt,
 					  struct clock_event_device *clk)
 {
+	int ret;
+
+	if (erratum_handler(set_next_event_phys, ret, evt, clk))
+		return ret;
+
 	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
 	return 0;
 }
@@ -410,19 +678,6 @@ static int arch_timer_set_next_event_phys_mem(unsigned long evt,
 	return 0;
 }
 
-static void erratum_workaround_set_sne(struct clock_event_device *clk)
-{
-#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
-	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
-		return;
-
-	if (arch_timer_uses_ppi == VIRT_PPI)
-		clk->set_next_event = erratum_set_next_event_virt;
-	else
-		clk->set_next_event = erratum_set_next_event_phys;
-#endif
-}
-
 static void __arch_timer_setup(unsigned type,
 			       struct clock_event_device *clk)
 {
@@ -452,7 +707,7 @@ static void __arch_timer_setup(unsigned type,
 			BUG();
 		}
 
-		erratum_workaround_set_sne(clk);
+		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
 	} else {
 		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
 		clk->name = "arch_mem_timer";
@@ -508,15 +763,23 @@ static void arch_counter_set_user_access(void)
 {
 	u32 cntkctl = arch_timer_get_cntkctl();
 
-	/* Disable user access to the timers and the physical counter */
+	/* Disable user access to the timers and both counters */
 	/* Also disable virtual event stream */
 	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
 			| ARCH_TIMER_USR_VT_ACCESS_EN
+			| ARCH_TIMER_USR_VCT_ACCESS_EN
 			| ARCH_TIMER_VIRT_EVT_EN
 			| ARCH_TIMER_USR_PCT_ACCESS_EN);
 
-	/* Enable user access to the virtual counter */
-	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	/*
+	 * Enable user access to the virtual counter if it doesn't
+	 * need to be workaround. The vdso may have been already
+	 * disabled though.
+	 */
+	if (arch_timer_this_cpu_has_cntvct_wa())
+		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
+	else
+		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
 
 	arch_timer_set_cntkctl(cntkctl);
 }
@@ -621,37 +884,6 @@ static u64 arch_counter_get_cntvct_mem(void)
 	return ((u64) vct_hi << 32) | vct_lo;
 }
 
-/*
- * Default to cp15 based access because arm64 uses this function for
- * sched_clock() before DT is probed and the cp15 method is guaranteed
- * to exist on arm64. arm doesn't use this before DT is probed so even
- * if we don't have the cp15 accessors we won't have a problem.
- */
-u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
-
-static u64 arch_counter_read(struct clocksource *cs)
-{
-	return arch_timer_read_counter();
-}
-
-static u64 arch_counter_read_cc(const struct cyclecounter *cc)
-{
-	return arch_timer_read_counter();
-}
-
-static struct clocksource clocksource_counter = {
-	.name	= "arch_sys_counter",
-	.rating	= 400,
-	.read	= arch_counter_read,
-	.mask	= CLOCKSOURCE_MASK(56),
-	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static struct cyclecounter cyclecounter __ro_after_init = {
-	.read	= arch_counter_read_cc,
-	.mask	= CLOCKSOURCE_MASK(56),
-};
-
 static struct arch_timer_kvm_info arch_timer_kvm_info;
 
 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
@@ -670,16 +902,7 @@ static void __init arch_counter_register(unsigned type)
 	else
 		arch_timer_read_counter = arch_counter_get_cntpct;
 
-	clocksource_counter.archdata.vdso_direct = true;
-
-#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
-	/*
-	 * Don't use the vdso fastpath if errata require using
-	 * the out-of-line counter accessor.
-	 */
-	if (static_branch_unlikely(&arch_timer_read_ool_enabled))
-		clocksource_counter.archdata.vdso_direct = false;
-#endif
+		clocksource_counter.archdata.vdso_direct = vdso_default;
 	} else {
 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
 	}
@@ -718,14 +941,14 @@ static int arch_timer_dying_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_CPU_PM
-static unsigned int saved_cntkctl;
+static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	if (action == CPU_PM_ENTER)
-		saved_cntkctl = arch_timer_get_cntkctl();
+		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
 	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
-		arch_timer_set_cntkctl(saved_cntkctl);
+		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
 	return NOTIFY_OK;
 }
 
@@ -960,17 +1183,8 @@ static int __init arch_timer_of_init(struct device_node *np)
 
 	arch_timer_c3stop = !of_property_read_bool(np, "always-on");
 
-#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
-	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
-		if (of_property_read_bool(np, ool_workarounds[i].id)) {
-			timer_unstable_counter_workaround = &ool_workarounds[i];
-			static_branch_enable(&arch_timer_read_ool_enabled);
-			pr_info("arch_timer: Enabling workaround for %s\n",
-				timer_unstable_counter_workaround->id);
-			break;
-		}
-	}
-#endif
+	/* Check for globally applicable workarounds */
+	arch_timer_check_ool_workaround(ate_match_dt, np);
 
 	/*
 	 * If we cannot rely on firmware initializing the timer registers then
@@ -1127,6 +1341,9 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
 	/* Always-on capability */
 	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
 
+	/* Check for globally applicable workarounds */
+	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
+
 	arch_timer_init();
 	return 0;
 }
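
[Editor's note] The per-CPU workaround plumbing above routes clockevent programming through optional override hooks: the erratum_handler() macro checks the CPU's registered workaround descriptor and, if it supplies a set_next_event override, calls that instead of the default path. The sketch below restates this dispatch pattern in plain userspace C under simplifying assumptions: a single global pointer stands in for the per-CPU variable and the static key, and struct workaround and active_wa are illustrative names, not kernel symbols.

#include <stdio.h>

struct workaround {
	const char *desc;
	int (*set_next_event)(unsigned long evt);
};

static int erratum_set_next_event(unsigned long evt)
{
	printf("erratum path, evt=%lu\n", evt);
	return 0;
}

/* Stand-in for the per-CPU timer_unstable_counter_workaround pointer. */
static const struct workaround example_wa = {
	.desc = "example erratum",
	.set_next_event = erratum_set_next_event,
};
static const struct workaround *active_wa = &example_wa;

static int set_next_event(unsigned long evt)
{
	/* Mirrors erratum_handler(): prefer the override when one is present. */
	if (active_wa && active_wa->set_next_event)
		return active_wa->set_next_event(evt);

	printf("default path, evt=%lu\n", evt);
	return 0;
}

int main(void)
{
	set_next_event(42);	/* takes the erratum path */
	active_wa = NULL;
	set_next_event(43);	/* falls back to the default path */
	return 0;
}

The kernel version wraps the same check in a static key so that CPUs without any erratum pay no branch cost on the hot path.
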
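[Editor's note] Likewise, arch_timer_check_acpi_oem_erratum() above matches the firmware's ACPI table header against a zero-filled-sentinel array of OEM identifiers using memcmp(). A standalone rendering of that loop follows; the sizes and table values are made up for illustration and stand in for ACPI_OEM_ID_SIZE, ACPI_OEM_TABLE_ID_SIZE and the real OEM strings.

#include <stdio.h>
#include <string.h>

#define OEM_ID_SIZE		6
#define OEM_TABLE_ID_SIZE	8

struct oem_info {
	char oem_id[OEM_ID_SIZE + 1];
	char oem_table_id[OEM_TABLE_ID_SIZE + 1];
	unsigned int oem_revision;
};

/* Trailing spaces pad the strings to their fixed field widths. */
static const struct oem_info quirks[] = {
	{ "HISI  ", "HIP05   ", 0 },
	{ "HISI  ", "HIP06   ", 0 },
	{ { 0 }, { 0 }, 0 },	/* zero-filled sentinel terminates the array */
};

static int oem_matches(const char *id, const char *table_id, unsigned int rev)
{
	static const struct oem_info empty = { { 0 }, { 0 }, 0 };
	const struct oem_info *info = quirks;

	/* Walk entries until the all-zero sentinel is reached. */
	while (memcmp(info, &empty, sizeof(*info))) {
		if (!memcmp(info->oem_id, id, OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table_id, OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == rev)
			return 1;
		info++;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", oem_matches("HISI  ", "HIP05   ", 0));	/* prints 1 */
	printf("%d\n", oem_matches("ACME  ", "ROADRUN ", 2));	/* prints 0 */
	return 0;
}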