author     Linus Torvalds <torvalds@linux-foundation.org>  2024-05-23 12:09:22 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-05-23 12:09:22 -0700
commit     2b7ced108e93b837f152841ac1f0bf45ed2a6b21 (patch)
tree       84ab89bc9f4b9aeea5aeb4cdda438333333b37d5 /arch
parent     2ef32ad2241340565c35baf77fc95053c84eeeb0 (diff)
parent     e92bee9f861b466c676f0200be3e46af7bc4ac6b (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "The major fix here is for a filesystem corruption issue reported on
  Apple M1 as a result of buggy management of the floating point
  register state introduced in 6.8. I initially reverted one of the
  offending patches, but in the end Ard cooked a proper fix so there's
  a revert+reapply in the series.

  Aside from that, we've got some CPU errata workarounds and misc
  other fixes:

   - Fix broken FP register state tracking which resulted in
     filesystem corruption when dm-crypt is used

   - Workarounds for Arm CPU errata affecting the SSBS Spectre
     mitigation

   - Fix lockdep assertion in DMC620 memory controller PMU driver

   - Fix alignment of BUG table when CONFIG_DEBUG_BUGVERBOSE is
     disabled"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/fpsimd: Avoid erroneous elide of user state reload
  Reapply "arm64: fpsimd: Implement lazy restore for kernel mode FPSIMD"
  arm64: asm-bug: Add .align 2 to the end of __BUG_ENTRY
  perf/arm-dmc620: Fix lockdep assert in ->event_init()
  Revert "arm64: fpsimd: Implement lazy restore for kernel mode FPSIMD"
  arm64: errata: Add workaround for Arm errata 3194386 and 3312417
  arm64: cputype: Add Neoverse-V3 definitions
  arm64: cputype: Add Cortex-X4 definitions
  arm64: barrier: Restore spec_bar() macro
Diffstat (limited to 'arch')
 -rw-r--r--  arch/arm64/Kconfig                42
 -rw-r--r--  arch/arm64/include/asm/asm-bug.h   1
 -rw-r--r--  arch/arm64/include/asm/barrier.h   4
 -rw-r--r--  arch/arm64/include/asm/cpucaps.h   2
 -rw-r--r--  arch/arm64/include/asm/cputype.h   4
 -rw-r--r--  arch/arm64/kernel/cpu_errata.c    19
 -rw-r--r--  arch/arm64/kernel/cpufeature.c     8
 -rw-r--r--  arch/arm64/kernel/fpsimd.c        44
 -rw-r--r--  arch/arm64/kernel/proton-pack.c   12
 -rw-r--r--  arch/arm64/tools/cpucaps           1
10 files changed, 115 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2f31376e85aa..5d91259ee7b5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1067,6 +1067,48 @@ config ARM64_ERRATUM_3117295
If unsure, say Y.
+config ARM64_WORKAROUND_SPECULATIVE_SSBS
+ bool
+
+config ARM64_ERRATUM_3194386
+ bool "Cortex-X4: 3194386: workaround for MSR SSBS not self-synchronizing"
+ select ARM64_WORKAROUND_SPECULATIVE_SSBS
+ default y
+ help
+ This option adds the workaround for ARM Cortex-X4 erratum 3194386.
+
+ On affected cores "MSR SSBS, #0" instructions may not affect
+ subsequent speculative instructions, which may permit unexpected
+ speculative store bypassing.
+
+ Work around this problem by placing a speculation barrier after
+ kernel changes to SSBS. The presence of the SSBS special-purpose
+ register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
+ that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
+ SSBS.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_3312417
+ bool "Neoverse-V3: 3312417: workaround for MSR SSBS not self-synchronizing"
+ select ARM64_WORKAROUND_SPECULATIVE_SSBS
+ default y
+ help
+ This option adds the workaround for ARM Neoverse-V3 erratum 3312417.
+
+ On affected cores "MSR SSBS, #0" instructions may not affect
+ subsequent speculative instructions, which may permit unexpected
+ speculative store bypassing.
+
+ Work around this problem by placing a speculation barrier after
+ kernel changes to SSBS. The presence of the SSBS special-purpose
+ register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
+ that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
+ SSBS.
+
+ If unsure, say Y.
+
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
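The help text above directs userspace to the speculation-control prctl() rather than direct writes to SSBS. As a rough illustration (not part of this patch), a task can request the store-bypass mitigation like so:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            /* Ask the kernel to disable speculative store bypass for this task. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0))
                    perror("prctl");

            /* Query the resulting state; the return value is a PR_SPEC_* bitmask. */
            printf("ssb state: %d\n",
                   prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
            return 0;
    }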
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
index c762038ba400..6e73809f6492 100644
--- a/arch/arm64/include/asm/asm-bug.h
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -28,6 +28,7 @@
14470: .long 14471f - .; \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
.short flags; \
+ .align 2; \
.popsection; \
14471:
#else
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index cf2987464c18..1ca947d5c939 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -40,6 +40,10 @@
*/
#define dgh() asm volatile("hint #6" : : : "memory")
+#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
+ SB_BARRIER_INSN"nop\n", \
+ ARM64_HAS_SB))
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
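For reference: on cores without FEAT_SB, the alternative above falls back to "dsb nsh; isb". A standalone sketch of that fallback path (illustrative only; the "memory" clobber is a conservative addition for this sketch, not part of the kernel macro):

    /* Hedged sketch of spec_bar()'s non-FEAT_SB fallback. */
    static inline void spec_bar_fallback(void)
    {
            asm volatile("dsb nsh\n\tisb\n" : : : "memory");
    }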
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 270680e2b5c4..7529c0263933 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -58,6 +58,8 @@ cpucap_is_possible(const unsigned int cap)
return IS_ENABLED(CONFIG_NVIDIA_CARMEL_CNP_ERRATUM);
case ARM64_WORKAROUND_REPEAT_TLBI:
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
+ case ARM64_WORKAROUND_SPECULATIVE_SSBS:
+ return IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS);
}
return true;
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 936389e9aecb..7b32b99023a2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -87,6 +87,8 @@
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+#define ARM_CPU_PART_CORTEX_X4 0xD82
+#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -161,6 +163,8 @@
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
+#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
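For orientation (illustrative only, using helpers that already exist in cputype.h): the new constants decode against the live MIDR_EL1 value roughly as follows:

    /* Hedged sketch: matching the running CPU against a new part number. */
    u32 midr = read_cpuid_id();     /* MIDR_EL1 */

    if (MIDR_IMPLEMENTOR(midr) == ARM_CPU_IMP_ARM &&
        MIDR_PARTNUM(midr) == ARM_CPU_PART_CORTEX_X4)
            pr_info("running on Cortex-X4\n");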
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 76b8dd37092a..828be635e7e1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -432,6 +432,18 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
};
#endif
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
+static const struct midr_range erratum_spec_ssbs_list[] = {
+#ifdef CONFIG_ARM64_ERRATUM_3194386
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_3312417
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+#endif
+ {}
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -729,6 +741,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
},
#endif
+#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS
+ {
+ .desc = "ARM errata 3194386, 3312417",
+ .capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
+ ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
+ },
+#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
{
.desc = "ARM errata 2966298, 3117295",
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 56583677c1f2..48e7029f1054 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2307,6 +2307,14 @@ static void user_feature_fixup(void)
if (regp)
regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
}
+
+ if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_SSBS)) {
+ struct arm64_ftr_reg *regp;
+
+ regp = get_arm64_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ if (regp)
+ regp->user_mask &= ~ID_AA64PFR1_EL1_SSBS_MASK;
+ }
}
static void elf_hwcap_fixup(void)
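Clearing SSBS from user_mask changes what the kernel's EL0 ID-register emulation reports. Illustratively (not part of the patch, and assuming the HWCAP_CPUID register emulation is in use), a userspace probe would now see the SSBS field (ID_AA64PFR1_EL1 bits [7:4]) as zero on affected parts:

    /* Hedged userspace sketch: MRS from EL0 is trapped and emulated. */
    uint64_t pfr1;

    asm volatile("mrs %0, ID_AA64PFR1_EL1" : "=r" (pfr1));
    unsigned int ssbs = (pfr1 >> 4) & 0xf;  /* 0 => SSBS not advertised */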
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ebb0158997ca..82e8a6017382 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1535,6 +1535,27 @@ static void fpsimd_save_kernel_state(struct task_struct *task)
task->thread.kernel_fpsimd_cpu = smp_processor_id();
}
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
+ */
+static void fpsimd_flush_cpu_state(void)
+{
+ WARN_ON(!system_supports_fpsimd());
+ __this_cpu_write(fpsimd_last_state.st, NULL);
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+}
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1552,7 +1573,7 @@ void fpsimd_thread_switch(struct task_struct *next)
if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
fpsimd_load_kernel_state(next);
- set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
+ fpsimd_flush_cpu_state();
} else {
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
@@ -1843,27 +1864,6 @@ void fpsimd_flush_task_state(struct task_struct *t)
}
/*
- * Invalidate any task's FPSIMD state that is present on this cpu.
- * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
- * before calling this function.
- */
-static void fpsimd_flush_cpu_state(void)
-{
- WARN_ON(!system_supports_fpsimd());
- __this_cpu_write(fpsimd_last_state.st, NULL);
-
- /*
- * Leaving streaming mode enabled will cause issues for any kernel
- * NEON and leaving streaming mode or ZA enabled may increase power
- * consumption.
- */
- if (system_supports_sme())
- sme_smstop();
-
- set_thread_flag(TIF_FOREIGN_FPSTATE);
-}
-
-/*
* Save the FPSIMD state to memory and invalidate cpu view.
* This function must be called with preemption disabled.
*/
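The block being moved protects kernel-mode FPSIMD users such as the NEON crypto code that dm-crypt relies on. For orientation (illustrative, not from this patch), such users follow the standard begin/end pattern:

    #include <asm/neon.h>
    #include <asm/simd.h>

    /* Hedged sketch of a kernel-mode SIMD user (e.g. a crypto driver). */
    static void do_simd_work(void)
    {
            if (!may_use_simd())
                    return;                 /* fall back to a scalar path */

            kernel_neon_begin();            /* save/invalidate user FPSIMD state */
            /* ... NEON/FPSIMD instructions go here ... */
            kernel_neon_end();
    }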
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 6268a13a1d58..baca47bd443c 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -558,6 +558,18 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
set_pstate_ssbs(0);
+
+ /*
+ * SSBS is self-synchronizing and is intended to affect subsequent
+ * speculative instructions, but some CPUs can speculate with a stale
+ * value of SSBS.
+ *
+ * Mitigate this with an unconditional speculation barrier, as CPUs
+ * could mis-speculate branches and bypass a conditional barrier.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS))
+ spec_bar();
+
return SPECTRE_MITIGATED;
}
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 62b2838a231a..ac3429d892b9 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -102,4 +102,5 @@ WORKAROUND_NVIDIA_CARMEL_CNP
WORKAROUND_QCOM_FALKOR_E1003
WORKAROUND_REPEAT_TLBI
WORKAROUND_SPECULATIVE_AT
+WORKAROUND_SPECULATIVE_SSBS
WORKAROUND_SPECULATIVE_UNPRIV_LOAD