From e8cde32f111f7f5681a7bad3ec747e9e697569a9 Mon Sep 17 00:00:00 2001
From: Nianyao Tang
Date: Tue, 11 Jun 2024 12:20:49 +0000
Subject: arm64/cpufeatures/kvm: Add ARMv8.9 FEAT_ECBHB bits in ID_AA64MMFR1 register

Enable the ECBHB bits in the ID_AA64MMFR1 register, as per the ARM DDI
0487K.a specification.

When a guest OS reads ID_AA64MMFR1_EL1, KVM emulates the register using
ftr_id_aa64mmfr1 and always returns ID_AA64MMFR1_EL1.ECBHB=0 to the guest.
As a result, guest syscalls jump to the tramp ventry, which is not needed
on implementations with ID_AA64MMFR1_EL1.ECBHB=1. Let's make guest syscall
processing the same as the host's.

Signed-off-by: Nianyao Tang
Link: https://lore.kernel.org/r/20240611122049.2758600-1-tangnianyao@huawei.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/cpufeature.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 48e7029f1054..c3913c90797e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -429,6 +429,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ECBHB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0),
-- cgit v1.2.3-58-ga151

From 7647e2b109f4d508fcb35bb8089a27c4fdd81f61 Mon Sep 17 00:00:00 2001
From: Puranjay Mohan
Date: Fri, 3 May 2024 17:18:46 +0000
Subject: arm64/arch_timer: include <linux/percpu.h>

arch_timer.h includes <linux/smp.h> since the commit:
6acc71ccac7187fc ("arm64: arch_timer: Allows a CPU-specific erratum to only affect a subset of CPUs")

It was included to use DEFINE_PER_CPU(), etc., but it should have included
<linux/percpu.h> rather than <linux/smp.h>. It worked because smp.h
includes percpu.h. The next commit will remove percpu.h from smp.h, which
will break this usage. Explicitly include <linux/percpu.h> and remove
<linux/smp.h>.

Signed-off-by: Puranjay Mohan
Acked-by: Mark Rutland
Reviewed-by: Stephen Boyd
Reviewed-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20240503171847.68267-1-puranjay@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/arch_timer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 934c658ee947..f5794d50f51d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -15,7 +15,7 @@
 #include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
-#include <linux/smp.h>
+#include <linux/percpu.h>
 #include <linux/types.h>
 
 #include <clocksource/arm_arch_timer.h>
-- cgit v1.2.3-58-ga151

From bf0baa5bbdc9b99ea081d360f245e5f96e835612 Mon Sep 17 00:00:00 2001
From: Puranjay Mohan
Date: Fri, 3 May 2024 17:18:47 +0000
Subject: arm64: implement raw_smp_processor_id() using thread_info

Historically, arm64 implemented raw_smp_processor_id() as a read of
current_thread_info()->cpu. This changed when arm64 moved thread_info into
the task struct, as at the time CONFIG_THREAD_INFO_IN_TASK made core code
use thread_struct::cpu for the cpu number, and header dependencies
prevented using this in raw_smp_processor_id().
As a workaround, we moved to using a percpu variable in commit:
57c82954e77fa12c ("arm64: make cpu number a percpu variable")

Since then, thread_info::cpu was reintroduced, and core code was made to
use this in commits:
001430c1910df65a ("arm64: add CPU field to struct thread_info")
bcf9033e5449bdca ("sched: move CPU field back into thread_info if THREAD_INFO_IN_TASK=y")

Consequently it is possible to use current_thread_info()->cpu again. This
decreases the number of emitted instructions, as in the following example:

Dump of assembler code for function bpf_get_smp_processor_id:
   0xffff8000802cd608 <+0>:  nop
   0xffff8000802cd60c <+4>:  nop
   0xffff8000802cd610 <+8>:  adrp x0, 0xffff800082138000
   0xffff8000802cd614 <+12>: mrs x1, tpidr_el1
   0xffff8000802cd618 <+16>: add x0, x0, #0x8
   0xffff8000802cd61c <+20>: ldrsw x0, [x0, x1]
   0xffff8000802cd620 <+24>: ret

After this patch:

Dump of assembler code for function bpf_get_smp_processor_id:
   0xffff8000802c9130 <+0>:  nop
   0xffff8000802c9134 <+4>:  nop
   0xffff8000802c9138 <+8>:  mrs x0, sp_el0
   0xffff8000802c913c <+12>: ldr w0, [x0, #24]
   0xffff8000802c9140 <+16>: ret

A microbenchmark[1] was built to measure the performance improvement
provided by this change. It calls the following function a given number of
times and measures the runtime overhead:

static noinline int get_cpu_id(void)
{
	return smp_processor_id();
}

Run the benchmark like:
modprobe smp_processor_id nr_function_calls=1000000000

+--------+-----------------+----------------------+
|        | Number of Calls | Time taken           |
+--------+-----------------+----------------------+
| Before | 1000000000      | 1602888401ns         |
+--------+-----------------+----------------------+
| After  | 1000000000      | 1206212658ns         |
+--------+-----------------+----------------------+
| Difference (decrease)    | 396675743ns (24.74%) |
+--------------------------+----------------------+

Remove the percpu variable cpu_number, as it is used only in
set_smp_ipi_range() as a dummy variable to be passed to ipi_handler().
Use irq_stat in place of cpu_number here, like arm32.

[1] https://github.com/puranjaymohan/linux/commit/77d3fdd

Signed-off-by: Puranjay Mohan
Acked-by: Mark Rutland
Reviewed-by: Stephen Boyd
Link: https://lore.kernel.org/r/20240503171847.68267-2-puranjay@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/smp.h | 13 +------------
 arch/arm64/kernel/smp.c      |  9 ++-------
 2 files changed, 3 insertions(+), 19 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index efb13112b408..2510eec026f7 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -25,22 +25,11 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm/percpu.h>
-
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
-
-/*
- * We don't use this_cpu_read(cpu_number) as that has implicit writes to
- * preempt_count, and associated (compiler) barriers, that we'd like to avoid
- * the expense of. If we're preemptible, the value can be stale at use anyway.
- * And we can't use this_cpu_ptr() either, as that winds up recursing back
- * here under CONFIG_DEBUG_PREEMPT=y.
- */
-#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 /*
  * Logical CPU mapping.
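For context, a minimal sketch (not part of the patch) of why the new
definition is so cheap: get_current() below mirrors
arch/arm64/include/asm/current.h, and with CONFIG_THREAD_INFO_IN_TASK
struct thread_info sits at the start of struct task_struct, so
current_thread_info()->cpu is one MRS plus one fixed-offset load -- the
"mrs x0, sp_el0; ldr w0, [x0, #24]" pair in the dump above (the #24 offset
is what that particular build happened to produce, not a stable ABI):

/* SP_EL0 carries the current task pointer on arm64 */
static __always_inline struct task_struct *get_current(void)
{
	unsigned long sp_el0;

	asm ("mrs %0, sp_el0" : "=r" (sp_el0));
	return (struct task_struct *)sp_el0;
}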
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 31c8b3094dd7..753360ce72f6 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -55,9 +55,6 @@ #include -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number); -EXPORT_PER_CPU_SYMBOL(cpu_number); - /* * as from 2.5, kernels no longer have an init_tasks structure * so we need some other way of telling a new secondary core @@ -749,8 +746,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) */ for_each_possible_cpu(cpu) { - per_cpu(cpu_number, cpu) = cpu; - if (cpu == smp_processor_id()) continue; @@ -1028,12 +1023,12 @@ void __init set_smp_ipi_range(int ipi_base, int n) if (ipi_should_be_nmi(i)) { err = request_percpu_nmi(ipi_base + i, ipi_handler, - "IPI", &cpu_number); + "IPI", &irq_stat); WARN(err, "Could not request IPI %d as NMI, err=%d\n", i, err); } else { err = request_percpu_irq(ipi_base + i, ipi_handler, - "IPI", &cpu_number); + "IPI", &irq_stat); WARN(err, "Could not request IPI %d as IRQ, err=%d\n", i, err); } -- cgit v1.2.3-58-ga151 From 26ca4423604f15930d96088dc5238f29dc11d5bc Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Tue, 28 May 2024 15:51:30 -0700 Subject: arm64: mte: Make mte_check_tfsr_*() conditional on KASAN instead of MTE The check in mte_check_tfsr_el1() is only necessary if HW tag based KASAN is enabled. However, we were also executing the check if MTE is enabled and KASAN is enabled at build time but disabled at runtime. This turned out to cause a measurable increase in power consumption on a specific microarchitecture after enabling MTE. Moreover, on the same system, an increase in invalid syscall latency (as measured by [1]) of around 20-30% (depending on the cluster) was observed after enabling MTE; this almost entirely goes away after removing this check. Therefore, make the check conditional on whether KASAN is enabled rather than on whether MTE is enabled. [1] https://lore.kernel.org/all/CAMn1gO4MwRV8bmFJ_SeY5tsYNPn2ZP56LjAhafygjFaKuu5ouw@mail.gmail.com/ Signed-off-by: Peter Collingbourne Link: https://linux-review.googlesource.com/id/I22d98d1483dd400a95595946552b769a5a1ad7bd Reviewed-by: Alexandru Elisei Link: https://lore.kernel.org/r/20240528225131.3577704-1-pcc@google.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/mte.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 91fbd5c8a391..0f84518632b4 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -182,7 +182,7 @@ void mte_check_tfsr_el1(void); static inline void mte_check_tfsr_entry(void) { - if (!system_supports_mte()) + if (!kasan_hw_tags_enabled()) return; mte_check_tfsr_el1(); @@ -190,7 +190,7 @@ static inline void mte_check_tfsr_entry(void) static inline void mte_check_tfsr_exit(void) { - if (!system_supports_mte()) + if (!kasan_hw_tags_enabled()) return; /* -- cgit v1.2.3-58-ga151 From be5a6f238700f38b534456608588723fba96c5ab Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 3 Jun 2024 12:18:08 +0100 Subject: arm64: cputype: Add Cortex-X3 definitions Add cputype definitions for Cortex-X3. These will be used for errata detection in subsequent patches. 
These values can be found in Table A-263 ("MIDR_EL1 bit descriptions") in issue 07 of the Cortex-X3 TRM, which can be found at: https://developer.arm.com/documentation/101593/0102/?lang=en Signed-off-by: Mark Rutland Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20240603111812.1514101-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cputype.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 7b32b99023a2..72fe207403c8 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -86,6 +86,7 @@ #define ARM_CPU_PART_CORTEX_X2 0xD48 #define ARM_CPU_PART_NEOVERSE_N2 0xD49 #define ARM_CPU_PART_CORTEX_A78C 0xD4B +#define ARM_CPU_PART_CORTEX_X3 0xD4E #define ARM_CPU_PART_NEOVERSE_V2 0xD4F #define ARM_CPU_PART_CORTEX_X4 0xD82 #define ARM_CPU_PART_NEOVERSE_V3 0xD84 @@ -162,6 +163,7 @@ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) +#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3) #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) -- cgit v1.2.3-58-ga151 From add332c40328cf06fe35e4b3cde8ec315c4629e5 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 3 Jun 2024 12:18:09 +0100 Subject: arm64: cputype: Add Cortex-A720 definitions Add cputype definitions for Cortex-A720. These will be used for errata detection in subsequent patches. 
These values can be found in Table A-186 ("MIDR_EL1 bit descriptions") in issue 0002-05 of the Cortex-A720 TRM, which can be found at: https://developer.arm.com/documentation/102530/0002/?lang=en Signed-off-by: Mark Rutland Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20240603111812.1514101-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cputype.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 72fe207403c8..dcbac1ce6c25 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -88,6 +88,7 @@ #define ARM_CPU_PART_CORTEX_A78C 0xD4B #define ARM_CPU_PART_CORTEX_X3 0xD4E #define ARM_CPU_PART_NEOVERSE_V2 0xD4F +#define ARM_CPU_PART_CORTEX_A720 0xD81 #define ARM_CPU_PART_CORTEX_X4 0xD82 #define ARM_CPU_PART_NEOVERSE_V3 0xD84 @@ -165,6 +166,7 @@ #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) #define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3) #define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) +#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720) #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) -- cgit v1.2.3-58-ga151 From fd2ff5f0b320f418288e7a1f919f648fbc8a0dfc Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 3 Jun 2024 12:18:10 +0100 Subject: arm64: cputype: Add Cortex-X925 definitions Add cputype definitions for Cortex-X925. These will be used for errata detection in subsequent patches. 
These values can be found in Table A-285 ("MIDR_EL1 bit descriptions") in
issue 0001-05 of the Cortex-X925 TRM, which can be found at:

https://developer.arm.com/documentation/102807/0001/?lang=en

Signed-off-by: Mark Rutland
Cc: James Morse
Cc: Will Deacon
Link: https://lore.kernel.org/r/20240603111812.1514101-4-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/cputype.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index dcbac1ce6c25..1cb0704c6163 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -91,6 +91,7 @@
 #define ARM_CPU_PART_CORTEX_A720	0xD81
 #define ARM_CPU_PART_CORTEX_X4		0xD82
 #define ARM_CPU_PART_NEOVERSE_V3	0xD84
+#define ARM_CPU_PART_CORTEX_X925	0xD85
 
 #define APM_CPU_PART_XGENE		0x000
 #define APM_CPU_VAR_POTENZA		0x00
@@ -169,6 +170,7 @@
 #define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
 #define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
 #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
-- cgit v1.2.3-58-ga151

From ec768766608092087dfb5c1fc45a16a6f524dee2 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 3 Jun 2024 12:18:11 +0100
Subject: arm64: errata: Unify speculative SSBS errata logic

Cortex-X4 erratum 3194386 and Neoverse-V3 erratum 3312417 are identical,
with duplicate Kconfig text and some unsightly ifdeffery.

While we try to share code behind CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS,
having separate options results in a fair amount of boilerplate code, and
this will only get worse as we expand the set of affected CPUs.

To reduce this boilerplate, unify the two behind a common Kconfig option.
This removes the duplicate text and Kconfig logic, and removes the need for
the intermediate ARM64_WORKAROUND_SPECULATIVE_SSBS option. The set of
affected CPUs is described as a list so that this can easily be extended.

I've used ARM64_ERRATUM_3194386 (matching the Cortex-X4 erratum ID) as the
common option, matching the way we use ARM64_ERRATUM_1319367 to cover
Cortex-A57 erratum 1319537 and Cortex-A72 erratum 1319367.

Signed-off-by: Mark Rutland
Cc: James Morse
Cc: Will Deacon
Link: https://lore.kernel.org/r/20240603111812.1514101-5-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 Documentation/arch/arm64/silicon-errata.rst |  2 +-
 arch/arm64/Kconfig                          | 29 ++++------------------------
 arch/arm64/include/asm/cpucaps.h            |  2 +-
 arch/arm64/kernel/cpu_errata.c              |  8 ++------
 arch/arm64/kernel/proton-pack.c             |  2 +-
 5 files changed, 9 insertions(+), 34 deletions(-)

(limited to 'arch')

diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index eb8af8032c31..59ee2832406c 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -158,7 +158,7 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-V1     | #1619801        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
-| ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3312417       |
+| ARM            | Neoverse-V3     | #3312417        | ARM64_ERRATUM_3194386       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d91259ee7b5..fb31ff9151b9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1067,34 +1067,14 @@ config ARM64_ERRATUM_3117295
 	  If unsure, say Y.
 
-config ARM64_WORKAROUND_SPECULATIVE_SSBS
-	bool
-
 config ARM64_ERRATUM_3194386
-	bool "Cortex-X4: 3194386: workaround for MSR SSBS not self-synchronizing"
-	select ARM64_WORKAROUND_SPECULATIVE_SSBS
+	bool "Cortex-X4/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
 	default y
 	help
-	  This option adds the workaround for ARM Cortex-X4 erratum 3194386.
+	  This option adds the workaround for the following errata:
 
-	  On affected cores "MSR SSBS, #0" instructions may not affect
-	  subsequent speculative instructions, which may permit unexpected
-	  speculative store bypassing.
-
-	  Work around this problem by placing a speculation barrier after
-	  kernel changes to SSBS. The presence of the SSBS special-purpose
-	  register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
-	  that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
-	  SSBS.
-
-	  If unsure, say Y.
-
-config ARM64_ERRATUM_3312417
-	bool "Neoverse-V3: 3312417: workaround for MSR SSBS not self-synchronizing"
-	select ARM64_WORKAROUND_SPECULATIVE_SSBS
-	default y
-	help
-	  This option adds the workaround for ARM Neoverse-V3 erratum 3312417.
+	  * ARM Cortex-X4 erratum 3194386
+	  * ARM Neoverse-V3 erratum 3312417
 
 	  On affected cores "MSR SSBS, #0" instructions may not affect
 	  subsequent speculative instructions, which may permit unexpected
@@ -1108,7 +1088,6 @@ config ARM64_ERRATUM_3312417
 	  If unsure, say Y.
- config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 7529c0263933..a6e5b07b64fd 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -59,7 +59,7 @@ cpucap_is_possible(const unsigned int cap) case ARM64_WORKAROUND_REPEAT_TLBI: return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI); case ARM64_WORKAROUND_SPECULATIVE_SSBS: - return IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS); + return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386); } return true; diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 828be635e7e1..5fbe14dc607f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -432,14 +432,10 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = { }; #endif -#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS -static const struct midr_range erratum_spec_ssbs_list[] = { #ifdef CONFIG_ARM64_ERRATUM_3194386 +static const struct midr_range erratum_spec_ssbs_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_X4), -#endif -#ifdef CONFIG_ARM64_ERRATUM_3312417 MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), -#endif {} }; #endif @@ -741,7 +737,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)), }, #endif -#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS +#ifdef CONFIG_ARM64_ERRATUM_3194386 { .desc = "ARM errata 3194386, 3312417", .capability = ARM64_WORKAROUND_SPECULATIVE_SSBS, diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index baca47bd443c..da53722f95d4 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -567,7 +567,7 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void) * Mitigate this with an unconditional speculation barrier, as CPUs * could mis-speculate branches and bypass a conditional barrier. */ - if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS)) + if (IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386)) spec_bar(); return SPECTRE_MITIGATED; -- cgit v1.2.3-58-ga151 From 75b3c43eab594bfbd8184ec8ee1a6b820950819a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 3 Jun 2024 12:18:12 +0100 Subject: arm64: errata: Expand speculative SSBS workaround A number of Arm Ltd CPUs suffer from errata whereby an MSR to the SSBS special-purpose register does not affect subsequent speculative instructions, permitting speculative store bypassing for a window of time. We worked around this for Cortex-X4 and Neoverse-V3, in commit: 7187bb7d0b5c7dfa ("arm64: errata: Add workaround for Arm errata 3194386 and 3312417") ... as per their Software Developer Errata Notice (SDEN) documents: * Cortex-X4 SDEN v8.0, erratum 3194386: https://developer.arm.com/documentation/SDEN-2432808/0800/ * Neoverse-V3 SDEN v6.0, erratum 3312417: https://developer.arm.com/documentation/SDEN-2891958/0600/ Since then, similar errata have been published for a number of other Arm Ltd CPUs, for which the mitigation is the same. 
This is described in their respective SDEN documents:

* Cortex-A710 SDEN v19.0, erratum 3324338
  https://developer.arm.com/documentation/SDEN-1775101/1900/?lang=en

* Cortex-A720 SDEN v11.0, erratum 3456091
  https://developer.arm.com/documentation/SDEN-2439421/1100/?lang=en

* Cortex-X2 SDEN v19.0, erratum 3324338
  https://developer.arm.com/documentation/SDEN-1775100/1900/?lang=en

* Cortex-X3 SDEN v14.0, erratum 3324335
  https://developer.arm.com/documentation/SDEN-2055130/1400/?lang=en

* Cortex-X925 SDEN v8.0, erratum 3324334
  https://developer.arm.com/documentation/109108/800/?lang=en

* Neoverse-N2 SDEN v17.0, erratum 3324339
  https://developer.arm.com/documentation/SDEN-1982442/1700/?lang=en

* Neoverse-V2 SDEN v9.0, erratum 3324336
  https://developer.arm.com/documentation/SDEN-2332927/900/?lang=en

Note that due to shared design lineage, some CPUs share the same erratum
number.

Add these to the existing mitigation under CONFIG_ARM64_ERRATUM_3194386.
As listing all of the erratum IDs in the runtime description would be
unwieldy, this is reduced to:

  "SSBS not fully self-synchronizing"

... matching the description of the errata in all of the SDENs.

Signed-off-by: Mark Rutland
Cc: James Morse
Cc: Will Deacon
Link: https://lore.kernel.org/r/20240603111812.1514101-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 Documentation/arch/arm64/silicon-errata.rst | 14 ++++++++++++++
 arch/arm64/Kconfig                          |  9 ++++++++-
 arch/arm64/kernel/cpu_errata.c              |  9 ++++++++-
 3 files changed, 30 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index 59ee2832406c..bb83c5d8c675 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -132,16 +132,26 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A710 | #3324338 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X1 | #1502854 | N/A | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-X2 | #3324338 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-X3 | #3324335 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X4 | #3194386 | ARM64_ERRATUM_3194386 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-X925 | #3324334 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | @@ -156,8 +166,12 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-V1 | #1619801 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fb31ff9151b9..f580f5af4a51 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1068,12 +1068,19 @@ config ARM64_ERRATUM_3117295 If unsure, say Y. 
 config ARM64_ERRATUM_3194386
-	bool "Cortex-X4/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
+	bool "Cortex-{A720,X4,X925}/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
 	default y
 	help
 	  This option adds the workaround for the following errata:
 
+	  * ARM Cortex-A710 erratum 3324338
+	  * ARM Cortex-A720 erratum 3456091
+	  * ARM Cortex-X2 erratum 3324338
+	  * ARM Cortex-X3 erratum 3324335
 	  * ARM Cortex-X4 erratum 3194386
+	  * ARM Cortex-X925 erratum 3324334
+	  * ARM Neoverse N2 erratum 3324339
+	  * ARM Neoverse V2 erratum 3324336
 	  * ARM Neoverse-V3 erratum 3312417
 
 	  On affected cores "MSR SSBS, #0" instructions may not affect
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 5fbe14dc607f..617424b73f8c 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -434,8 +434,15 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
 
 #ifdef CONFIG_ARM64_ERRATUM_3194386
 static const struct midr_range erratum_spec_ssbs_list[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
 	{}
 };
 #endif
@@ -739,7 +746,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_3194386
 	{
-		.desc = "ARM errata 3194386, 3312417",
+		.desc = "SSBS not fully self-synchronizing",
 		.capability = ARM64_WORKAROUND_SPECULATIVE_SSBS,
 		ERRATA_MIDR_RANGE_LIST(erratum_spec_ssbs_list),
 	},
-- cgit v1.2.3-58-ga151

From 99e7a8adc0ca906151f5d70ff68b8a81f53fd106 Mon Sep 17 00:00:00 2001
From: Sudeep Holla
Date: Wed, 5 Jun 2024 14:14:57 +0100
Subject: arm64: cpuidle: Move ACPI specific code into drivers/acpi/arm64/

The ACPI cpuidle LPI FFH code can be moved out of arm64 arch code as it
just uses SMCCC. Move all the ACPI cpuidle LPI FFH code into
drivers/acpi/arm64/cpuidle.c

Signed-off-by: Sudeep Holla
Acked-by: Catalin Marinas
Acked-by: Hanjun Guo
Link: https://lore.kernel.org/r/20240605131458.3341095-3-sudeep.holla@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/Makefile   |  1 -
 arch/arm64/kernel/cpuidle.c  | 74 --------------------------------------------
 drivers/acpi/arm64/Makefile  |  1 +
 drivers/acpi/arm64/cpuidle.c | 70 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 71 insertions(+), 75 deletions(-)
 delete mode 100644 arch/arm64/kernel/cpuidle.c
 create mode 100644 drivers/acpi/arm64/cpuidle.c

(limited to 'arch')

diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 763824963ed1..2b112f3b7510 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -46,7 +46,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
 obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
deleted file mode 100644
index f372295207fb..000000000000
--- a/arch/arm64/kernel/cpuidle.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ARM64 CPU idle arch support
- *
- * Copyright (C) 2014 ARM Ltd.
- * Author: Lorenzo Pieralisi - */ - -#include -#include -#include -#include - -#ifdef CONFIG_ACPI_PROCESSOR_IDLE - -#include - -#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) - -static int psci_acpi_cpu_init_idle(unsigned int cpu) -{ - int i, count; - struct acpi_lpi_state *lpi; - struct acpi_processor *pr = per_cpu(processors, cpu); - - if (unlikely(!pr || !pr->flags.has_lpi)) - return -EINVAL; - - /* - * If the PSCI cpu_suspend function hook has not been initialized - * idle states must not be enabled, so bail out - */ - if (!psci_ops.cpu_suspend) - return -EOPNOTSUPP; - - count = pr->power.count - 1; - if (count <= 0) - return -ENODEV; - - for (i = 0; i < count; i++) { - u32 state; - - lpi = &pr->power.lpi_states[i + 1]; - /* - * Only bits[31:0] represent a PSCI power_state while - * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification - */ - state = lpi->address; - if (!psci_power_state_is_valid(state)) { - pr_warn("Invalid PSCI power state %#x\n", state); - return -EINVAL; - } - } - - return 0; -} - -int acpi_processor_ffh_lpi_probe(unsigned int cpu) -{ - return psci_acpi_cpu_init_idle(cpu); -} - -__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) -{ - u32 state = lpi->address; - - if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) - return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter, - lpi->index, state); - else - return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, - lpi->index, state); -} -#endif diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 7d7fd6512bca..2efee23f00b4 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_ACPI_AGDI) += agdi.o obj-$(CONFIG_ACPI_APMT) += apmt.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_IORT) += iort.o +obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o obj-$(CONFIG_ARM_AMBA) += amba.o obj-y += dma.o init.o obj-y += thermal_cpufreq.o diff --git a/drivers/acpi/arm64/cpuidle.c b/drivers/acpi/arm64/cpuidle.c new file mode 100644 index 000000000000..801f9c450142 --- /dev/null +++ b/drivers/acpi/arm64/cpuidle.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM64 CPU idle arch support + * + * Copyright (C) 2014 ARM Ltd. 
+ * Author: Lorenzo Pieralisi + */ + +#include +#include +#include +#include +#include + +#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) + +static int psci_acpi_cpu_init_idle(unsigned int cpu) +{ + int i, count; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; + + for (i = 0; i < count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i + 1]; + /* + * Only bits[31:0] represent a PSCI power_state while + * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification + */ + state = lpi->address; + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + +int acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return psci_acpi_cpu_init_idle(cpu); +} + +__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + u32 state = lpi->address; + + if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); + else + return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, + lpi->index, state); +} -- cgit v1.2.3-58-ga151 From 7a7a1cac3c2f5e3c859efb295bad3469e09e6f8f Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 5 Jun 2024 14:14:58 +0100 Subject: arm64: FFH: Move ACPI specific code into drivers/acpi/arm64/ The ACPI FFH Opregion code can be moved out of arm64 arch code as it just uses SMCCC. Move all the ACPI FFH Opregion code into drivers/acpi/arm64/ffh.c Signed-off-by: Sudeep Holla Acked-by: Catalin Marinas Acked-by: Hanjun Guo Link: https://lore.kernel.org/r/20240605131458.3341095-4-sudeep.holla@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/acpi.c | 105 ------------------------------------------- drivers/acpi/arm64/Makefile | 1 + drivers/acpi/arm64/ffh.c | 107 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 108 insertions(+), 105 deletions(-) create mode 100644 drivers/acpi/arm64/ffh.c (limited to 'arch') diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index e0e7b93c16cc..fd1917336d3a 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -422,108 +422,3 @@ void arch_reserve_mem_area(acpi_physical_address addr, size_t size) { memblock_mark_nomap(addr, size); } - -#ifdef CONFIG_ACPI_FFH -/* - * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as - * specified in https://developer.arm.com/docs/den0048/latest - */ -struct acpi_ffh_data { - struct acpi_ffh_info info; - void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1, - unsigned long a2, unsigned long a3, - unsigned long a4, unsigned long a5, - unsigned long a6, unsigned long a7, - struct arm_smccc_res *args, - struct arm_smccc_quirk *res); - void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args, - struct arm_smccc_1_2_regs *res); -}; - -int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt) -{ - enum arm_smccc_conduit conduit; - struct acpi_ffh_data *ffh_ctxt; - - if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) - return -EOPNOTSUPP; - - conduit = arm_smccc_1_1_get_conduit(); - if (conduit == SMCCC_CONDUIT_NONE) { - pr_err("%s: invalid SMCCC conduit\n", __func__); - 
return -EOPNOTSUPP; - } - - ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL); - if (!ffh_ctxt) - return -ENOMEM; - - if (conduit == SMCCC_CONDUIT_SMC) { - ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc; - ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc; - } else { - ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc; - ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc; - } - - memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info)); - - *region_ctxt = ffh_ctxt; - return AE_OK; -} - -static bool acpi_ffh_smccc_owner_allowed(u32 fid) -{ - int owner = ARM_SMCCC_OWNER_NUM(fid); - - if (owner == ARM_SMCCC_OWNER_STANDARD || - owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM) - return true; - - return false; -} - -int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context) -{ - int ret = 0; - struct acpi_ffh_data *ffh_ctxt = region_context; - - if (ffh_ctxt->info.offset == 0) { - /* SMC/HVC 32bit call */ - struct arm_smccc_res res; - u32 a[8] = { 0 }, *ptr = (u32 *)value; - - if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) || - !acpi_ffh_smccc_owner_allowed(*ptr) || - ffh_ctxt->info.length > 32) { - ret = AE_ERROR; - } else { - int idx, len = ffh_ctxt->info.length >> 2; - - for (idx = 0; idx < len; idx++) - a[idx] = *(ptr + idx); - - ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4], - a[5], a[6], a[7], &res, NULL); - memcpy(value, &res, sizeof(res)); - } - - } else if (ffh_ctxt->info.offset == 1) { - /* SMC/HVC 64bit call */ - struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value; - - if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) || - !acpi_ffh_smccc_owner_allowed(r->a0) || - ffh_ctxt->info.length > sizeof(*r)) { - ret = AE_ERROR; - } else { - ffh_ctxt->invoke_ffh64_fn(r, r); - memcpy(value, r, ffh_ctxt->info.length); - } - } else { - ret = AE_ERROR; - } - - return ret; -} -#endif /* CONFIG_ACPI_FFH */ diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 2efee23f00b4..05ecde9eaabe 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_ACPI_AGDI) += agdi.o obj-$(CONFIG_ACPI_APMT) += apmt.o +obj-$(CONFIG_ACPI_FFH) += ffh.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o diff --git a/drivers/acpi/arm64/ffh.c b/drivers/acpi/arm64/ffh.c new file mode 100644 index 000000000000..877edc6557e9 --- /dev/null +++ b/drivers/acpi/arm64/ffh.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include + +/* + * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as + * specified in https://developer.arm.com/docs/den0048/latest + */ +struct acpi_ffh_data { + struct acpi_ffh_info info; + void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, + unsigned long a4, unsigned long a5, + unsigned long a6, unsigned long a7, + struct arm_smccc_res *args, + struct arm_smccc_quirk *res); + void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args, + struct arm_smccc_1_2_regs *res); +}; + +int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt) +{ + enum arm_smccc_conduit conduit; + struct acpi_ffh_data *ffh_ctxt; + + if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2) + return -EOPNOTSUPP; + + conduit = arm_smccc_1_1_get_conduit(); + if (conduit == SMCCC_CONDUIT_NONE) { + pr_err("%s: invalid SMCCC conduit\n", __func__); + return -EOPNOTSUPP; + } + + ffh_ctxt = 
kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL); + if (!ffh_ctxt) + return -ENOMEM; + + if (conduit == SMCCC_CONDUIT_SMC) { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc; + } else { + ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc; + ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc; + } + + memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info)); + + *region_ctxt = ffh_ctxt; + return AE_OK; +} + +static bool acpi_ffh_smccc_owner_allowed(u32 fid) +{ + int owner = ARM_SMCCC_OWNER_NUM(fid); + + if (owner == ARM_SMCCC_OWNER_STANDARD || + owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM) + return true; + + return false; +} + +int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context) +{ + int ret = 0; + struct acpi_ffh_data *ffh_ctxt = region_context; + + if (ffh_ctxt->info.offset == 0) { + /* SMC/HVC 32bit call */ + struct arm_smccc_res res; + u32 a[8] = { 0 }, *ptr = (u32 *)value; + + if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) || + !acpi_ffh_smccc_owner_allowed(*ptr) || + ffh_ctxt->info.length > 32) { + ret = AE_ERROR; + } else { + int idx, len = ffh_ctxt->info.length >> 2; + + for (idx = 0; idx < len; idx++) + a[idx] = *(ptr + idx); + + ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4], + a[5], a[6], a[7], &res, NULL); + memcpy(value, &res, sizeof(res)); + } + + } else if (ffh_ctxt->info.offset == 1) { + /* SMC/HVC 64bit call */ + struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value; + + if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) || + !acpi_ffh_smccc_owner_allowed(r->a0) || + ffh_ctxt->info.length > sizeof(*r)) { + ret = AE_ERROR; + } else { + ffh_ctxt->invoke_ffh64_fn(r, r); + memcpy(value, r, ffh_ctxt->info.length); + } + } else { + ret = AE_ERROR; + } + + return ret; +} -- cgit v1.2.3-58-ga151 From 2a805201f9bdfcc172ac54f7e21843e186389506 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Wed, 12 Jun 2024 13:34:56 -0700 Subject: ARM64: reloc_test: add missing MODULE_DESCRIPTION() macro With ARCH=arm64, make allmodconfig && make W=1 C=1 reports: WARNING: modpost: missing MODULE_DESCRIPTION() in arch/arm64/kernel/arm64-reloc-test.o Add the missing invocation of the MODULE_DESCRIPTION() macro. Signed-off-by: Jeff Johnson Link: https://lore.kernel.org/r/20240612-md-arch-arm64-kernel-v1-1-1fafe8d11df3@quicinc.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/reloc_test_core.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm64/kernel/reloc_test_core.c b/arch/arm64/kernel/reloc_test_core.c index 99f2ffe9fc05..5b0891146054 100644 --- a/arch/arm64/kernel/reloc_test_core.c +++ b/arch/arm64/kernel/reloc_test_core.c @@ -74,4 +74,5 @@ static void __exit reloc_test_exit(void) module_init(reloc_test_init); module_exit(reloc_test_exit); +MODULE_DESCRIPTION("Relocation testing module"); MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-58-ga151 From cf63fe35f1efa71a4b09f07abbcdf28144b6c6b6 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (IBM)" Date: Thu, 20 Jun 2024 20:40:38 +0300 Subject: arm64: Kconfig: fix typo in __builtin_return_adddress Comment about BUILTIN_RETURN_ADDRESS_STRIPS_PAC spells __builtin_return_adddress with a triple 'd', fix it. 
Signed-off-by: Mike Rapoport (IBM)
Link: https://lore.kernel.org/r/20240620174038.3721466-1-rppt@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d91259ee7b5..3f39657af2cb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -381,7 +381,7 @@ config BROKEN_GAS_INST
 
 config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
 	bool
-	# Clang's __builtin_return_adddress() strips the PAC since 12.0.0
+	# Clang's __builtin_return_address() strips the PAC since 12.0.0
 	# https://github.com/llvm/llvm-project/commit/2a96f47c5ffca84cd774ad402cacd137f4bf45e2
 	default y if CC_IS_CLANG
 	# GCC's __builtin_return_address() strips the PAC since 11.1.0,
-- cgit v1.2.3-58-ga151

From 573611145fcb6325a28c462aca3753e257a0b2a6 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 18 Jun 2024 09:17:03 +0530
Subject: arm64/mm: Stop using ESR_ELx_FSC_TYPE during fault

Fault status codes at page table levels 0, 1, 2 and 3 for access,
permission and translation faults are architecturally organized such that
masking out ESR_ELx_FSC_TYPE fetches the level 0 status code for the
respective fault.

Helpers like esr_fsc_is_[translation|permission|access_flag]_fault() mask
out ESR_ELx_FSC_TYPE before comparing against the corresponding level 0
status code, as the kernel has so far not cared about the page table level
at which the fault really occurred.

This scheme started to crumble with FEAT_LPA2, which added level -1. The
fault status code for a translation fault at level -1 is 0x2B, which does
not follow the ESR_ELx_FSC_TYPE pattern, and required changes to
esr_fsc_is_translation_fault().

This changes the above helpers to compare against the individual fault
status code values for each page table level and stops using
ESR_ELx_FSC_TYPE, which is losing its value as a common mask.

Cc: Will Deacon
Cc: Marc Zyngier
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Reviewed-by: Marc Zyngier
Reviewed-by: Ryan Roberts
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20240618034703.3622510-1-anshuman.khandual@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/esr.h | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7abf09df7033..3f482500f71f 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -121,6 +121,14 @@
 #define ESR_ELx_FSC_SECC	(0x18)
 #define ESR_ELx_FSC_SECC_TTW(n)	(0x1c + (n))
 
+/* Status codes for individual page table levels */
+#define ESR_ELx_FSC_ACCESS_L(n)	(ESR_ELx_FSC_ACCESS + n)
+#define ESR_ELx_FSC_PERM_L(n)	(ESR_ELx_FSC_PERM + n)
+
+#define ESR_ELx_FSC_FAULT_nL	(0x2C)
+#define ESR_ELx_FSC_FAULT_L(n)	(((n) < 0 ?
ESR_ELx_FSC_FAULT_nL : \ + ESR_ELx_FSC_FAULT) + (n)) + /* ISS field definitions for Data Aborts */ #define ESR_ELx_ISV_SHIFT (24) #define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT) @@ -388,20 +396,33 @@ static inline bool esr_is_data_abort(unsigned long esr) static inline bool esr_fsc_is_translation_fault(unsigned long esr) { - /* Translation fault, level -1 */ - if ((esr & ESR_ELx_FSC) == 0b101011) - return true; - return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT; + esr = esr & ESR_ELx_FSC; + + return (esr == ESR_ELx_FSC_FAULT_L(3)) || + (esr == ESR_ELx_FSC_FAULT_L(2)) || + (esr == ESR_ELx_FSC_FAULT_L(1)) || + (esr == ESR_ELx_FSC_FAULT_L(0)) || + (esr == ESR_ELx_FSC_FAULT_L(-1)); } static inline bool esr_fsc_is_permission_fault(unsigned long esr) { - return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM; + esr = esr & ESR_ELx_FSC; + + return (esr == ESR_ELx_FSC_PERM_L(3)) || + (esr == ESR_ELx_FSC_PERM_L(2)) || + (esr == ESR_ELx_FSC_PERM_L(1)) || + (esr == ESR_ELx_FSC_PERM_L(0)); } static inline bool esr_fsc_is_access_flag_fault(unsigned long esr) { - return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS; + esr = esr & ESR_ELx_FSC; + + return (esr == ESR_ELx_FSC_ACCESS_L(3)) || + (esr == ESR_ELx_FSC_ACCESS_L(2)) || + (esr == ESR_ELx_FSC_ACCESS_L(1)) || + (esr == ESR_ELx_FSC_ACCESS_L(0)); } /* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */ -- cgit v1.2.3-58-ga151 From 18fdb6348c480fb646799f621204f674815f05e7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 17 Jun 2024 12:18:41 +0100 Subject: arm64: irqchip/gic-v3: Select priorities at boot time The distributor and PMR/RPR can present different views of the interrupt priority space dependent upon the values of GICD_CTLR.DS and SCR_EL3.FIQ. Currently we treat the distributor's view of the priority space as canonical, and when the two differ we change the way we handle values in the PMR/RPR, using the `gic_nonsecure_priorities` static key to decide what to do. This approach works, but it's sub-optimal. When using pseudo-NMI we manipulate the distributor rarely, and we manipulate the PMR/RPR registers very frequently in code spread out throughout the kernel (e.g. local_irq_{save,restore}()). It would be nicer if we could use fixed values for the PMR/RPR, and dynamically choose the values programmed into the distributor. This patch changes the GICv3 driver and arm64 code accordingly. PMR values are chosen at compile time, and the GICv3 driver determines the appropriate values to program into the distributor at boot time. This removes the need for the `gic_nonsecure_priorities` static key and results in smaller and better generated code for saving/restoring the irqflags. Before this patch, local_irq_disable() compiles to: | 0000000000000000 : | 0: d503201f nop | 4: d50343df msr daifset, #0x3 | 8: d65f03c0 ret | c: d503201f nop | 10: d2800c00 mov x0, #0x60 // #96 | 14: d5184600 msr icc_pmr_el1, x0 | 18: d65f03c0 ret | 1c: d2801400 mov x0, #0xa0 // #160 | 20: 17fffffd b 14 After this patch, local_irq_disable() compiles to: | 0000000000000000 : | 0: d503201f nop | 4: d50343df msr daifset, #0x3 | 8: d65f03c0 ret | c: d2801800 mov x0, #0xc0 // #192 | 10: d5184600 msr icc_pmr_el1, x0 | 14: d65f03c0 ret ... with 3 fewer instructions per call. For defconfig + CONFIG_PSEUDO_NMI=y, this results in a minor saving of ~4K of text, and will make it easier to make further improvements to the way we manipulate irqflags and DAIF bits. 
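As a worked example (an editor's sketch built only from the __gicv3_*
helpers this patch introduces in arm-gic-v3-prio.h below, not part of the
original message): in the GICD_CTRL.DS=0, SCR_EL3.FIQ=0 case a non-secure
distributor write of v is stored as 0x80 | (v >> 1), i.e.
__gicv3_ns_to_prio(v), so pre-distorting the fixed constants with
__gicv3_prio_to_ns() leaves the distributor holding exactly the PMR-view
value:

#define __gicv3_prio_to_ns(p)	(0xff & ((p) << 1))
#define __gicv3_ns_to_prio(ns)	(0x80 | ((ns) >> 1))

/* GICV3_PRIO_IRQ: 0xc0 is written as 0x80 and lands as 0xc0 again */
static_assert(__gicv3_ns_to_prio(__gicv3_prio_to_ns(0xc0)) == 0xc0);
/* GICV3_PRIO_NMI: 0x80 is written as 0x00 and lands as 0x80 again */
static_assert(__gicv3_ns_to_prio(__gicv3_prio_to_ns(0x80)) == 0x80);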
Signed-off-by: Mark Rutland Cc: Alexandru Elisei Cc: Marc Zyngier Cc: Thomas Gleixner Cc: Will Deacon Reviewed-by: Marc Zyngier Tested-by: Marc Zyngier Link: https://lore.kernel.org/r/20240617111841.2529370-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas Acked-by: Thomas Gleixner --- arch/arm64/include/asm/arch_gicv3.h | 15 ------ arch/arm64/include/asm/ptrace.h | 35 +++--------- arch/arm64/kernel/image-vars.h | 5 -- drivers/irqchip/irq-gic-v3.c | 96 ++++++++++++++------------------- include/linux/irqchip/arm-gic-v3-prio.h | 52 ++++++++++++++++++ 5 files changed, 97 insertions(+), 106 deletions(-) create mode 100644 include/linux/irqchip/arm-gic-v3-prio.h (limited to 'arch') diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 5f172611654b..9e96f024b2f1 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -175,21 +175,6 @@ static inline bool gic_prio_masking_enabled(void) static inline void gic_pmr_mask_irqs(void) { - BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF | - GIC_PRIO_PSR_I_SET)); - BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON); - /* - * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared - * and non-secure PMR accesses are not subject to the shifts that - * are applied to IRQ priorities - */ - BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON); - /* - * Same situation as above, but now we make sure that we can mask - * regular interrupts. - */ - BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS | - GIC_PRIO_PSR_I_SET)); gic_write_pmr(GIC_PRIO_IRQOFF); } diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 47ec58031f11..0abe975d68a8 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -21,35 +21,12 @@ #define INIT_PSTATE_EL2 \ (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h) -/* - * PMR values used to mask/unmask interrupts. - * - * GIC priority masking works as follows: if an IRQ's priority is a higher value - * than the value held in PMR, that IRQ is masked. Lowering the value of PMR - * means masking more IRQs (or at least that the same IRQs remain masked). - * - * To mask interrupts, we clear the most significant bit of PMR. - * - * Some code sections either automatically switch back to PSR.I or explicitly - * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included - * in the priority mask, it indicates that PSR.I should be set and - * interrupt disabling temporarily does not rely on IRQ priorities. 
- */ -#define GIC_PRIO_IRQON 0xe0 -#define __GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) -#define __GIC_PRIO_IRQOFF_NS 0xa0 -#define GIC_PRIO_PSR_I_SET (1 << 4) - -#define GIC_PRIO_IRQOFF \ - ({ \ - extern struct static_key_false gic_nonsecure_priorities;\ - u8 __prio = __GIC_PRIO_IRQOFF; \ - \ - if (static_branch_unlikely(&gic_nonsecure_priorities)) \ - __prio = __GIC_PRIO_IRQOFF_NS; \ - \ - __prio; \ - }) +#include + +#define GIC_PRIO_IRQON GICV3_PRIO_UNMASKED +#define GIC_PRIO_IRQOFF GICV3_PRIO_IRQ + +#define GIC_PRIO_PSR_I_SET GICV3_PRIO_PSR_I_SET /* Additional SPSR bits not exposed in the UABI */ #define PSR_MODE_THREAD_BIT (1 << 0) diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index ba4f8f7d6a91..8f5422ed1b75 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -105,11 +105,6 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors); KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); -#ifdef CONFIG_ARM64_PSEUDO_NMI -/* Static key checked in GIC_PRIO_IRQOFF. */ -KVM_NVHE_ALIAS(gic_nonsecure_priorities); -#endif - /* EL2 exception handling */ KVM_NVHE_ALIAS(__start___kvm_ex_table); KVM_NVHE_ALIAS(__stop___kvm_ex_table); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index e262f73b1ce8..1b2b5f89f832 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -37,8 +38,8 @@ #include "irq-gic-common.h" -static u8 dist_prio_irq __ro_after_init = GICD_INT_DEF_PRI; -static u8 dist_prio_nmi __ro_after_init = GICD_INT_DEF_PRI & ~0x80; +static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ; +static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) @@ -110,30 +111,6 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); */ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); -DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); -EXPORT_SYMBOL(gic_nonsecure_priorities); - -/* - * When the Non-secure world has access to group 0 interrupts (as a - * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will - * return the Distributor's view of the interrupt priority. - * - * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority - * written by software is moved to the Non-secure range by the Distributor. - * - * If both are true (which is when gic_nonsecure_priorities gets enabled), - * we need to shift down the priority programmed by software to match it - * against the value returned by ICC_RPR_EL1. - */ -#define GICD_INT_RPR_PRI(priority) \ - ({ \ - u32 __priority = (priority); \ - if (static_branch_unlikely(&gic_nonsecure_priorities)) \ - __priority = 0x80 | (__priority >> 1); \ - \ - __priority; \ - }) - static u32 gic_get_pribits(void) { u32 pribits; @@ -185,6 +162,41 @@ static void __init gic_prio_init(void) cpus_have_security_disabled = gic_dist_security_disabled(); cpus_have_group0 = gic_has_group0(); + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTRL.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit). 
These affect the + * way priorities are presented in ICC_PMR_EL1 and in the distributor: + * + * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor + * ------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * In the non-secure view reads and writes are modified: + * + * - A value written is right-shifted by one and the MSB is set, + * forcing the priority into the non-secure range. + * + * - A value read is left-shifted by one. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we program the non-secure values into + * the distributor to match the PMR values we want. + */ + if (cpus_have_group0 & !cpus_have_security_disabled) { + dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq); + dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi); + } + pr_info("GICD_CTRL.DS=%d, SCR_EL3.FIQ=%d\n", cpus_have_security_disabled, !cpus_have_group0); @@ -811,7 +823,7 @@ static bool gic_rpr_is_nmi_prio(void) if (!gic_supports_nmi()) return false; - return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(dist_prio_nmi)); + return unlikely(gic_read_rpr() == GICV3_PRIO_NMI); } static bool gic_irqnr_is_special(u32 irqnr) @@ -1960,36 +1972,6 @@ static void gic_enable_nmi_support(void) pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); - /* - * How priority values are used by the GIC depends on two things: - * the security state of the GIC (controlled by the GICD_CTRL.DS bit) - * and if Group 0 interrupts can be delivered to Linux in the non-secure - * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the - * ICC_PMR_EL1 register and the priority that software assigns to - * interrupts: - * - * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority - * ----------------------------------------------------------- - * 1 | - | unchanged | unchanged - * ----------------------------------------------------------- - * 0 | 1 | non-secure | non-secure - * ----------------------------------------------------------- - * 0 | 0 | unchanged | non-secure - * - * where non-secure means that the value is right-shifted by one and the - * MSB bit set, to make it fit in the non-secure priority range. - * - * In the first two cases, where ICC_PMR_EL1 and the interrupt priority - * are both either modified or unchanged, we can use the same set of - * priorities. - * - * In the last case, where only the interrupt priorities are modified to - * be in the non-secure range, we use a different PMR value to mask IRQs - * and the rest of the values that we use remain unchanged. 
- */
- if (gic_has_group0() && !gic_dist_security_disabled())
- static_branch_enable(&gic_nonsecure_priorities);
-
 static_branch_enable(&supports_pseudo_nmis);

 if (static_branch_likely(&supports_deactivate_key))
diff --git a/include/linux/irqchip/arm-gic-v3-prio.h b/include/linux/irqchip/arm-gic-v3-prio.h
new file mode 100644
index 000000000000..44157c9abb78
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3-prio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+
+/*
+ * GIC priorities from the view of the PMR/RPR.
+ *
+ * These values are chosen to be valid in either the absolute priority space or
+ * the NS view of the priority space. The value programmed into the distributor
+ * and ITS will be chosen at boot time such that these values appear in the
+ * PMR/RPR.
+ *
+ * GICV3_PRIO_UNMASKED is the PMR view of the priority to use to permit both
+ * IRQs and pseudo-NMIs.
+ *
+ * GICV3_PRIO_IRQ is the PMR view of the priority of regular interrupts. This
+ * can be written to the PMR to mask regular IRQs.
+ *
+ * GICV3_PRIO_NMI is the PMR view of the priority of pseudo-NMIs. This can be
+ * written to the PMR to mask pseudo-NMIs.
+ *
+ * On arm64 some code sections either automatically switch back to PSR.I or
+ * explicitly require to not use priority masking. If bit GICV3_PRIO_PSR_I_SET
+ * is included in the priority mask, it indicates that PSR.I should be set and
+ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+#define GICV3_PRIO_UNMASKED 0xe0
+#define GICV3_PRIO_IRQ 0xc0
+#define GICV3_PRIO_NMI 0x80
+
+#define GICV3_PRIO_PSR_I_SET (1 << 4)
+
+#ifndef __ASSEMBLER__
+
+#define __gicv3_prio_to_ns(p) (0xff & ((p) << 1))
+#define __gicv3_ns_to_prio(ns) (0x80 | ((ns) >> 1))
+
+#define __gicv3_prio_valid_ns(p) \
+ (__gicv3_ns_to_prio(__gicv3_prio_to_ns(p)) == (p))
+
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_NMI));
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_IRQ));
+
+static_assert(GICV3_PRIO_NMI < GICV3_PRIO_IRQ);
+static_assert(GICV3_PRIO_IRQ < GICV3_PRIO_UNMASKED);
+
+static_assert(GICV3_PRIO_IRQ < (GICV3_PRIO_IRQ | GICV3_PRIO_PSR_I_SET));
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H */
-- cgit v1.2.3-58-ga151

From cf938f91784f5b35d16fa9fc746a3bb03659ab50 Mon Sep 17 00:00:00 2001
From: Seongsu Park
Date: Thu, 23 May 2024 21:21:46 +0900
Subject: arm64: Cleanup __cpu_set_tcr_t0sz()

The T0SZ field of TCR_EL1 occupies bits 0-5 of the register and encodes
the size of the virtual address space translated by TTBR0_EL1. When
updating the field, for example because we are switching to/from the
idmap page-table, __cpu_set_tcr_t0sz() erroneously treats its 't0sz'
argument as unshifted, resulting in harmless but confusing double shifts
by 0 in the code.
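To illustrate (a sketch only, not part of the patch, assuming the usual
arm64 pgtable-hwdef.h definitions): TCR_T0SZ_OFFSET is 0, so the value
callers build already sits in bits 0-5 and can be compared and installed
directly, without the shift-by-0 removed in the hunk below:

/* Assumed encoding: T0SZ = 64 - VA bits, at bit 0 of TCR_EL1. */
#define TCR_T0SZ_OFFSET	0
#define TCR_T0SZ_MASK	(((1UL << 6) - 1) << TCR_T0SZ_OFFSET)
#define TCR_T0SZ(x)	(((unsigned long)(64 - (x))) << TCR_T0SZ_OFFSET)

/*
 * e.g. a 48-bit TTBR0 VA space: TCR_T0SZ(48) == 16, already in field
 * position, so shifting it by TCR_T0SZ_OFFSET again is a no-op.
 */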
Co-developed-by: Leem ChaeHoon
Signed-off-by: Leem ChaeHoon
Signed-off-by: Seongsu Park
Link: https://lore.kernel.org/r/20240523122146.144483-1-sgsu.park@samsung.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/mmu_context.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c768d16b81a4..bd19f4c758b7 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -72,11 +72,11 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
 unsigned long tcr = read_sysreg(tcr_el1);

- if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
+ if ((tcr & TCR_T0SZ_MASK) == t0sz)
 return;

 tcr &= ~TCR_T0SZ_MASK;
- tcr |= t0sz << TCR_T0SZ_OFFSET;
+ tcr |= t0sz;
 write_sysreg(tcr, tcr_el1);
 isb();
 }
-- cgit v1.2.3-58-ga151

From 42b9fed388a510cdb17d25c0dc1c8b72c4828cea Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Thu, 13 Jun 2024 15:57:09 +0530
Subject: KVM: arm64: Replace custom macros with fields from ID_AA64PFR0_EL1

This replaces custom macro usage (i.e. ID_AA64PFR0_EL1_ELx_64BIT_ONLY
and ID_AA64PFR0_EL1_ELx_32BIT_64BIT) with register fields taken directly
from the ID_AA64PFR0_EL1 sysreg definition.

Cc: Marc Zyngier
Cc: Oliver Upton
Cc: Will Deacon
Cc: linux-arm-kernel@lists.infradead.org
Cc: kvmarm@lists.linux.dev
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Acked-by: Marc Zyngier
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20240613102710.3295108-2-anshuman.khandual@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 10 +++++-----
 arch/arm64/kvm/hyp/nvhe/pkvm.c | 4 ++--
 arch/arm64/kvm/hyp/nvhe/sys_regs.c | 2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 51f043649146..f957890c7e38 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -52,11 +52,11 @@
 * Supported by KVM
 */
 #define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL2), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL3), ID_AA64PFR0_EL1_ELx_64BIT_ONLY) | \
- FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), ID_AA64PFR0_EL1_RAS_IMP) \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, RAS, IMP) \
 )

 /*
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 95cf18574251..187a5f4d56c0 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -33,9 +33,9 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)

 /* Protected KVM does not support AArch32 guests.
 */
 BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL0_IMP);
 BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_EL1_IMP);

 /*
 * Linux guests assume support for floating-point and Advanced SIMD. Do
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index edd969a1f36b..2860548d4250 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -276,7 +276,7 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
 * of AArch32 feature id registers.
 */
 BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_EL1_IMP);

 return pvm_access_raz_wi(vcpu, p, r);
 }
-- cgit v1.2.3-58-ga151

From 056600ff73307cadb89a07d068c9348b17eb530a Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Thu, 13 Jun 2024 15:57:10 +0530
Subject: arm64/cpufeature: Replace custom macros with fields from ID_AA64PFR0_EL1

This replaces custom macro usage (i.e. ID_AA64PFR0_EL1_ELx_64BIT_ONLY
and ID_AA64PFR0_EL1_ELx_32BIT_64BIT) with register fields taken directly
from the ID_AA64PFR0_EL1 sysreg definition. Finally, drop both of these
custom macros as they are now redundant.

Cc: Will Deacon
Cc: Mark Rutland
Cc: Mark Brown
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Acked-by: Mark Rutland
Link: https://lore.kernel.org/r/20240613102710.3295108-3-anshuman.khandual@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/cpufeature.h | 4 ++--
 arch/arm64/include/asm/sysreg.h | 4 ----
 arch/arm64/kernel/cpufeature.c | 4 ++--
 3 files changed, 4 insertions(+), 8 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8b904a757bd3..558434267271 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -588,14 +588,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
 {
 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);

- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL1_AARCH32;
 }

 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);

- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL0_AARCH32;
 }

 static inline bool id_aa64pfr0_sve(u64 pfr0)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index af3b206fa423..1b6e436dbb55 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -872,10 +872,6 @@
 /* Position the attr at the correct index */
 #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))

-/* id_aa64pfr0 */
-#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
-#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
-
 /* id_aa64mmfr0 */
 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
 #define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c3913c90797e..646ecd3069fd 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -285,8 +285,8 @@
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_ELx_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_EL1_IMP),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_EL0_IMP),
 ARM64_FTR_END,
 };
-- cgit v1.2.3-58-ga151

From f5a4af3c752704272e9094466e66e21d4ee4e414 Mon Sep 17 00:00:00 2001
From: Liu Wei
Date: Tue, 25 Jun 2024 11:05:04 +0800
Subject: ACPI: Add acpi=nospcr to disable ACPI SPCR as default console on ARM64

For varying privacy and security reasons, we sometimes want to
completely silence the _serial_ console and only enable it when needed.
But many existing systems depend on this _serial_ console, so add
acpi=nospcr to keep the console described by the ACPI SPCR table from
becoming the default _serial_ console.

Signed-off-by: Liu Wei
Suggested-by: Prarit Bhargava
Suggested-by: Will Deacon
Suggested-by: Andrew Lunn
Reviewed-by: Hanjun Guo
Reviewed-by: Prarit Bhargava
Link: https://lore.kernel.org/r/20240625030504.58025-1-liuwei09@cestc.cn
Signed-off-by: Catalin Marinas
---
 Documentation/admin-guide/kernel-parameters.txt | 10 +++++++---
 arch/arm64/kernel/acpi.c | 18 +++++++++++++++++-
 2 files changed, 24 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b600df82669d..68d8acc38c82 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -12,7 +12,7 @@
 acpi= [HW,ACPI,X86,ARM64,RISCV64,EARLY]
 Advanced Configuration and Power Interface
 Format: { force | on | off | strict | noirq | rsdt |
- copy_dsdt }
+ copy_dsdt | nospcr }
 force -- enable ACPI if default was off
 on -- enable ACPI but allow fallback to DT [arm64,riscv64]
 off -- disable ACPI if default was on
@@ -21,8 +21,12 @@ strictly ACPI specification compliant.
 rsdt -- prefer RSDT over (default) XSDT
 copy_dsdt -- copy DSDT to memory
- For ARM64 and RISCV64, ONLY "acpi=off", "acpi=on" or
- "acpi=force" are available
+ nospcr -- disable console in ACPI SPCR table as
+ default _serial_ console on ARM64
+ For ARM64, ONLY "acpi=off", "acpi=on", "acpi=force" or
+ "acpi=nospcr" are available
+ For RISCV64, ONLY "acpi=off", "acpi=on" or "acpi=force"
+ are available

 See also Documentation/power/runtime_pm.rst, pci=noacpi
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index fd1917336d3a..a73b87ac815c 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -45,6 +45,7 @@ EXPORT_SYMBOL(acpi_pci_disabled);
 static bool param_acpi_off __initdata;
 static bool param_acpi_on __initdata;
 static bool param_acpi_force __initdata;
+static bool param_acpi_nospcr __initdata;

 static int __init parse_acpi(char *arg)
 {
@@ -58,6 +59,8 @@ static int __init parse_acpi(char *arg)
 param_acpi_on = true;
 else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
 param_acpi_force = true;
+ else if (strcmp(arg, "nospcr") == 0) /* disable SPCR as default console */
+ param_acpi_nospcr = true;
 else
 return -EINVAL; /* Core will print when we return error */
@@ -237,7 +240,20 @@ done:
 acpi_put_table(facs);
 }
 #endif
- acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
+
+ /*
+ * For varying privacy and security reasons, we sometimes need
+ * to completely silence the serial console output, and only
+ * enable it when needed.
+ * But there are many existing systems that depend on this
+ * behaviour, so only skip the console in the ACPI SPCR table
+ * as the default serial console when acpi=nospcr is passed.
+ */
+ acpi_parse_spcr(earlycon_acpi_spcr_enable,
+ !param_acpi_nospcr);
+ pr_info("Use ACPI SPCR as default console: %s\n",
+ param_acpi_nospcr ? "No" : "Yes");
+
 if (IS_ENABLED(CONFIG_ACPI_BGRT))
 acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
 }
-- cgit v1.2.3-58-ga151

From 916b93f4e865b35563902f5862b443fc122631b4 Mon Sep 17 00:00:00 2001
From: Jinjie Ruan
Date: Thu, 20 Jun 2024 14:36:00 +0800
Subject: arm64: smp: Fix missing IPI statistics

Commit 83cfac95c018 ("genirq: Allow interrupts to be excluded from
/proc/interrupts") was introduced to avoid IPIs appearing twice in
/proc/interrupts. But commit 331a1b3a836c ("arm64: smp: Add arch support
for backtrace using pseudo-NMI") and commit 2f5cd0c7ffde ("arm64: kgdb:
Implement kgdb_roundup_cpus() to enable pseudo-NMI roundup") set the
"IRQ_HIDDEN" flag on the CPU_BACKTRACE and KGDB_ROUNDUP IPIs without
showing them in arch_show_interrupts(), so the kstat_irqs accounting
for these interrupts is missing from the display.
Before this patch, CPU_BACKTRACE and KGDB_ROUNDUP IPIs are missing:

/ # cat /proc/interrupts
 CPU0 CPU1 CPU2 CPU3
 11: 466 600 309 332 GICv3 27 Level arch_timer
 13: 24 0 0 0 GICv3 33 Level uart-pl011
 15: 64 0 0 0 GICv3 78 Edge virtio0
 16: 0 0 0 0 GICv3 79 Edge virtio1
 17: 0 0 0 0 GICv3 34 Level rtc-pl031
 18: 3 3 3 3 GICv3 23 Level arm-pmu
 19: 0 0 0 0 9030000.pl061 3 Edge GPIO Key Poweroff
IPI0: 7 14 9 26 Rescheduling interrupts
IPI1: 354 93 233 255 Function call interrupts
IPI2: 0 0 0 0 CPU stop interrupts
IPI3: 0 0 0 0 CPU stop (for crash dump) interrupts
IPI4: 0 0 0 0 Timer broadcast interrupts
IPI5: 1 0 0 0 IRQ work interrupts
Err: 0

After this patch, CPU_BACKTRACE and KGDB_ROUNDUP IPIs are displayed:

/ # cat /proc/interrupts
 CPU0 CPU1 CPU2 CPU3
 11: 393 281 532 449 GICv3 27 Level arch_timer
 13: 15 0 0 0 GICv3 33 Level uart-pl011
 15: 64 0 0 0 GICv3 78 Edge virtio0
 16: 0 0 0 0 GICv3 79 Edge virtio1
 17: 0 0 0 0 GICv3 34 Level rtc-pl031
 18: 2 2 2 2 GICv3 23 Level arm-pmu
 19: 0 0 0 0 9030000.pl061 3 Edge GPIO Key Poweroff
IPI0: 11 19 4 23 Rescheduling interrupts
IPI1: 279 347 222 72 Function call interrupts
IPI2: 0 0 0 0 CPU stop interrupts
IPI3: 0 0 0 0 CPU stop (for crash dump) interrupts
IPI4: 0 0 0 0 Timer broadcast interrupts
IPI5: 1 0 0 1 IRQ work interrupts
IPI6: 0 0 0 0 CPU backtrace interrupts
IPI7: 0 0 0 0 KGDB roundup interrupts
Err: 0

Fixes: 331a1b3a836c ("arm64: smp: Add arch support for backtrace using pseudo-NMI")
Signed-off-by: Jinjie Ruan
Suggested-by: Doug Anderson
Acked-by: Will Deacon
Reviewed-by: Douglas Anderson
Link: https://lore.kernel.org/r/20240620063600.573559-1-ruanjinjie@huawei.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/smp.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 753360ce72f6..ad9c719bd078 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -762,13 +762,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 }
 }

-static const char *ipi_types[NR_IPI] __tracepoint_string = {
+static const char *ipi_types[MAX_IPI] __tracepoint_string = {
 [IPI_RESCHEDULE] = "Rescheduling interrupts",
 [IPI_CALL_FUNC] = "Function call interrupts",
 [IPI_CPU_STOP] = "CPU stop interrupts",
 [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
 [IPI_TIMER] = "Timer broadcast interrupts",
 [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+ [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
 };

 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
@@ -779,7 +781,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 {
 unsigned int cpu, i;

- for (i = 0; i < NR_IPI; i++) {
+ for (i = 0; i < MAX_IPI; i++) {
 seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 prec >= 4 ? " " : "");
 for_each_online_cpu(cpu)
-- cgit v1.2.3-58-ga151
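For context, a sketch of the IPI numbering this fix relies on, mirroring
the enum in arch/arm64/kernel/smp.c at the time of this series (treat the
exact layout as illustrative): the IPIs after NR_IPI are NMI-backed and
requested with IRQ_HIDDEN, so the generic /proc/interrupts rows skip them
and arch_show_interrupts() must iterate up to MAX_IPI for their counts to
appear at all:

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	NR_IPI,
	/*
	 * IPIs from here on are backed by pseudo-NMIs and hidden from the
	 * generic rows of /proc/interrupts via IRQ_HIDDEN, which is why
	 * arch_show_interrupts() has to print them itself.
	 */
	IPI_CPU_BACKTRACE = NR_IPI,
	IPI_KGDB_ROUNDUP,
	MAX_IPI
};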