From 195fb517ee25bfefde9c74ecd86348eccbd6d2e4 Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Sun, 9 Jun 2024 17:39:24 -0700
Subject: cpu: Move CPU hotplug function declarations into their own header

Avoid upcoming #include hell when <linux/cacheinfo.h> wants to use
lockdep_assert_cpus_held() and creates a #include loop that would break
the build for arch/riscv.

  [ bp: s/cpu/CPU/g ]

Signed-off-by: Tony Luck
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20240610003927.341707-2-tony.luck@intel.com
---
 include/linux/cpu.h       | 33 +--------------------------------
 include/linux/cpuhplock.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 32 deletions(-)
 create mode 100644 include/linux/cpuhplock.h

diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 861c3bfc5f17..a8926d0a28cd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
 #include <linux/cpuhotplug.h>
+#include <linux/cpuhplock.h>
 #include <linux/cpu_smt.h>

 struct device;
@@ -132,38 +133,6 @@ static inline int add_cpu(unsigned int cpu) { return 0;}
 #endif /* CONFIG_SMP */

 extern const struct bus_type cpu_subsys;
-extern int lockdep_is_cpus_held(void);
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpus_write_lock(void);
-extern void cpus_write_unlock(void);
-extern void cpus_read_lock(void);
-extern void cpus_read_unlock(void);
-extern int cpus_read_trylock(void);
-extern void lockdep_assert_cpus_held(void);
-extern void cpu_hotplug_disable(void);
-extern void cpu_hotplug_enable(void);
-void clear_tasks_mm_cpumask(int cpu);
-int remove_cpu(unsigned int cpu);
-int cpu_device_down(struct device *dev);
-extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
-
-#else /* CONFIG_HOTPLUG_CPU */
-
-static inline void cpus_write_lock(void) { }
-static inline void cpus_write_unlock(void) { }
-static inline void cpus_read_lock(void) { }
-static inline void cpus_read_unlock(void) { }
-static inline int cpus_read_trylock(void) { return true; }
-static inline void lockdep_assert_cpus_held(void) { }
-static inline void cpu_hotplug_disable(void) { }
-static inline void cpu_hotplug_enable(void) { }
-static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
-static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
-#endif /* !CONFIG_HOTPLUG_CPU */
-
-DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
-
 #ifdef CONFIG_PM_SLEEP_SMP
 extern int freeze_secondary_cpus(int primary);
 extern void thaw_secondary_cpus(void);

diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
new file mode 100644
index 000000000000..386abc482264
--- /dev/null
+++ b/include/linux/cpuhplock.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/cpuhplock.h - CPU hotplug locking
+ *
+ * Locking functions for CPU hotplug.
+ */
+#ifndef _LINUX_CPUHPLOCK_H_
+#define _LINUX_CPUHPLOCK_H_
+
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+
+struct device;
+
+extern int lockdep_is_cpus_held(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void cpus_write_lock(void);
+extern void cpus_write_unlock(void);
+extern void cpus_read_lock(void);
+extern void cpus_read_unlock(void);
+extern int cpus_read_trylock(void);
+extern void lockdep_assert_cpus_held(void);
+extern void cpu_hotplug_disable(void);
+extern void cpu_hotplug_enable(void);
+void clear_tasks_mm_cpumask(int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpus_write_lock(void) { }
+static inline void cpus_write_unlock(void) { }
+static inline void cpus_read_lock(void) { }
+static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
+static inline void lockdep_assert_cpus_held(void) { }
+static inline void cpu_hotplug_disable(void) { }
+static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
+#endif /* _LINUX_CPUHPLOCK_H_ */
--
cgit v1.2.3-58-ga151

From ddefcfdeb5a2238cbcb07b80dda9ac3136735b1e Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Sun, 9 Jun 2024 17:39:25 -0700
Subject: cpu: Drop "extern" from function declarations in cpuhplock.h

This file was created with a direct cut and paste from cpu.h, so it
kept the legacy declaration style. But the Linux coding standard for
function declarations in header files is to avoid use of "extern".

Drop "extern" from all function declarations.

Signed-off-by: Tony Luck
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20240610003927.341707-3-tony.luck@intel.com
---
 include/linux/cpuhplock.h | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
index 386abc482264..431560bbd045 100644
--- a/include/linux/cpuhplock.h
+++ b/include/linux/cpuhplock.h
@@ -15,18 +15,18 @@ struct device;
 extern int lockdep_is_cpus_held(void);

 #ifdef CONFIG_HOTPLUG_CPU
-extern void cpus_write_lock(void);
-extern void cpus_write_unlock(void);
-extern void cpus_read_lock(void);
-extern void cpus_read_unlock(void);
-extern int cpus_read_trylock(void);
-extern void lockdep_assert_cpus_held(void);
-extern void cpu_hotplug_disable(void);
-extern void cpu_hotplug_enable(void);
+void cpus_write_lock(void);
+void cpus_write_unlock(void);
+void cpus_read_lock(void);
+void cpus_read_unlock(void);
+int cpus_read_trylock(void);
+void lockdep_assert_cpus_held(void);
+void cpu_hotplug_disable(void);
+void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
 int remove_cpu(unsigned int cpu);
 int cpu_device_down(struct device *dev);
-extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
+void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);

 #else /* CONFIG_HOTPLUG_CPU */
--
cgit v1.2.3-58-ga151

From 685cb1674060c2cb1b9da051a12933c082b8e874 Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Sun, 9 Jun 2024 17:39:26 -0700
Subject: cacheinfo: Add function to get cacheinfo for a given CPU and cache level

Resctrl open codes a search for information about a given cache level
in a couple of places (and more are on the way).
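For reference, that open-coded pattern looks roughly like this (a
simplified sketch for illustration, not the exact resctrl code; 'cpu'
and 'level' stand in for the caller's values):

	/*
	 * Walk every cache leaf of @cpu until the requested @level is
	 * found, then pull out the property of interest.
	 */
	struct cpu_cacheinfo *cci = get_cpu_cacheinfo(cpu);
	unsigned int line_size = 0;
	int i;

	for (i = 0; i < cci->num_leaves; i++) {
		if (cci->info_list[i].level == level) {
			line_size = cci->info_list[i].coherency_line_size;
			break;
		}
	}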
Provide a new inline function get_cpu_cacheinfo_level() in
<linux/cacheinfo.h> to do the search and return a pointer to the
cacheinfo structure.

Add lockdep_assert_cpus_held() to enforce the comment that cpuhp lock
must be held.

Simplify the existing get_cpu_cacheinfo_id() by using this new function
to do the search.

Signed-off-by: Tony Luck
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Reinette Chatre
Link: https://lore.kernel.org/r/20240610003927.341707-4-tony.luck@intel.com
---
 include/linux/cacheinfo.h | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2cb15fe4fe12..3dde175f4108 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -3,6 +3,7 @@
 #define _LINUX_CACHEINFO_H

 #include <linux/bitops.h>
+#include <linux/cpuhplock.h>
 #include <linux/cpumask.h>
 #include <linux/smp.h>

@@ -113,23 +114,37 @@ int acpi_get_cache_info(unsigned int cpu,
 const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);

 /*
- * Get the id of the cache associated with @cpu at level @level.
+ * Get the cacheinfo structure for the cache associated with @cpu at
+ * level @level.
  * cpuhp lock must be held.
  */
-static inline int get_cpu_cacheinfo_id(int cpu, int level)
+static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
 {
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

+	lockdep_assert_cpus_held();
+
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level) {
			if (ci->info_list[i].attributes & CACHE_ID)
-				return ci->info_list[i].id;
-			return -1;
+				return &ci->info_list[i];
+			return NULL;
		}
	}

-	return -1;
+	return NULL;
+}
+
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+	struct cacheinfo *ci = get_cpu_cacheinfo_level(cpu, level);
+
+	return ci ? ci->id : -1;
 }

 #ifdef CONFIG_ARM64
--
cgit v1.2.3-58-ga151

From f385f024639431bec3e70c33cdbc9563894b3ee5 Mon Sep 17 00:00:00 2001
From: Tony Luck
Date: Sun, 9 Jun 2024 17:39:27 -0700
Subject: x86/resctrl: Replace open coded cacheinfo searches

pseudo_lock_region_init() and rdtgroup_cbm_to_size() open code a search
for details of a particular cache level. Replace with
get_cpu_cacheinfo_level().

Signed-off-by: Tony Luck
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Reinette Chatre
Link: https://lore.kernel.org/r/20240610003927.341707-5-tony.luck@intel.com
---
 arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 17 ++++++-----------
 arch/x86/kernel/cpu/resctrl/rdtgroup.c    | 14 +++++---------
 2 files changed, 11 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index aacf236dfe3b..1bbfd3c1e300 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -292,9 +292,8 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
  */
 static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
 {
-	struct cpu_cacheinfo *ci;
+	struct cacheinfo *ci;
	int ret;
-	int i;

	/* Pick the first cpu we find that is associated with the cache.
	 */
	plr->cpu = cpumask_first(&plr->d->cpu_mask);
@@ -306,15 +305,11 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
		goto out_region;
	}

-	ci = get_cpu_cacheinfo(plr->cpu);
-
-	plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
-
-	for (i = 0; i < ci->num_leaves; i++) {
-		if (ci->info_list[i].level == plr->s->res->cache_level) {
-			plr->line_size = ci->info_list[i].coherency_line_size;
-			return 0;
-		}
+	ci = get_cpu_cacheinfo_level(plr->cpu, plr->s->res->cache_level);
+	if (ci) {
+		plr->line_size = ci->coherency_line_size;
+		plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
+		return 0;
	}

	ret = -1;

diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 02f213f1c51c..cb68a121dabb 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1450,18 +1450,14 @@ out:
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
 {
-	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
-	int num_b, i;
+	struct cacheinfo *ci;
+	int num_b;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
-	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
-	for (i = 0; i < ci->num_leaves; i++) {
-		if (ci->info_list[i].level == r->cache_level) {
-			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
-			break;
-		}
-	}
+	ci = get_cpu_cacheinfo_level(cpumask_any(&d->cpu_mask), r->cache_level);
+	if (ci)
+		size = ci->size / r->cache.cbm_len * num_b;

	return size;
 }
--
cgit v1.2.3-58-ga151

From 54183d103d38e5efefce8500ec41dfbfaba9c19d Mon Sep 17 00:00:00 2001
From: Nikolay Borisov
Date: Wed, 29 May 2024 18:28:50 +0300
Subject: x86/kexec: Remove spurious unconditional JMP from identity_mapped()

This seemingly straightforward JMP was introduced in the initial
version of the 64bit kexec code without any explanation.

It turns out (check accompanying Link) it's likely a copy/paste
artefact from 32-bit code, where such a JMP could be used as a
serializing instruction for the 486's prefetch queue. On x86_64 that's
not needed because there's already a preceding write to cr4 which
itself is a serializing operation.

  [ bp: Typos. Let's try this and see what cries out. If it does,
    reverting it is trivial. ]

Signed-off-by: Nikolay Borisov
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/all/55bc0649-c017-49ab-905d-212f140a403f@citrix.com/
---
 arch/x86/kernel/relocate_kernel_64.S | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 56cab1bb25f5..54e620021c7e 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -153,9 +153,6 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 1:
	movq	%rax, %cr4

-	jmp 1f
-1:
-
	/* Flush the TLB (needed?) */
	movq	%r9, %cr3
--
cgit v1.2.3-58-ga151

From 2b5e22afae07ca7d833e251f6d60da8455676ee9 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:46 +0300
Subject: x86/acpi: Extract ACPI MADT wakeup code into a separate file

In order to prepare for the expansion of support for the ACPI MADT
wakeup method, move the relevant code into a separate file.

Introduce a new configuration option to clearly indicate dependencies
without the use of ifdefs.

There have been no functional changes.
Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Baoquan He
Reviewed-by: Kuppuswamy Sathyanarayanan
Reviewed-by: Thomas Gleixner
Acked-by: Borislav Petkov (AMD)
Acked-by: Kai Huang
Acked-by: Rafael J. Wysocki
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-2-kirill.shutemov@linux.intel.com
---
 arch/x86/Kconfig                   |  7 ++++
 arch/x86/include/asm/acpi.h        |  5 +++
 arch/x86/kernel/acpi/Makefile      |  1 +
 arch/x86/kernel/acpi/boot.c        | 86 +-------------------------------------
 arch/x86/kernel/acpi/madt_wakeup.c | 82 ++++++++++++++++++++++++++++++++++++
 5 files changed, 96 insertions(+), 85 deletions(-)
 create mode 100644 arch/x86/kernel/acpi/madt_wakeup.c

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1d7122a1883e..125914536825 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1118,6 +1118,13 @@ config X86_LOCAL_APIC
	depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
	select IRQ_DOMAIN_HIERARCHY

+config ACPI_MADT_WAKEUP
+	def_bool y
+	depends on X86_64
+	depends on ACPI
+	depends on SMP
+	depends on X86_LOCAL_APIC
+
 config X86_IO_APIC
	def_bool y
	depends on X86_LOCAL_APIC || X86_UP_IOAPIC

diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 5af926c050f0..ceacac2b335d 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -78,6 +78,11 @@ static inline bool acpi_skip_set_wakeup_address(void)

 #define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address

+union acpi_subtable_headers;
+
+int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
+			      const unsigned long end);
+
 /*
  * Check if the CPU can handle C2 and deeper
  */

diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index fc17b3f136fe..2feba7257665 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ACPI)		+= boot.o
 obj-$(CONFIG_ACPI_SLEEP)	+= sleep.o wakeup_$(BITS).o
 obj-$(CONFIG_ACPI_APEI)		+= apei.o
 obj-$(CONFIG_ACPI_CPPC_LIB)	+= cppc.o
+obj-$(CONFIG_ACPI_MADT_WAKEUP)	+= madt_wakeup.o

 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y				+= cstate.o

diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4bf82dbd2a6b..9f4618dcd704 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -67,13 +67,6 @@ static bool has_lapic_cpus __initdata;
 static bool acpi_support_online_capable;
 #endif

-#ifdef CONFIG_X86_64
-/* Physical address of the Multiprocessor Wakeup Structure mailbox */
-static u64 acpi_mp_wake_mailbox_paddr;
-/* Virtual address of the Multiprocessor Wakeup Structure mailbox */
-static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
-#endif
-
 #ifdef CONFIG_X86_IO_APIC
 /*
  * Locks related to IOAPIC hotplug
@@ -341,60 +334,6 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long e
	return 0;
 }

-
-#ifdef CONFIG_X86_64
-static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
-{
-	/*
-	 * Remap mailbox memory only for the first call to acpi_wakeup_cpu().
-	 *
-	 * Wakeup of secondary CPUs is fully serialized in the core code.
-	 * No need to protect acpi_mp_wake_mailbox from concurrent accesses.
-	 */
-	if (!acpi_mp_wake_mailbox) {
-		acpi_mp_wake_mailbox = memremap(acpi_mp_wake_mailbox_paddr,
-						sizeof(*acpi_mp_wake_mailbox),
-						MEMREMAP_WB);
-	}
-
-	/*
-	 * Mailbox memory is shared between the firmware and OS. Firmware will
-	 * listen on mailbox command address, and once it receives the wakeup
-	 * command, the CPU associated with the given apicid will be booted.
-	 *
-	 * The value of 'apic_id' and 'wakeup_vector' must be visible to the
-	 * firmware before the wakeup command is visible. smp_store_release()
-	 * ensures ordering and visibility.
-	 */
-	acpi_mp_wake_mailbox->apic_id = apicid;
-	acpi_mp_wake_mailbox->wakeup_vector = start_ip;
-	smp_store_release(&acpi_mp_wake_mailbox->command,
-			  ACPI_MP_WAKE_COMMAND_WAKEUP);
-
-	/*
-	 * Wait for the CPU to wake up.
-	 *
-	 * The CPU being woken up is essentially in a spin loop waiting to be
-	 * woken up. It should not take long for it wake up and acknowledge by
-	 * zeroing out ->command.
-	 *
-	 * ACPI specification doesn't provide any guidance on how long kernel
-	 * has to wait for a wake up acknowledgement. It also doesn't provide
-	 * a way to cancel a wake up request if it takes too long.
-	 *
-	 * In TDX environment, the VMM has control over how long it takes to
-	 * wake up secondary. It can postpone scheduling secondary vCPU
-	 * indefinitely. Giving up on wake up request and reporting error opens
-	 * possible attack vector for VMM: it can wake up a secondary CPU when
-	 * kernel doesn't expect it. Wait until positive result of the wake up
-	 * request.
-	 */
-	while (READ_ONCE(acpi_mp_wake_mailbox->command))
-		cpu_relax();
-
-	return 0;
-}
-#endif /* CONFIG_X86_64 */
 #endif /* CONFIG_X86_LOCAL_APIC */

 #ifdef CONFIG_X86_IO_APIC
@@ -1124,29 +1063,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
	}
	return 0;
 }

-
-#ifdef CONFIG_X86_64
-static int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
-				     const unsigned long end)
-{
-	struct acpi_madt_multiproc_wakeup *mp_wake;
-
-	if (!IS_ENABLED(CONFIG_SMP))
-		return -ENODEV;
-
-	mp_wake = (struct acpi_madt_multiproc_wakeup *)header;
-	if (BAD_MADT_ENTRY(mp_wake, end))
-		return -EINVAL;
-
-	acpi_table_print_madt_entry(&header->common);
-
-	acpi_mp_wake_mailbox_paddr = mp_wake->base_address;
-
-	apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu);
-
-	return 0;
-}
-#endif /* CONFIG_X86_64 */
 #endif /* CONFIG_X86_LOCAL_APIC */

 #ifdef CONFIG_X86_IO_APIC
@@ -1343,7 +1259,7 @@ static void __init acpi_process_madt(void)
				smp_found_config = 1;
		}

-#ifdef CONFIG_X86_64
+#ifdef CONFIG_ACPI_MADT_WAKEUP
		/*
		 * Parse MADT MP Wake entry.
		 */

diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
new file mode 100644
index 000000000000..7f164d38bd0b
--- /dev/null
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <asm/apic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+/* Physical address of the Multiprocessor Wakeup Structure mailbox */
+static u64 acpi_mp_wake_mailbox_paddr;
+
+/* Virtual address of the Multiprocessor Wakeup Structure mailbox */
+static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
+
+static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
+{
+	/*
+	 * Remap mailbox memory only for the first call to acpi_wakeup_cpu().
+	 *
+	 * Wakeup of secondary CPUs is fully serialized in the core code.
+	 * No need to protect acpi_mp_wake_mailbox from concurrent accesses.
+	 */
+	if (!acpi_mp_wake_mailbox) {
+		acpi_mp_wake_mailbox = memremap(acpi_mp_wake_mailbox_paddr,
+						sizeof(*acpi_mp_wake_mailbox),
+						MEMREMAP_WB);
+	}
+
+	/*
+	 * Mailbox memory is shared between the firmware and OS. Firmware will
+	 * listen on mailbox command address, and once it receives the wakeup
+	 * command, the CPU associated with the given apicid will be booted.
+	 *
+	 * The value of 'apic_id' and 'wakeup_vector' must be visible to the
+	 * firmware before the wakeup command is visible. smp_store_release()
+	 * ensures ordering and visibility.
+	 */
+	acpi_mp_wake_mailbox->apic_id = apicid;
+	acpi_mp_wake_mailbox->wakeup_vector = start_ip;
+	smp_store_release(&acpi_mp_wake_mailbox->command,
+			  ACPI_MP_WAKE_COMMAND_WAKEUP);
+
+	/*
+	 * Wait for the CPU to wake up.
+	 *
+	 * The CPU being woken up is essentially in a spin loop waiting to be
+	 * woken up. It should not take long for it wake up and acknowledge by
+	 * zeroing out ->command.
+	 *
+	 * ACPI specification doesn't provide any guidance on how long kernel
+	 * has to wait for a wake up acknowledgment. It also doesn't provide
+	 * a way to cancel a wake up request if it takes too long.
+	 *
+	 * In TDX environment, the VMM has control over how long it takes to
+	 * wake up secondary. It can postpone scheduling secondary vCPU
+	 * indefinitely. Giving up on wake up request and reporting error opens
+	 * possible attack vector for VMM: it can wake up a secondary CPU when
+	 * kernel doesn't expect it. Wait until positive result of the wake up
+	 * request.
+	 */
+	while (READ_ONCE(acpi_mp_wake_mailbox->command))
+		cpu_relax();
+
+	return 0;
+}
+
+int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
+			      const unsigned long end)
+{
+	struct acpi_madt_multiproc_wakeup *mp_wake;
+
+	mp_wake = (struct acpi_madt_multiproc_wakeup *)header;
+	if (BAD_MADT_ENTRY(mp_wake, end))
+		return -EINVAL;
+
+	acpi_table_print_madt_entry(&header->common);
+
+	acpi_mp_wake_mailbox_paddr = mp_wake->base_address;
+
+	apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu);
+
+	return 0;
+}
--
cgit v1.2.3-58-ga151

From 24dd05da8c7995cb2016b8f4da631c557aa6b40d Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:47 +0300
Subject: x86/apic: Mark acpi_mp_wake_* variables as __ro_after_init

acpi_mp_wake_mailbox_paddr and acpi_mp_wake_mailbox are initialized
once during ACPI MADT init and never changed.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Baoquan He
Reviewed-by: Thomas Gleixner
Acked-by: Kai Huang
Acked-by: Rafael J. Wysocki
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-3-kirill.shutemov@linux.intel.com
---
 arch/x86/kernel/acpi/madt_wakeup.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
index 7f164d38bd0b..cf79ea6f3007 100644
--- a/arch/x86/kernel/acpi/madt_wakeup.c
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -6,10 +6,10 @@
 /* Physical address of the Multiprocessor Wakeup Structure mailbox */
-static u64 acpi_mp_wake_mailbox_paddr;
+static u64 acpi_mp_wake_mailbox_paddr __ro_after_init;

 /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
-static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
+static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init;
--
cgit v1.2.3-58-ga151

From 1037e4c53e851682ff8d1ab656567a4d5a333c93 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:48 +0300
Subject: cpu/hotplug: Add support for declaring CPU offlining not supported

The ACPI MADT mailbox wakeup method doesn't allow offlining a CPU after
it has been woken up.

Currently, offlining is prevented based on the confidential computing
attribute which is set for Intel TDX.
But TDX is not the only possible user of the wake up method. The MADT
wakeup can be implemented outside of a confidential computing
environment. Offline support is a property of the wakeup method, not
the CoCo implementation.

Introduce cpu_hotplug_disable_offlining() that can be called to
indicate that CPU offlining should be disabled.

This function is going to replace CC_ATTR_HOTPLUG_DISABLED for ACPI
MADT wakeup method.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Thomas Gleixner
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-4-kirill.shutemov@linux.intel.com
---
 include/linux/cpuhplock.h |  2 ++
 kernel/cpu.c              | 13 ++++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
index 431560bbd045..f7aa20f62b87 100644
--- a/include/linux/cpuhplock.h
+++ b/include/linux/cpuhplock.h
@@ -21,6 +21,7 @@ void cpus_read_lock(void);
 void cpus_read_unlock(void);
 int cpus_read_trylock(void);
 void lockdep_assert_cpus_held(void);
+void cpu_hotplug_disable_offlining(void);
 void cpu_hotplug_disable(void);
 void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
@@ -36,6 +37,7 @@ static inline void cpus_read_lock(void) { }
 static inline void cpus_read_unlock(void) { }
 static inline int cpus_read_trylock(void) { return true; }
 static inline void lockdep_assert_cpus_held(void) { }
+static inline void cpu_hotplug_disable_offlining(void) { }
 static inline void cpu_hotplug_disable(void) { }
 static inline void cpu_hotplug_enable(void) { }
 static inline int remove_cpu(unsigned int cpu) { return -EPERM; }

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 563877d6c28b..4c15b478e2bc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -483,6 +483,8 @@ static int cpu_hotplug_disabled;

 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

+static bool cpu_hotplug_offline_disabled __ro_after_init;
+
 void cpus_read_lock(void)
 {
	percpu_down_read(&cpu_hotplug_lock);
@@ -542,6 +544,14 @@ static void lockdep_release_cpus_lock(void)
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
 }

+/* Declare CPU offlining not supported */
+void cpu_hotplug_disable_offlining(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_offline_disabled = true;
+	cpu_maps_update_done();
+}
+
 /*
  * Wait for currently running CPU hotplug operations to complete (if any) and
  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -1471,7 +1481,8 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
	 * If the platform does not support hotplug, report it explicitly to
	 * differentiate it from a transient offlining failure.
	 */
-	if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
+	if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED) ||
+	    cpu_hotplug_offline_disabled)
		return -EOPNOTSUPP;
	if (cpu_hotplug_disabled)
		return -EBUSY;
--
cgit v1.2.3-58-ga151

From 66e48e491d1e1a0f243ebfcb9639b23de1a5db5e Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:49 +0300
Subject: cpu/hotplug, x86/acpi: Disable CPU offlining for ACPI MADT wakeup

ACPI MADT doesn't allow offlining a CPU after it has been woken up.

Currently, CPU hotplug is prevented based on the confidential computing
attribute which is set for Intel TDX. But TDX is not the only possible
user of the wake up method. Any platform that uses the ACPI MADT wakeup
method cannot offline CPUs.

Disable CPU offlining on ACPI MADT wakeup enumeration.
This has no visible effects for users: currently, TDX guest is the only
platform that uses the ACPI MADT wakeup method.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Thomas Gleixner
Acked-by: Rafael J. Wysocki
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-5-kirill.shutemov@linux.intel.com
---
 arch/x86/coco/core.c               |  1 -
 arch/x86/kernel/acpi/madt_wakeup.c |  3 +++
 include/linux/cc_platform.h        | 10 ----------
 kernel/cpu.c                       |  3 +--
 4 files changed, 4 insertions(+), 13 deletions(-)

diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index b31ef2424d19..0f81f70aca82 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -29,7 +29,6 @@ static bool noinstr intel_cc_platform_has(enum cc_attr attr)
 {
	switch (attr) {
	case CC_ATTR_GUEST_UNROLL_STRING_IO:
-	case CC_ATTR_HOTPLUG_DISABLED:
	case CC_ATTR_GUEST_MEM_ENCRYPT:
	case CC_ATTR_MEM_ENCRYPT:
		return true;

diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
index cf79ea6f3007..d222be8d7a07 100644
--- a/arch/x86/kernel/acpi/madt_wakeup.c
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 #include <linux/acpi.h>
+#include <linux/cpu.h>
 #include <linux/io.h>
 #include <asm/apic.h>
 #include <asm/barrier.h>
@@ -76,6 +77,8 @@ int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,

	acpi_mp_wake_mailbox_paddr = mp_wake->base_address;

+	cpu_hotplug_disable_offlining();
+
	apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu);

	return 0;

diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index 60693a145894..caa4b4430634 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -81,16 +81,6 @@ enum cc_attr {
	 */
	CC_ATTR_GUEST_SEV_SNP,

-	/**
-	 * @CC_ATTR_HOTPLUG_DISABLED: Hotplug is not supported or disabled.
-	 *
-	 * The platform/OS is running as a guest/virtual machine does not
-	 * support CPU hotplug feature.
-	 *
-	 * Examples include TDX Guest.
-	 */
-	CC_ATTR_HOTPLUG_DISABLED,
-
	/**
	 * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
	 *

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 4c15b478e2bc..a609385c7f99 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1481,8 +1481,7 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
	 * If the platform does not support hotplug, report it explicitly to
	 * differentiate it from a transient offlining failure.
	 */
-	if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED) ||
-	    cpu_hotplug_offline_disabled)
+	if (cpu_hotplug_offline_disabled)
		return -EOPNOTSUPP;
	if (cpu_hotplug_disabled)
		return -EBUSY;
--
cgit v1.2.3-58-ga151

From 7b46a8997db27ed70b01458fa4437ec2360feddd Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Fri, 14 Jun 2024 12:58:50 +0300
Subject: x86/relocate_kernel: Use named labels for less confusion

That identity_mapped() function was loving that "1" label to the point
of completely confusing its readers.

Use named labels in each place for clarity.

No functional changes.

Signed-off-by: Borislav Petkov (AMD)
Signed-off-by: Kirill A. Shutemov
Link: https://lore.kernel.org/r/20240614095904.1345461-6-kirill.shutemov@linux.intel.com
---
 arch/x86/kernel/relocate_kernel_64.S | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 54e620021c7e..8b8922de3765 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -148,9 +148,10 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	 */
	movl	$X86_CR4_PAE, %eax
	testq	$X86_CR4_LA57, %r13
-	jz	1f
+	jz	.Lno_la57
	orl	$X86_CR4_LA57, %eax
-1:
+.Lno_la57:
+
	movq	%rax, %cr4

	/* Flush the TLB (needed?) */
@@ -162,9 +163,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	 * used by kexec. Flush the caches before copying the kernel.
	 */
	testq	%r12, %r12
-	jz	1f
+	jz	.Lsme_off
	wbinvd
-1:
+.Lsme_off:

	movq	%rcx, %r11
	call	swap_pages
@@ -184,7 +185,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	 */
	testq	%r11, %r11
-	jnz	1f
+	jnz	.Lrelocate
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
@@ -205,7 +206,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	ret
	int3

-1:
+.Lrelocate:
	popq	%rdx
	leaq	PAGE_SIZE(%r10), %rsp
	ANNOTATE_RETPOLINE_SAFE
--
cgit v1.2.3-58-ga151

From de60613173dfd75a10f6aa8e001bbcafa242e623 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:51 +0300
Subject: x86/kexec: Keep CR4.MCE set during kexec for TDX guest

TDX guests run with MCA enabled (CR4.MCE=1b) from the very start. If
that bit is cleared during CR4 register reprogramming during boot or
kexec flows, a #VE exception will be raised which the guest kernel
cannot handle.

Therefore, make sure the CR4.MCE setting is preserved over kexec too and
avoid raising any #VEs.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20240614095904.1345461-7-kirill.shutemov@linux.intel.com
---
 arch/x86/kernel/relocate_kernel_64.S | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 8b8922de3765..042c9a0334e9 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -5,6 +5,8 @@
 */

 #include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <asm/alternative.h>
 #include <asm/page_types.h>
 #include <asm/kexec.h>
 #include <asm/processor-flags.h>
@@ -145,14 +147,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	 * Set cr4 to a known state:
	 *  - physical address extension enabled
	 *  - 5-level paging, if it was enabled before
+	 *  - Machine check exception on TDX guest, if it was enabled before.
+	 *    Clearing MCE might not be allowed in TDX guests, depending on setup.
+	 *
+	 * Use R13 that contains the original CR4 value, read in relocate_kernel().
+	 * PAE is always set in the original CR4.
	 */
-	movl	$X86_CR4_PAE, %eax
-	testq	$X86_CR4_LA57, %r13
-	jz	.Lno_la57
-	orl	$X86_CR4_LA57, %eax
-.Lno_la57:
-
-	movq	%rax, %cr4
+	andl	$(X86_CR4_PAE | X86_CR4_LA57), %r13d
+	ALTERNATIVE "", __stringify(orl $X86_CR4_MCE, %r13d), X86_FEATURE_TDX_GUEST
+	movq	%r13, %cr4

	/* Flush the TLB (needed?) */
	movq	%r9, %cr3
--
cgit v1.2.3-58-ga151

From 99c5c4c60e0db1d2ff58b8a61c93b6851146469f Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:52 +0300
Subject: x86/mm: Make x86_platform.guest.enc_status_change_*() return an error

TDX is going to have more than one reason to fail
enc_status_change_prepare().

Change the callback to return errno instead of assuming -EIO.

Change enc_status_change_finish() too to keep the interface symmetric.
Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Dave Hansen
Reviewed-by: Kai Huang
Reviewed-by: Michael Kelley
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-8-kirill.shutemov@linux.intel.com
---
 arch/x86/coco/tdx/tdx.c         | 20 +++++++++++---------
 arch/x86/hyperv/ivm.c           | 22 ++++++++++------------
 arch/x86/include/asm/x86_init.h |  4 ++--
 arch/x86/kernel/x86_init.c      |  4 ++--
 arch/x86/mm/mem_encrypt_amd.c   |  8 ++++----
 arch/x86/mm/pat/set_memory.c    | 12 +++++++-----
 6 files changed, 36 insertions(+), 34 deletions(-)

diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index c1cb90369915..26fa47db5782 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -798,28 +798,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
	return true;
 }

-static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
-					  bool enc)
+static int tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
+					 bool enc)
 {
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
-	if (enc)
-		return tdx_enc_status_changed(vaddr, numpages, enc);
-	return true;
+	if (enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+		return -EIO;
+
+	return 0;
 }

-static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
+static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
 {
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
-	if (!enc)
-		return tdx_enc_status_changed(vaddr, numpages, enc);
-	return true;
+	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
+		return -EIO;
+
+	return 0;
 }

 void __init tdx_early_init(void)

diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 768d73de0d09..b4a851d27c7c 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -523,9 +523,9 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
 * transition is complete, hv_vtom_set_host_visibility() marks the pages
 * as "present" again.
 */
-static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
+static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
 {
-	return !set_memory_np(kbuffer, pagecount);
+	return set_memory_np(kbuffer, pagecount);
 }

 /*
@@ -536,20 +536,19 @@ static bool hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc
 * with host. This function works as wrap of hv_mark_gpa_visibility()
 * with memory base and size.
 */
-static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
+static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
 {
	enum hv_mem_host_visibility visibility = enc ?
			VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
	u64 *pfn_array;
	phys_addr_t paddr;
+	int i, pfn, err;
	void *vaddr;
	int ret = 0;
-	bool result = true;
-	int i, pfn;

	pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!pfn_array) {
-		result = false;
+		ret = -ENOMEM;
		goto err_set_memory_p;
	}

@@ -568,10 +567,8 @@ static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bo
		if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
			ret = hv_mark_gpa_visibility(pfn, pfn_array,
						     visibility);
-			if (ret) {
-				result = false;
+			if (ret)
				goto err_free_pfn_array;
-			}
			pfn = 0;
		}
	}

@@ -586,10 +583,11 @@ err_set_memory_p:
	 * order to avoid leaving the memory range in a "broken" state. Setting
	 * the PRESENT bits shouldn't fail, but return an error if it does.
	 */
-	if (set_memory_p(kbuffer, pagecount))
-		result = false;
+	err = set_memory_p(kbuffer, pagecount);
+	if (err && !ret)
+		ret = err;

-	return result;
+	return ret;
 }

 static bool hv_vtom_tlb_flush_required(bool private)

diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6149eabe200f..28ac3cb9b987 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -151,8 +151,8 @@ struct x86_init_acpi {
 * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
 */
 struct x86_guest {
-	bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
-	bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+	int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+	int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
	bool (*enc_tlb_flush_required)(bool enc);
	bool (*enc_cache_flush_required)(void);
 };

diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index d5dc5a92635a..a7143bb7dd93 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -134,8 +134,8 @@ struct x86_cpuinit_ops x86_cpuinit = {

 static void default_nmi_init(void) { };

-static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
-static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return true; }
+static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
+static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
 static bool enc_tlb_flush_required_noop(bool enc) { return false; }
 static bool enc_cache_flush_required_noop(void) { return false; }
 static bool is_private_mmio_noop(u64 addr) {return false; }

diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 422602f6039b..e7b67519ddb5 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -283,7 +283,7 @@ static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
 #endif
 }

-static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
 {
	/*
	 * To maintain the security guarantees of SEV-SNP guests, make sure
@@ -292,11 +292,11 @@ static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
		snp_set_memory_shared(vaddr, npages);

-	return true;
+	return 0;
 }

 /* Return true unconditionally: return value doesn't matter for the SEV side */
-static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
+static int amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
 {
	/*
	 * After memory is mapped encrypted in the page table, validate it
@@ -308,7 +308,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);

-	return true;
+	return 0;
 }

 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 19fdfbb171ed..498812f067cd 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2196,7 +2196,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
	cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());

	/* Notify hypervisor that we are about to set/clr encryption attribute. */
-	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
+	ret = x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
+	if (ret)
		goto vmm_fail;

	ret = __change_page_attr_set_clr(&cpa, 1);
@@ -2214,16 +2215,17 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
		return ret;

	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
-	if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+	ret = x86_platform.guest.enc_status_change_finish(addr, numpages, enc);
+	if (ret)
		goto vmm_fail;

	return 0;

 vmm_fail:
-	WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
-		  (void *)addr, numpages, enc ? "private" : "shared");
+	WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s: %d\n",
+		  (void *)addr, numpages, enc ? "private" : "shared", ret);

-	return -EIO;
+	return ret;
 }

 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
--
cgit v1.2.3-58-ga151

From 9d1dcdfa909178b6f465625bbfd8311e6107b48e Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:53 +0300
Subject: x86/mm: Return correct level from lookup_address() if pte is none

Currently, lookup_address() returns two things:

  1. A "pte_t" (which might be a p[g4um]d_t)
  2. The 'level' of the page tables where the "pte_t" was found
     (returned via a pointer)

If no pte_t is found, 'level' is essentially garbage.

Always fill out the level. For NULL "pte_t"s, fill in the level where
the p*d_none() entry was found, mirroring the "found" behavior.

Always filling out the level allows using lookup_address() to precisely
skip over holes when walking kernel page tables.

Add one more entry into enum pg_level to indicate the size of the VA
covered by one PGD entry in 5-level paging mode.

Update comments for lookup_address() and lookup_address_in_pgd() to
reflect changes in the interface.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Rick Edgecombe
Reviewed-by: Baoquan He
Reviewed-by: Dave Hansen
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-9-kirill.shutemov@linux.intel.com
---
 arch/x86/include/asm/pgtable_types.h |  1 +
 arch/x86/mm/pat/set_memory.c         | 21 ++++++++++-----------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index b78644962626..2f321137736c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -549,6 +549,7 @@ enum pg_level {
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
+	PG_LEVEL_256T,
	PG_LEVEL_NUM
 };

diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 498812f067cd..a7a7a6c6a3fb 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -662,8 +662,9 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star

 /*
 * Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry, the level of the mapping, and the effective
- * NX and RW bits of all page table levels.
+ * Return a pointer to the entry (or NULL if the entry does not exist),
+ * the level of the entry, and the effective NX and RW bits of all
+ * page table levels.
 */
 pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
				  unsigned int *level, bool *nx, bool *rw)
@@ -672,13 +673,14 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
	pud_t *pud;
	pmd_t *pmd;

-	*level = PG_LEVEL_NONE;
+	*level = PG_LEVEL_256T;
	*nx = false;
	*rw = true;

	if (pgd_none(*pgd))
		return NULL;

+	*level = PG_LEVEL_512G;
	*nx |= pgd_flags(*pgd) & _PAGE_NX;
	*rw &= pgd_flags(*pgd) & _PAGE_RW;

@@ -686,10 +688,10 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
	if (p4d_none(*p4d))
		return NULL;

-	*level = PG_LEVEL_512G;
	if (p4d_leaf(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

+	*level = PG_LEVEL_1G;
	*nx |= p4d_flags(*p4d) & _PAGE_NX;
	*rw &= p4d_flags(*p4d) & _PAGE_RW;

@@ -697,10 +699,10 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
	if (pud_none(*pud))
		return NULL;

-	*level = PG_LEVEL_1G;
	if (pud_leaf(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

+	*level = PG_LEVEL_2M;
	*nx |= pud_flags(*pud) & _PAGE_NX;
	*rw &= pud_flags(*pud) & _PAGE_RW;

@@ -708,15 +710,13 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
	if (pmd_none(*pmd))
		return NULL;

-	*level = PG_LEVEL_2M;
	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

+	*level = PG_LEVEL_4K;
	*nx |= pmd_flags(*pmd) & _PAGE_NX;
	*rw &= pmd_flags(*pmd) & _PAGE_RW;

-	*level = PG_LEVEL_4K;
-
	return pte_offset_kernel(pmd, address);
 }

@@ -736,9 +736,8 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
- * Note: We return pud and pmd either when the entry is marked large
- * or when the present bit is not set. Otherwise we would return a
- * pointer to a nonexisting mapping.
+ * Note: the function returns p4d, pud or pmd either when the entry is marked
+ * large or when the present bit is not set. Otherwise it returns NULL.
 */
 pte_t *lookup_address(unsigned long address, unsigned int *level)
 {
--
cgit v1.2.3-58-ga151

From c3abbf1376874f0d6eb22859a8655831644efa42 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:54 +0300
Subject: x86/tdx: Account shared memory

The kernel will convert all shared memory back to private during kexec.
The direct mapping page tables will provide information on which memory
is shared.

It is extremely important to convert all shared memory. If a page is
missed, it will cause the second kernel to crash when it accesses it.

Keep track of the number of shared pages. This will allow for
cross-checking against the shared information in the direct mapping and
reporting if the shared bit is lost.

Memory conversion is slow and does not happen often. Global atomic is
not going to be a bottleneck.
Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Kai Huang
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-10-kirill.shutemov@linux.intel.com
---
 arch/x86/coco/tdx/tdx.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 26fa47db5782..979891e97d83 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -38,6 +38,8 @@

 #define TDREPORT_SUBTYPE_0	0

+static atomic_long_t nr_shared;
+
 /* Called from __tdx_hypercall() for unrecoverable failure */
 noinstr void __noreturn __tdx_hypercall_failed(void)
 {
@@ -821,6 +823,11 @@ static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
	if (!enc && !tdx_enc_status_changed(vaddr, numpages, enc))
		return -EIO;

+	if (enc)
+		atomic_long_sub(numpages, &nr_shared);
+	else
+		atomic_long_add(numpages, &nr_shared);
+
	return 0;
 }
--
cgit v1.2.3-58-ga151

From 22daa42294b419a0d8060a3870285e7a72aa63e4 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:55 +0300
Subject: x86/mm: Add callbacks to prepare encrypted memory for kexec

AMD SEV and Intel TDX guests allocate shared buffers for performing
I/O. This is done by allocating pages normally from the buddy allocator
and then converting them to shared using set_memory_decrypted().

On kexec, the second kernel is unaware of which memory has been
converted in this manner. It only sees E820_TYPE_RAM. Accessing shared
memory as private is fatal.

Therefore, the memory state must be reset to its original state before
starting the new kernel with kexec.

The process of converting shared memory back to private occurs in two
steps:

- enc_kexec_begin() stops new conversions.
- enc_kexec_finish() unshares all existing shared memory, reverting it
  back to private.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Nikolay Borisov
Reviewed-by: Kai Huang
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-11-kirill.shutemov@linux.intel.com
---
 arch/x86/include/asm/x86_init.h | 10 ++++++++++
 arch/x86/kernel/crash.c         | 12 ++++++++++++
 arch/x86/kernel/reboot.c        | 12 ++++++++++++
 arch/x86/kernel/x86_init.c      |  4 ++++
 4 files changed, 38 insertions(+)

diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 28ac3cb9b987..213cf5379a5a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -149,12 +149,22 @@ struct x86_init_acpi {
 * @enc_status_change_finish	Notify HV after the encryption status of a range is changed
 * @enc_tlb_flush_required	Returns true if a TLB flush is needed before changing page encryption status
 * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
+ * @enc_kexec_begin		Begin the two-step process of converting shared memory back
+ *				to private. It stops the new conversions from being started
+ *				and waits in-flight conversions to finish, if possible.
+ * @enc_kexec_finish		Finish the two-step process of converting shared memory to
+ *				private. All memory is private after the call when
+ *				the function returns.
+ *				It is called on only one CPU while the others are shut down
+ *				and with interrupts disabled.
 */
 struct x86_guest {
	int (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
	int (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
	bool (*enc_tlb_flush_required)(bool enc);
	bool (*enc_cache_flush_required)(void);
+	void (*enc_kexec_begin)(void);
+	void (*enc_kexec_finish)(void);
 };

 /**

diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index f06501445cd9..340af8155658 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -128,6 +128,18 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
	hpet_disable();
 #endif
+
+	/*
+	 * Non-crash kexec calls enc_kexec_begin() while scheduling is still
+	 * active. This allows the callback to wait until all in-flight
+	 * shared<->private conversions are complete. In a crash scenario,
+	 * enc_kexec_begin() gets called after all but one CPU have been shut
+	 * down and interrupts have been disabled. This allows the callback to
+	 * detect a race with the conversion and report it.
+	 */
+	x86_platform.guest.enc_kexec_begin();
+	x86_platform.guest.enc_kexec_finish();
+
	crash_save_cpu(regs, safe_smp_processor_id());
 }

diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f3130f762784..bb7a44af7efd 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -716,6 +717,14 @@ static void native_machine_emergency_restart(void)

 void native_machine_shutdown(void)
 {
+	/*
+	 * Call enc_kexec_begin() while all CPUs are still active and
+	 * interrupts are enabled. This will allow all in-flight memory
+	 * conversions to finish cleanly.
+	 */
+	if (kexec_in_progress)
+		x86_platform.guest.enc_kexec_begin();
+
	/* Stop the cpus and apics */
 #ifdef CONFIG_X86_IO_APIC
	/*
@@ -752,6 +761,9 @@ void native_machine_shutdown(void)
 #ifdef CONFIG_X86_64
	x86_platform.iommu_shutdown();
 #endif
+
+	if (kexec_in_progress)
+		x86_platform.guest.enc_kexec_finish();
 }

 static void __machine_emergency_restart(int emergency)

diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index a7143bb7dd93..82b128d3f309 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -138,6 +138,8 @@ static int enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool
 static int enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return 0; }
 static bool enc_tlb_flush_required_noop(bool enc) { return false; }
 static bool enc_cache_flush_required_noop(void) { return false; }
+static void enc_kexec_begin_noop(void) {}
+static void enc_kexec_finish_noop(void) {}
 static bool is_private_mmio_noop(u64 addr) {return false; }

 struct x86_platform_ops x86_platform __ro_after_init = {
@@ -161,6 +163,8 @@ struct x86_platform_ops x86_platform __ro_after_init = {
		.enc_status_change_finish	= enc_status_change_finish_noop,
		.enc_tlb_flush_required		= enc_tlb_flush_required_noop,
		.enc_cache_flush_required	= enc_cache_flush_required_noop,
+		.enc_kexec_begin		= enc_kexec_begin_noop,
+		.enc_kexec_finish		= enc_kexec_finish_noop,
	},
 };
--
cgit v1.2.3-58-ga151

From 859e63b789d6b17b3c64e51a0aabdc58752a0254 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 14 Jun 2024 12:58:56 +0300
Subject: x86/tdx: Convert shared memory back to private on kexec

TDX guests allocate shared buffers to perform I/O. It is done by
allocating pages normally from the buddy allocator and converting them
to shared with set_memory_decrypted().
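For illustration, such an allocation typically looks like this
(a simplified sketch of a hypothetical driver path, not code from this
series; the order and error handling are illustrative only):

	/* Allocate an ordinary page, then make it shared with the host. */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
	unsigned long vaddr = (unsigned long)page_address(page);

	if (set_memory_decrypted(vaddr, 1))
		return -EIO;	/* conversion failed; page state uncertain */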
The second, kexec-ed kernel has no idea what memory is converted this
way. It only sees E820_TYPE_RAM.

Accessing shared memory via private mapping is fatal. It leads to
unrecoverable TD exit.

On kexec, walk direct mapping and convert all shared memory back to
private. It makes all RAM private again and the second kernel may use
it normally.

The conversion occurs in two steps: stopping new conversions and
unsharing all memory. In the case of normal kexec, the stopping of
conversions takes place while scheduling is still functioning. This
allows for waiting until any ongoing conversions are finished. The
second step is carried out when all CPUs except one are inactive and
interrupts are disabled. This prevents any conflicts with code that may
access shared memory.

Signed-off-by: Kirill A. Shutemov
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Rick Edgecombe
Reviewed-by: Kai Huang
Tested-by: Tao Liu
Link: https://lore.kernel.org/r/20240614095904.1345461-12-kirill.shutemov@linux.intel.com
---
 arch/x86/coco/tdx/tdx.c           | 94 +++++++++++++++++++++++++++++++++++++++
 arch/x86/include/asm/pgtable.h    |  5 +++
 arch/x86/include/asm/set_memory.h |  3 ++
 arch/x86/mm/pat/set_memory.c      | 42 +++++++++++++++--
 4 files changed, 141 insertions(+), 3 deletions(-)

diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 979891e97d83..078e2bac2553 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -14,6 +15,7 @@
 #include
 #include
 #include
+#include

 /* MMIO direction */
 #define EPT_READ	0
@@ -831,6 +833,95 @@ static int tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
	return 0;
 }

+/* Stop new private<->shared conversions */
+static void tdx_kexec_begin(void)
+{
+	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+		return;
+
+	/*
+	 * Crash kernel reaches here with interrupts disabled: can't wait for
+	 * conversions to finish.
+	 *
+	 * If race happened, just report and proceed.
+	 */
+	if (!set_memory_enc_stop_conversion())
+		pr_warn("Failed to stop shared<->private conversions\n");
+}
+
+/* Walk direct mapping and convert all shared memory back to private */
+static void tdx_kexec_finish(void)
+{
+	unsigned long addr, end;
+	long found = 0, shared;
+
+	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+		return;
+
+	lockdep_assert_irqs_disabled();
+
+	addr = PAGE_OFFSET;
+	end  = PAGE_OFFSET + get_max_mapped();
+
+	while (addr < end) {
+		unsigned long size;
+		unsigned int level;
+		pte_t *pte;
+
+		pte = lookup_address(addr, &level);
+		size = page_level_size(level);
+
+		if (pte && pte_decrypted(*pte)) {
+			int pages = size / PAGE_SIZE;
+
+			/*
+			 * Touching memory with shared bit set triggers implicit
+			 * conversion to shared.
+			 *
+			 * Make sure nobody touches the shared range from
+			 * now on.
+			 */
+			set_pte(pte, __pte(0));
+
+			/*
+			 * Memory encryption state persists across kexec.
+			 * If tdx_enc_status_changed() fails in the first
+			 * kernel, it leaves memory in an unknown state.
+			 *
+			 * If that memory remains shared, accessing it in the
+			 * *next* kernel through a private mapping will result
+			 * in an unrecoverable guest shutdown.
+			 *
+			 * The kdump kernel boot is not impacted as it uses
+			 * a pre-reserved memory range that is always private.
+			 * However, gathering crash information could lead to
+			 * a crash if it accesses unconverted memory through
+			 * a private mapping which is possible when accessing
+			 * that memory through /proc/vmcore, for example.
+ * + * In all cases, print error info in order to leave + * enough bread crumbs for debugging. + */ + if (!tdx_enc_status_changed(addr, pages, true)) { + pr_err("Failed to unshare range %#lx-%#lx\n", + addr, addr + size); + } + + found += pages; + } + + addr += size; + } + + __flush_tlb_all(); + + shared = atomic_long_read(&nr_shared); + if (shared != found) { + pr_err("shared page accounting is off\n"); + pr_err("nr_shared = %ld, nr_found = %ld\n", shared, found); + } +} + void __init tdx_early_init(void) { struct tdx_module_args args = { @@ -890,6 +981,9 @@ void __init tdx_early_init(void) x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required; x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required; + x86_platform.guest.enc_kexec_begin = tdx_kexec_begin; + x86_platform.guest.enc_kexec_finish = tdx_kexec_finish; + /* * TDX intercepts the RDMSR to read the X2APIC ID in the parallel * bringup low level code. That raises #VE which cannot be handled diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 65b8e5bb902c..e39311a89bf4 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -140,6 +140,11 @@ static inline int pte_young(pte_t pte) return pte_flags(pte) & _PAGE_ACCESSED; } +static inline bool pte_decrypted(pte_t pte) +{ + return cc_mkdec(pte_val(pte)) == pte_val(pte); +} + #define pmd_dirty pmd_dirty static inline bool pmd_dirty(pmd_t pmd) { diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index 9aee31862b4a..4b2abce2e3e7 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -49,8 +49,11 @@ int set_memory_wb(unsigned long addr, int numpages); int set_memory_np(unsigned long addr, int numpages); int set_memory_p(unsigned long addr, int numpages); int set_memory_4k(unsigned long addr, int numpages); + +bool set_memory_enc_stop_conversion(void); int set_memory_encrypted(unsigned long addr, int numpages); int set_memory_decrypted(unsigned long addr, int numpages); + int set_memory_np_noalias(unsigned long addr, int numpages); int set_memory_nonglobal(unsigned long addr, int numpages); int set_memory_global(unsigned long addr, int numpages); diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index a7a7a6c6a3fb..443a97e515c0 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -2227,12 +2227,48 @@ vmm_fail: return ret; } +/* + * The lock serializes conversions between private and shared memory. + * + * It is taken for read on conversion. A write lock guarantees that no + * concurrent conversions are in progress. + */ +static DECLARE_RWSEM(mem_enc_lock); + +/* + * Stop new private<->shared conversions. + * + * Taking the exclusive mem_enc_lock waits for in-flight conversions to complete. + * The lock is not released to prevent new conversions from being started. + */ +bool set_memory_enc_stop_conversion(void) +{ + /* + * In a crash scenario, sleep is not allowed. Try to take the lock. + * Failure indicates that there is a race with the conversion. 
+ */ + if (oops_in_progress) + return down_write_trylock(&mem_enc_lock); + + down_write(&mem_enc_lock); + + return true; +} + static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) { - if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) - return __set_memory_enc_pgtable(addr, numpages, enc); + int ret = 0; - return 0; + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) { + if (!down_read_trylock(&mem_enc_lock)) + return -EBUSY; + + ret = __set_memory_enc_pgtable(addr, numpages, enc); + + up_read(&mem_enc_lock); + } + + return ret; } int set_memory_encrypted(unsigned long addr, int numpages) -- cgit v1.2.3-58-ga151 From 06fa48d85b09b3e67afeda220bc19f7102b53beb Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 14 Jun 2024 12:58:57 +0300 Subject: x86/mm: Make e820__end_ram_pfn() cover E820_TYPE_ACPI ranges e820__end_of_ram_pfn() is used to calculate max_pfn which, among other things, guides where direct mapping ends. Any memory above max_pfn is not going to be present in the direct mapping. e820__end_of_ram_pfn() finds the end of the RAM based on the highest E820_TYPE_RAM range. But it doesn't include E820_TYPE_ACPI ranges in the calculation. Despite the name, E820_TYPE_ACPI covers not only ACPI data, but also EFI tables and might be required by the kernel to function properly. Usually the problem is hidden because there is some E820_TYPE_RAM memory above E820_TYPE_ACPI. But crashkernel only presents pre-allocated crash memory as E820_TYPE_RAM on boot. If the pre-allocated range is small, it can fit under the last E820_TYPE_ACPI range. Modify e820__end_of_ram_pfn() and e820__end_of_low_ram_pfn() to cover E820_TYPE_ACPI memory. The problem was discovered while debugging kexec for a TDX guest. A TDX guest uses E820_TYPE_ACPI to store the unaccepted memory bitmap and pass it between the kernels on kexec. Signed-off-by: Kirill A.
Shutemov Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Dave Hansen Tested-by: Tao Liu Link: https://lore.kernel.org/r/20240614095904.1345461-13-kirill.shutemov@linux.intel.com --- arch/x86/kernel/e820.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 68b09f718f10..4893d30ce438 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -828,7 +828,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align) /* * Find the highest page frame number we have available */ -static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type type) +static unsigned long __init e820__end_ram_pfn(unsigned long limit_pfn) { int i; unsigned long last_pfn = 0; @@ -839,7 +839,8 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type unsigned long start_pfn; unsigned long end_pfn; - if (entry->type != type) + if (entry->type != E820_TYPE_RAM && + entry->type != E820_TYPE_ACPI) continue; start_pfn = entry->addr >> PAGE_SHIFT; @@ -865,12 +866,12 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, enum e820_type unsigned long __init e820__end_of_ram_pfn(void) { - return e820_end_pfn(MAX_ARCH_PFN, E820_TYPE_RAM); + return e820__end_ram_pfn(MAX_ARCH_PFN); } unsigned long __init e820__end_of_low_ram_pfn(void) { - return e820_end_pfn(1UL << (32 - PAGE_SHIFT), E820_TYPE_RAM); + return e820__end_ram_pfn(1UL << (32 - PAGE_SHIFT)); } static void __init early_panic(char *msg) -- cgit v1.2.3-58-ga151 From 5574b368873d4f24e2ae8fab3a1105ede252e542 Mon Sep 17 00:00:00 2001 From: Ashish Kalra Date: Fri, 14 Jun 2024 12:58:58 +0300 Subject: x86/mm: Do not zap page table entries mapping unaccepted memory table during kdump During crashkernel boot, only pre-allocated crash memory is presented as E820_TYPE_RAM. This can cause page table entries mapping the unaccepted memory table to be zapped during phys_pte_init(), phys_pmd_init(), phys_pud_init() and phys_p4d_init() as SNP/TDX guests use E820_TYPE_ACPI to store the unaccepted memory table and pass it between the kernels on kexec/kdump. E820_TYPE_ACPI covers not only ACPI data, but also EFI tables and might be required by the kernel to function properly. The problem was discovered while debugging kdump for an SNP guest. The unaccepted memory table stored with E820_TYPE_ACPI and passed between the kernels on kdump was getting zapped because the PMD entry mapping it lies above the E820_TYPE_RAM range for the reserved crashkernel memory. Signed-off-by: Ashish Kalra Signed-off-by: Kirill A.
Shutemov Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20240614095904.1345461-14-kirill.shutemov@linux.intel.com --- arch/x86/mm/init_64.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 7e177856ee4f..28002cc7a37d 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -469,7 +469,9 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end, !e820__mapped_any(paddr & PAGE_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & PAGE_MASK, paddr_next, - E820_TYPE_RESERVED_KERN)) + E820_TYPE_RESERVED_KERN) && + !e820__mapped_any(paddr & PAGE_MASK, paddr_next, + E820_TYPE_ACPI)) set_pte_init(pte, __pte(0), init); continue; } @@ -524,7 +526,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, !e820__mapped_any(paddr & PMD_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & PMD_MASK, paddr_next, - E820_TYPE_RESERVED_KERN)) + E820_TYPE_RESERVED_KERN) && + !e820__mapped_any(paddr & PMD_MASK, paddr_next, + E820_TYPE_ACPI)) set_pmd_init(pmd, __pmd(0), init); continue; } @@ -611,7 +615,9 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, !e820__mapped_any(paddr & PUD_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & PUD_MASK, paddr_next, - E820_TYPE_RESERVED_KERN)) + E820_TYPE_RESERVED_KERN) && + !e820__mapped_any(paddr & PUD_MASK, paddr_next, + E820_TYPE_ACPI)) set_pud_init(pud, __pud(0), init); continue; } @@ -698,7 +704,9 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, !e820__mapped_any(paddr & P4D_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & P4D_MASK, paddr_next, - E820_TYPE_RESERVED_KERN)) + E820_TYPE_RESERVED_KERN) && + !e820__mapped_any(paddr & P4D_MASK, paddr_next, + E820_TYPE_ACPI)) set_p4d_init(p4d, __p4d(0), init); continue; } -- cgit v1.2.3-58-ga151 From 6630cbce7cd7785f76b1055f33a71199ef28510b Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 14 Jun 2024 12:58:59 +0300 Subject: x86/acpi: Rename fields in the acpi_madt_multiproc_wakeup structure In order to support MADT wakeup structure version 1, provide more appropriate names for the fields in the structure. Rename 'mailbox_version' to 'version'. This field signifies the version of the structure and the related protocols, rather than the version of the mailbox. This field has not been utilized in the code thus far. Rename 'base_address' to 'mailbox_address' to clarify the kind of address it represents. In version 1, the structure includes the reset vector address. Clear and distinct naming helps to prevent any confusion. Signed-off-by: Kirill A. Shutemov Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Kai Huang Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Thomas Gleixner Acked-by: Rafael J. 
Wysocki Tested-by: Tao Liu Link: https://lore.kernel.org/r/20240614095904.1345461-15-kirill.shutemov@linux.intel.com --- arch/x86/kernel/acpi/madt_wakeup.c | 2 +- include/acpi/actbl2.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c index d222be8d7a07..004801b9b151 100644 --- a/arch/x86/kernel/acpi/madt_wakeup.c +++ b/arch/x86/kernel/acpi/madt_wakeup.c @@ -75,7 +75,7 @@ int __init acpi_parse_mp_wake(union acpi_subtable_headers *header, acpi_table_print_madt_entry(&header->common); - acpi_mp_wake_mailbox_paddr = mp_wake->base_address; + acpi_mp_wake_mailbox_paddr = mp_wake->mailbox_address; cpu_hotplug_disable_offlining(); diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index ae747c89d92c..fa63362469aa 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -1194,9 +1194,9 @@ struct acpi_madt_generic_translator { struct acpi_madt_multiproc_wakeup { struct acpi_subtable_header header; - u16 mailbox_version; + u16 version; u32 reserved; /* reserved - must be zero */ - u64 base_address; + u64 mailbox_address; }; #define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032 -- cgit v1.2.3-58-ga151 From db0936830a2fcc35e2b283275acf61b6d3ae1e11 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 14 Jun 2024 12:59:00 +0300 Subject: x86/acpi: Do not attempt to bring up secondary CPUs in the kexec case ACPI MADT doesn't allow offlining a CPU after it has been onlined. This limits kexec: the second kernel won't be able to use more than one CPU. To prevent a kexec kernel from onlining secondary CPUs, invalidate the mailbox address in the ACPI MADT wakeup structure, which prevents the kexec kernel from using it. This is safe as the booting kernel has the mailbox address cached already and acpi_wakeup_cpu() uses the cached value to bring up the secondary CPUs. Note: This is a Linux specific convention and not covered by the ACPI specification. Signed-off-by: Kirill A. Shutemov Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Kai Huang Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Thomas Gleixner Acked-by: Rafael J. Wysocki Tested-by: Tao Liu Link: https://lore.kernel.org/r/20240614095904.1345461-16-kirill.shutemov@linux.intel.com --- arch/x86/kernel/acpi/madt_wakeup.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c index 004801b9b151..30820f9de5af 100644 --- a/arch/x86/kernel/acpi/madt_wakeup.c +++ b/arch/x86/kernel/acpi/madt_wakeup.c @@ -14,6 +14,11 @@ static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_afte static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip) { + if (!acpi_mp_wake_mailbox_paddr) { + pr_warn_once("No MADT mailbox: cannot bring up secondary CPUs. Booting with kexec?\n"); + return -EOPNOTSUPP; + } + /* * Remap mailbox memory only for the first call to acpi_wakeup_cpu(). * @@ -64,6 +69,28 @@ static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip) return 0; } +static void acpi_mp_disable_offlining(struct acpi_madt_multiproc_wakeup *mp_wake) +{ + cpu_hotplug_disable_offlining(); + + /* + * ACPI MADT doesn't allow offlining a CPU after it has been onlined. This + * limits kexec: the second kernel won't be able to use more than one CPU. + * + * To prevent a kexec kernel from onlining secondary CPUs, invalidate the + * mailbox address in the ACPI MADT wakeup structure, which prevents the + * kexec kernel from using it.
+ * + * This is safe as the booting kernel has the mailbox address cached + * already and acpi_wakeup_cpu() uses the cached value to bring up the + * secondary CPUs. + * + * Note: This is a Linux specific convention and not covered by the + * ACPI specification. + */ + mp_wake->mailbox_address = 0; +} + int __init acpi_parse_mp_wake(union acpi_subtable_headers *header, const unsigned long end) { @@ -77,7 +104,7 @@ int __init acpi_parse_mp_wake(union acpi_subtable_headers *header, acpi_mp_wake_mailbox_paddr = mp_wake->mailbox_address; - cpu_hotplug_disable_offlining(); + acpi_mp_disable_offlining(mp_wake); apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu); -- cgit v1.2.3-58-ga151 From 26ba7353caaa7140561d3f7693a77a3eb68c722c Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 14 Jun 2024 12:59:01 +0300 Subject: x86/smp: Add smp_ops.stop_this_cpu() callback If the helper is defined, it is called instead of halt() to stop the CPU at the end of stop_this_cpu() and on crash CPU shutdown. ACPI MADT will use it to hand over the CPU to BIOS in order to be able to wake it up again after kexec. Signed-off-by: Kirill A. Shutemov Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Thomas Gleixner Acked-by: Kai Huang Tested-by: Tao Liu Link: https://lore.kernel.org/r/20240614095904.1345461-17-kirill.shutemov@linux.intel.com --- arch/x86/include/asm/smp.h | 1 + arch/x86/kernel/process.c | 7 +++++++ arch/x86/kernel/reboot.c | 6 ++++++ 3 files changed, 14 insertions(+) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index a35936b512fe..ca073f40698f 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -35,6 +35,7 @@ struct smp_ops { int (*cpu_disable)(void); void (*cpu_die)(unsigned int cpu); void (*play_dead)(void); + void (*stop_this_cpu)(void); void (*send_call_func_ipi)(const struct cpumask *mask); void (*send_call_func_single_ipi)(int cpu); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index b8441147eb5e..f63f8fd00a91 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -835,6 +835,13 @@ void __noreturn stop_this_cpu(void *dummy) */ cpumask_clear_cpu(cpu, &cpus_stop_mask); +#ifdef CONFIG_SMP + if (smp_ops.stop_this_cpu) { + smp_ops.stop_this_cpu(); + unreachable(); + } +#endif + for (;;) { /* * Use native_halt() so that memory contents don't change diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index bb7a44af7efd..0e0a4cf6b5eb 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -880,6 +880,12 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) cpu_emergency_disable_virtualization(); atomic_dec(&waiting_for_crash_ipi); + + if (smp_ops.stop_this_cpu) { + smp_ops.stop_this_cpu(); + unreachable(); + } + /* Assume hlt works */ halt(); for (;;) -- cgit v1.2.3-58-ga151 From d88e7b3e35cff2c318042990d70828f64c3ae296 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 14 Jun 2024 12:59:02 +0300 Subject: x86/mm: Introduce kernel_ident_mapping_free() The helper complements kernel_ident_mapping_init(): it frees the identity mapping that was previously allocated. It will be used in the error path to free a partially allocated mapping or if the mapping is no longer needed. The caller provides a struct x86_mapping_info with the free_pgd_page() callback hooked up and the pgd_t to free. Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Borislav Petkov (AMD) Acked-by: Kai Huang Tested-by: Tao Liu Link: https://lore.kernel.org/r/20240614095904.1345461-18-kirill.shutemov@linux.intel.com --- arch/x86/include/asm/init.h | 3 ++ arch/x86/mm/ident_map.c | 73 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+) diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h index cc9ccf61b6bd..14d72727d7ee 100644 --- a/arch/x86/include/asm/init.h +++ b/arch/x86/include/asm/init.h @@ -6,6 +6,7 @@ struct x86_mapping_info { void *(*alloc_pgt_page)(void *); /* allocate buf for page table */ + void (*free_pgt_page)(void *, void *); /* free buf for page table */ void *context; /* context for alloc_pgt_page */ unsigned long page_flag; /* page flag for PMD or PUD entry */ unsigned long offset; /* ident mapping offset */ @@ -16,4 +17,6 @@ struct x86_mapping_info { int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, unsigned long pstart, unsigned long pend); +void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd); + #endif /* _ASM_X86_INIT_H */ diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index 968d7005f4a7..c45127265f2f 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -4,6 +4,79 @@ * included by both the compressed kernel and the regular kernel. */ +static void free_pte(struct x86_mapping_info *info, pmd_t *pmd) +{ + pte_t *pte = pte_offset_kernel(pmd, 0); + + info->free_pgt_page(pte, info->context); +} + +static void free_pmd(struct x86_mapping_info *info, pud_t *pud) +{ + pmd_t *pmd = pmd_offset(pud, 0); + int i; + + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_present(pmd[i])) + continue; + + if (pmd_leaf(pmd[i])) + continue; + + free_pte(info, &pmd[i]); + } + + info->free_pgt_page(pmd, info->context); +} + +static void free_pud(struct x86_mapping_info *info, p4d_t *p4d) +{ + pud_t *pud = pud_offset(p4d, 0); + int i; + + for (i = 0; i < PTRS_PER_PUD; i++) { + if (!pud_present(pud[i])) + continue; + + if (pud_leaf(pud[i])) + continue; + + free_pmd(info, &pud[i]); + } + + info->free_pgt_page(pud, info->context); +} + +static void free_p4d(struct x86_mapping_info *info, pgd_t *pgd) +{ + p4d_t *p4d = p4d_offset(pgd, 0); + int i; + + for (i = 0; i < PTRS_PER_P4D; i++) { + if (!p4d_present(p4d[i])) + continue; + + free_pud(info, &p4d[i]); + } + + if (pgtable_l5_enabled()) + info->free_pgt_page(p4d, info->context); +} + +void kernel_ident_mapping_free(struct x86_mapping_info *info, pgd_t *pgd) +{ + int i; + + for (i = 0; i < PTRS_PER_PGD; i++) { + if (!pgd_present(pgd[i])) + continue; + + free_p4d(info, &pgd[i]); + } + + info->free_pgt_page(pgd, info->context); +} + static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page, unsigned long addr, unsigned long end) { -- cgit v1.2.3-58-ga151 From 1ceebe2e46720de02af4cf626dc847ecc4a263fd Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 14 Jun 2024 12:59:03 +0300 Subject: x86/acpi: Add support for CPU offlining for ACPI MADT wakeup method MADT Multiprocessor Wakeup structure version 1 brings support for CPU offlining: BIOS provides a reset vector to which the CPU jumps to offline itself. The new TEST mailbox command can be used to test whether the CPU offlined itself, which means the BIOS has control over the CPU and can online it again via the ACPI MADT wakeup method. Add CPU offlining support for the ACPI MADT wakeup method by implementing custom cpu_die(), play_dead() and stop_this_cpu() SMP operations.
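The heart of the new cpu_die() handshake, condensed from the code added below (the names and the one-second timeout are exactly as in the patch):

	/* Ask the BIOS to confirm that it has taken over the parked CPU. */
	acpi_mp_wake_mailbox->apic_id = apicid;
	smp_store_release(&acpi_mp_wake_mailbox->command,
			  ACPI_MP_WAKE_COMMAND_TEST);

	/* The BIOS clears 'command' once it owns the CPU; wait up to 1s. */
	timeout = USEC_PER_SEC;
	while (READ_ONCE(acpi_mp_wake_mailbox->command) && --timeout)
		udelay(1);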
CPU offlining makes it possible to hand over secondary CPUs across kexec, not limiting the second kernel to a single CPU. The change conforms to the approved ACPI spec change proposal. See the Link. Signed-off-by: Kirill A. Shutemov Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Thomas Gleixner Acked-by: Kai Huang Acked-by: Rafael J. Wysocki Tested-by: Tao Liu Link: https://lore.kernel.org/all/13356251.uLZWGnKmhe@kreacher Link: https://lore.kernel.org/r/20240614095904.1345461-19-kirill.shutemov@linux.intel.com --- arch/x86/include/asm/acpi.h | 2 + arch/x86/kernel/acpi/Makefile | 2 +- arch/x86/kernel/acpi/madt_playdead.S | 28 ++++++ arch/x86/kernel/acpi/madt_wakeup.c | 184 ++++++++++++++++++++++++++++++++++- include/acpi/actbl2.h | 15 ++- 5 files changed, 227 insertions(+), 4 deletions(-) create mode 100644 arch/x86/kernel/acpi/madt_playdead.S diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index ceacac2b335d..21bc53f5ed0c 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -83,6 +83,8 @@ union acpi_subtable_headers; int __init acpi_parse_mp_wake(union acpi_subtable_headers *header, const unsigned long end); +void asm_acpi_mp_play_dead(u64 reset_vector, u64 pgd_pa); + /* * Check if the CPU can handle C2 and deeper */ diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile index 2feba7257665..842a5f449404 100644 --- a/arch/x86/kernel/acpi/Makefile +++ b/arch/x86/kernel/acpi/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_ACPI) += boot.o obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o obj-$(CONFIG_ACPI_APEI) += apei.o obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o -obj-$(CONFIG_ACPI_MADT_WAKEUP) += madt_wakeup.o +obj-$(CONFIG_ACPI_MADT_WAKEUP) += madt_wakeup.o madt_playdead.o ifneq ($(CONFIG_ACPI_PROCESSOR),) obj-y += cstate.o diff --git a/arch/x86/kernel/acpi/madt_playdead.S b/arch/x86/kernel/acpi/madt_playdead.S new file mode 100644 index 000000000000..4e498d28cdc8 --- /dev/null +++ b/arch/x86/kernel/acpi/madt_playdead.S @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .align PAGE_SIZE + +/* + * asm_acpi_mp_play_dead() - Hand over control of the CPU to the BIOS + * + * rdi: Address of the ACPI MADT MPWK ResetVector + * rsi: PGD of the identity mapping + */ +SYM_FUNC_START(asm_acpi_mp_play_dead) + /* Turn off global entries. The following CR3 write will flush them.
*/ + movq %cr4, %rdx + andq $~(X86_CR4_PGE), %rdx + movq %rdx, %cr4 + + /* Switch to identity mapping */ + movq %rsi, %cr3 + + /* Jump to reset vector */ + ANNOTATE_RETPOLINE_SAFE + jmp *%rdi +SYM_FUNC_END(asm_acpi_mp_play_dead) diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c index 30820f9de5af..6cfe762be28b 100644 --- a/arch/x86/kernel/acpi/madt_wakeup.c +++ b/arch/x86/kernel/acpi/madt_wakeup.c @@ -1,10 +1,19 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include #include +#include #include +#include +#include +#include +#include #include #include +#include +#include +#include #include +#include /* Physical address of the Multiprocessor Wakeup Structure mailbox */ static u64 acpi_mp_wake_mailbox_paddr __ro_after_init; @@ -12,6 +21,154 @@ static u64 acpi_mp_wake_mailbox_paddr __ro_after_init; /* Virtual address of the Multiprocessor Wakeup Structure mailbox */ static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init; +static u64 acpi_mp_pgd __ro_after_init; +static u64 acpi_mp_reset_vector_paddr __ro_after_init; + +static void acpi_mp_stop_this_cpu(void) +{ + asm_acpi_mp_play_dead(acpi_mp_reset_vector_paddr, acpi_mp_pgd); +} + +static void acpi_mp_play_dead(void) +{ + play_dead_common(); + asm_acpi_mp_play_dead(acpi_mp_reset_vector_paddr, acpi_mp_pgd); +} + +static void acpi_mp_cpu_die(unsigned int cpu) +{ + u32 apicid = per_cpu(x86_cpu_to_apicid, cpu); + unsigned long timeout; + + /* + * Use the TEST mailbox command to prove that the BIOS got control over + * the CPU before declaring it dead. + * + * The BIOS has to clear the 'command' field of the mailbox. + */ + acpi_mp_wake_mailbox->apic_id = apicid; + smp_store_release(&acpi_mp_wake_mailbox->command, + ACPI_MP_WAKE_COMMAND_TEST); + + /* Don't wait longer than a second. */ + timeout = USEC_PER_SEC; + while (READ_ONCE(acpi_mp_wake_mailbox->command) && --timeout) + udelay(1); + + if (!timeout) + pr_err("Failed to hand over CPU %d to BIOS\n", cpu); +} + +/* The argument is required to match the type of x86_mapping_info::alloc_pgt_page */ +static void __init *alloc_pgt_page(void *dummy) +{ + return memblock_alloc(PAGE_SIZE, PAGE_SIZE); +} + +static void __init free_pgt_page(void *pgt, void *dummy) +{ + return memblock_free(pgt, PAGE_SIZE); +} + +/* + * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at + * the same place as in the kernel page tables. asm_acpi_mp_play_dead() switches + * to the identity mapping and the function has to be present at the same spot in + * the virtual address space before and after switching page tables.
+ */ +static int __init init_transition_pgtable(pgd_t *pgd) +{ + pgprot_t prot = PAGE_KERNEL_EXEC_NOENC; + unsigned long vaddr, paddr; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + vaddr = (unsigned long)asm_acpi_mp_play_dead; + pgd += pgd_index(vaddr); + if (!pgd_present(*pgd)) { + p4d = (p4d_t *)alloc_pgt_page(NULL); + if (!p4d) + return -ENOMEM; + set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE)); + } + p4d = p4d_offset(pgd, vaddr); + if (!p4d_present(*p4d)) { + pud = (pud_t *)alloc_pgt_page(NULL); + if (!pud) + return -ENOMEM; + set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE)); + } + pud = pud_offset(p4d, vaddr); + if (!pud_present(*pud)) { + pmd = (pmd_t *)alloc_pgt_page(NULL); + if (!pmd) + return -ENOMEM; + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); + } + pmd = pmd_offset(pud, vaddr); + if (!pmd_present(*pmd)) { + pte = (pte_t *)alloc_pgt_page(NULL); + if (!pte) + return -ENOMEM; + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); + } + pte = pte_offset_kernel(pmd, vaddr); + + paddr = __pa(vaddr); + set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); + + return 0; +} + +static int __init acpi_mp_setup_reset(u64 reset_vector) +{ + struct x86_mapping_info info = { + .alloc_pgt_page = alloc_pgt_page, + .free_pgt_page = free_pgt_page, + .page_flag = __PAGE_KERNEL_LARGE_EXEC, + .kernpg_flag = _KERNPG_TABLE_NOENC, + }; + pgd_t *pgd; + + pgd = alloc_pgt_page(NULL); + if (!pgd) + return -ENOMEM; + + for (int i = 0; i < nr_pfn_mapped; i++) { + unsigned long mstart, mend; + + mstart = pfn_mapped[i].start << PAGE_SHIFT; + mend = pfn_mapped[i].end << PAGE_SHIFT; + if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) { + kernel_ident_mapping_free(&info, pgd); + return -ENOMEM; + } + } + + if (kernel_ident_mapping_init(&info, pgd, + PAGE_ALIGN_DOWN(reset_vector), + PAGE_ALIGN(reset_vector + 1))) { + kernel_ident_mapping_free(&info, pgd); + return -ENOMEM; + } + + if (init_transition_pgtable(pgd)) { + kernel_ident_mapping_free(&info, pgd); + return -ENOMEM; + } + + smp_ops.play_dead = acpi_mp_play_dead; + smp_ops.stop_this_cpu = acpi_mp_stop_this_cpu; + smp_ops.cpu_die = acpi_mp_cpu_die; + + acpi_mp_reset_vector_paddr = reset_vector; + acpi_mp_pgd = __pa(pgd); + + return 0; +} + static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip) { if (!acpi_mp_wake_mailbox_paddr) { @@ -97,14 +254,37 @@ int __init acpi_parse_mp_wake(union acpi_subtable_headers *header, struct acpi_madt_multiproc_wakeup *mp_wake; mp_wake = (struct acpi_madt_multiproc_wakeup *)header; - if (BAD_MADT_ENTRY(mp_wake, end)) + + /* + * Cannot use the standard BAD_MADT_ENTRY() to sanity check the @mp_wake + * entry. 'sizeof (struct acpi_madt_multiproc_wakeup)' can be larger + * than the actual size of the MP wakeup entry in ACPI table because the + * 'reset_vector' is only available in the V1 MP wakeup structure. + */ + if (!mp_wake) + return -EINVAL; + if (end - (unsigned long)mp_wake < ACPI_MADT_MP_WAKEUP_SIZE_V0) + return -EINVAL; + if (mp_wake->header.length < ACPI_MADT_MP_WAKEUP_SIZE_V0) return -EINVAL; acpi_table_print_madt_entry(&header->common); acpi_mp_wake_mailbox_paddr = mp_wake->mailbox_address; - acpi_mp_disable_offlining(mp_wake); + if (mp_wake->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1 && + mp_wake->header.length >= ACPI_MADT_MP_WAKEUP_SIZE_V1) { + if (acpi_mp_setup_reset(mp_wake->reset_vector)) { + pr_warn("Failed to setup MADT reset vector\n"); + acpi_mp_disable_offlining(mp_wake); + } + } else { + /* + * CPU offlining requires version 1 of the ACPI MADT wakeup + * structure. 
+ */ + acpi_mp_disable_offlining(mp_wake); + } apic_update_callback(wakeup_secondary_cpu_64, acpi_wakeup_cpu); diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index fa63362469aa..e27958ef8264 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -1197,8 +1197,20 @@ struct acpi_madt_multiproc_wakeup { u16 version; u32 reserved; /* reserved - must be zero */ u64 mailbox_address; + u64 reset_vector; }; +/* Values for Version field above */ + +enum acpi_madt_multiproc_wakeup_version { + ACPI_MADT_MP_WAKEUP_VERSION_NONE = 0, + ACPI_MADT_MP_WAKEUP_VERSION_V1 = 1, + ACPI_MADT_MP_WAKEUP_VERSION_RESERVED = 2, /* 2 and greater are reserved */ +}; + +#define ACPI_MADT_MP_WAKEUP_SIZE_V0 16 +#define ACPI_MADT_MP_WAKEUP_SIZE_V1 24 + #define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032 #define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048 @@ -1211,7 +1223,8 @@ struct acpi_madt_multiproc_wakeup_mailbox { u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */ }; -#define ACPI_MP_WAKE_COMMAND_WAKEUP 1 +#define ACPI_MP_WAKE_COMMAND_WAKEUP 1 +#define ACPI_MP_WAKE_COMMAND_TEST 2 /* 17: CPU Core Interrupt Controller (ACPI 6.5) */ -- cgit v1.2.3-58-ga151 From 16df35946120fca2346c415fae429c821391eef8 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 14 Jun 2024 12:59:04 +0300 Subject: ACPI: tables: Print MULTIPROC_WAKEUP when MADT is parsed When MADT is parsed, print MULTIPROC_WAKEUP information: ACPI: MP Wakeup (version[1], mailbox[0x7fffd000], reset[0x7fffe068]) This debug information will be very helpful during bringup. Signed-off-by: Kirill A. Shutemov Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Baoquan He Reviewed-by: Kuppuswamy Sathyanarayanan Acked-by: Kai Huang Acked-by: Rafael J. Wysocki Tested-by: Tao Liu Link: https://lore.kernel.org/r/20240614095904.1345461-20-kirill.shutemov@linux.intel.com --- drivers/acpi/tables.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index b976e5fc3fbc..9e1b01c35070 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -198,6 +198,20 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header) } break; + case ACPI_MADT_TYPE_MULTIPROC_WAKEUP: + { + struct acpi_madt_multiproc_wakeup *p = + (struct acpi_madt_multiproc_wakeup *)header; + u64 reset_vector = 0; + + if (p->version >= ACPI_MADT_MP_WAKEUP_VERSION_V1) + reset_vector = p->reset_vector; + + pr_debug("MP Wakeup (version[%d], mailbox[%#llx], reset[%#llx])\n", + p->version, p->mailbox_address, reset_vector); + } + break; + case ACPI_MADT_TYPE_CORE_PIC: { struct acpi_madt_core_pic *p = (struct acpi_madt_core_pic *)header; -- cgit v1.2.3-58-ga151
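A practical note on seeing the new output (not part of the patch): pr_debug() compiles to a no-op unless DEBUG is defined for the file or the callsite is enabled via dynamic debug, and MADT parsing runs early enough in boot that dynamic debug may not yet have taken effect. During bringup, one simple option is a per-file define in drivers/acpi/Makefile:

	CFLAGS_tables.o := -DDEBUG

combined with a console log level that includes KERN_DEBUG messages, e.g. loglevel=8 on the kernel command line.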