From 5cf896fb6be3effd9aea455b22213e27be8bdb1d Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Wed, 31 Jul 2019 18:18:42 -0700 Subject: arm64: Add support for relocating the kernel with RELR relocations RELR is a relocation packing format for relative relocations. The format is described in a generic-abi proposal: https://groups.google.com/d/topic/generic-abi/bX460iggiKg/discussion The LLD linker can be instructed to pack relocations in the RELR format by passing the flag --pack-dyn-relocs=relr. This patch adds a new config option, CONFIG_RELR. Enabling this option instructs the linker to pack vmlinux's relative relocations in the RELR format, and causes the kernel to apply the relocations at startup along with the RELA relocations. RELA relocations still need to be applied because the linker will emit RELA relative relocations if they are unrepresentable in the RELR format (i.e. address not a multiple of 2). Enabling CONFIG_RELR reduces the size of a defconfig kernel image with CONFIG_RANDOMIZE_BASE by 3.5MB/16% uncompressed, or 550KB/5% compressed (lz4). Signed-off-by: Peter Collingbourne Tested-by: Nick Desaulniers Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..2681eb79c40b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1467,6 +1467,7 @@ endif config RELOCATABLE bool + select ARCH_HAS_RELR help This builds the kernel as a Position Independent Executable (PIE), which retains all relocation metadata required to relocate the -- cgit v1.2.3-58-ga151 From 63f0c60379650d82250f22e4cf4137ef3dc4f43d Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 23 Jul 2019 19:58:39 +0200 Subject: arm64: Introduce prctl() options to control the tagged user addresses ABI It is not desirable to relax the ABI to allow tagged user addresses into the kernel indiscriminately. This patch introduces a prctl() interface for enabling or disabling the tagged ABI with a global sysctl control for preventing applications from enabling the relaxed ABI (meant for testing user-space prctl() return error checking without reconfiguring the kernel). The ABI properties are inherited by threads of the same application and fork()'ed children but cleared on execve(). A Kconfig option allows the overall disabling of the relaxed ABI. The PR_SET_TAGGED_ADDR_CTRL will be expanded in the future to handle MTE-specific settings like imprecise vs precise exceptions. Reviewed-by: Kees Cook Signed-off-by: Catalin Marinas Signed-off-by: Andrey Konovalov Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 9 +++++ arch/arm64/include/asm/processor.h | 8 ++++ arch/arm64/include/asm/thread_info.h | 1 + arch/arm64/include/asm/uaccess.h | 4 +- arch/arm64/kernel/process.c | 73 ++++++++++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 5 +++ kernel/sys.c | 12 ++++++ 7 files changed, 111 insertions(+), 1 deletion(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..5d254178b9ca 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1110,6 +1110,15 @@ config ARM64_SW_TTBR0_PAN zeroed area and reserved ASID. The user access routines restore the valid TTBR0_EL1 temporarily. 
+config ARM64_TAGGED_ADDR_ABI + bool "Enable the tagged user addresses syscall ABI" + default y + help + When this option is enabled, user applications can opt in to a + relaxed ABI via prctl() allowing tagged addresses to be passed + to system calls as pointer arguments. For details, see + Documentation/arm64/tagged-address-abi.txt. + menuconfig COMPAT bool "Kernel support for 32-bit EL0" depends on ARM64_4K_PAGES || EXPERT diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 844e2964b0f5..28eed40ffc12 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -306,6 +306,14 @@ extern void __init minsigstksz_setup(void); /* PR_PAC_RESET_KEYS prctl */ #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(unsigned long arg); +long get_tagged_addr_ctrl(void); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl() +#endif + /* * For CONFIG_GCC_PLUGIN_STACKLEAK * diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 180b34ec5965..012238d8e58d 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -90,6 +90,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_SVE 23 /* Scalable Vector Extension in use */ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ +#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index a138e3b4f717..097d6bfac0b7 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -62,7 +62,9 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si { unsigned long ret, limit = current_thread_info()->addr_limit; - addr = untagged_addr(addr); + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) && + test_thread_flag(TIF_TAGGED_ADDR)) + addr = untagged_addr(addr); __chk_user_ptr(addr); asm volatile( diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index f674f28df663..76b7c55026aa 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,7 @@ #include #include #include +#include #include #include @@ -307,11 +309,18 @@ static void tls_thread_flush(void) } } +static void flush_tagged_addr_state(void) +{ + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) + clear_thread_flag(TIF_TAGGED_ADDR); +} + void flush_thread(void) { fpsimd_flush_thread(); tls_thread_flush(); flush_ptrace_hw_breakpoint(current); + flush_tagged_addr_state(); } void release_thread(struct task_struct *dead_task) @@ -565,3 +574,67 @@ void arch_setup_new_exec(void) ptrauth_thread_init_user(current); } + +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* + * Control the relaxed ABI allowing tagged user addresses into the kernel. 
 */ +static unsigned int tagged_addr_prctl_allowed = 1; + +long set_tagged_addr_ctrl(unsigned long arg) +{ + if (!tagged_addr_prctl_allowed) + return -EINVAL; + if (is_compat_task()) + return -EINVAL; + if (arg & ~PR_TAGGED_ADDR_ENABLE) + return -EINVAL; + + update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); + + return 0; +} + +long get_tagged_addr_ctrl(void) +{ + if (!tagged_addr_prctl_allowed) + return -EINVAL; + if (is_compat_task()) + return -EINVAL; + + if (test_thread_flag(TIF_TAGGED_ADDR)) + return PR_TAGGED_ADDR_ENABLE; + + return 0; +} + +/* + * Global sysctl to disable the tagged user addresses support. This control + * only prevents the tagged address ABI enabling via prctl() and does not + * disable it for tasks that already opted in to the relaxed ABI. + */ +static int zero; +static int one = 1; + +static struct ctl_table tagged_addr_sysctl_table[] = { + { + .procname = "tagged_addr", + .mode = 0644, + .data = &tagged_addr_prctl_allowed, + .maxlen = sizeof(int), + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { } +}; + +static int __init tagged_addr_init(void) +{ + if (!register_sysctl("abi", tagged_addr_sysctl_table)) + return -EINVAL; + return 0; +} + +core_initcall(tagged_addr_init); +#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 094bb03b9cc2..2e927b3e9d6c 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -229,4 +229,9 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) +/* Tagged user address controls for arm64 */ +#define PR_SET_TAGGED_ADDR_CTRL 55 +#define PR_GET_TAGGED_ADDR_CTRL 56 +# define PR_TAGGED_ADDR_ENABLE (1UL << 0) + #endif /* _LINUX_PRCTL_H */ diff --git a/kernel/sys.c b/kernel/sys.c index 2969304c29fe..c6c4d5358bd3 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -124,6 +124,12 @@ #ifndef PAC_RESET_KEYS # define PAC_RESET_KEYS(a, b) (-EINVAL) #endif +#ifndef SET_TAGGED_ADDR_CTRL +# define SET_TAGGED_ADDR_CTRL(a) (-EINVAL) +#endif +#ifndef GET_TAGGED_ADDR_CTRL +# define GET_TAGGED_ADDR_CTRL() (-EINVAL) +#endif /* * this is where the system-wide overflow UID and GID are defined, for @@ -2492,6 +2498,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, return -EINVAL; error = PAC_RESET_KEYS(me, arg2); break; + case PR_SET_TAGGED_ADDR_CTRL: + error = SET_TAGGED_ADDR_CTRL(arg2); + break; + case PR_GET_TAGGED_ADDR_CTRL: + error = GET_TAGGED_ADDR_CTRL(); + break; default: error = -EINVAL; break; -- cgit v1.2.3-58-ga151 From 42d038c4fb00f1ec1a4c4616784da4561385b628 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Tue, 6 Aug 2019 18:00:14 +0800 Subject: arm64: Add support for function error injection Inspired by the commit 7cd01b08d35f ("powerpc: Add support for function error injection"), this patch supports function error injection for Arm64. This patch mainly supports two functions: regs_set_return_value(), which is used to overwrite the return value, and override_function_with_return(), which makes the probed function return immediately to its caller.
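As a purely illustrative aside (not part of this patch), the two helpers are meant to be driven from a kprobe pre-handler, mirroring what kernel/fail_function.c already does on architectures with HAVE_FUNCTION_ERROR_INJECTION. The sketch below shows the idea; the probed symbol name, the -ENOMEM value and the module wrapping are made up for the example, and real injection targets are expected to be whitelisted with ALLOW_ERROR_INJECTION():

  #include <linux/module.h>
  #include <linux/kprobes.h>
  #include <linux/error-injection.h>
  #include <linux/errno.h>

  /* Hypothetical probe target; in real use pick a function annotated
   * with ALLOW_ERROR_INJECTION(). */
  static struct kprobe fei_kp = {
          .symbol_name = "example_injectable_func",
  };

  static int fei_pre_handler(struct kprobe *p, struct pt_regs *regs)
  {
          /* Make the probed function appear to fail with -ENOMEM ... */
          regs_set_return_value(regs, -ENOMEM);
          /* ... and return straight to its caller, skipping its body. */
          override_function_with_return(regs);
          /* Non-zero: regs->pc was changed, so kprobes must not single-step. */
          return 1;
  }

  static int __init fei_example_init(void)
  {
          fei_kp.pre_handler = fei_pre_handler;
          return register_kprobe(&fei_kp);
  }

  static void __exit fei_example_exit(void)
  {
          unregister_kprobe(&fei_kp);
  }

  module_init(fei_example_init);
  module_exit(fei_example_exit);
  MODULE_LICENSE("GPL");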
Reviewed-by: Masami Hiramatsu Signed-off-by: Leo Yan Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/ptrace.h | 5 +++++ arch/arm64/lib/Makefile | 2 ++ arch/arm64/lib/error-inject.c | 18 ++++++++++++++++++ 4 files changed, 26 insertions(+) create mode 100644 arch/arm64/lib/error-inject.c (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..b15803afb2a0 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -148,6 +148,7 @@ config ARM64 select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_TRACER select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT if PERF_EVENTS diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 1dcf63a9ac1f..fbebb411ae20 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -301,6 +301,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) return regs->regs[0]; } +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) +{ + regs->regs[0] = rc; +} + /** * regs_get_kernel_argument() - get Nth function argument in kernel * @regs: pt_regs of that context diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 33c2a4abda04..f182ccb0438e 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -33,3 +33,5 @@ UBSAN_SANITIZE_atomic_ll_sc.o := n lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o obj-$(CONFIG_CRC32) += crc32.o + +obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/error-inject.c b/arch/arm64/lib/error-inject.c new file mode 100644 index 000000000000..ed15021da3ed --- /dev/null +++ b/arch/arm64/lib/error-inject.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void override_function_with_return(struct pt_regs *regs) +{ + /* + * 'regs' represents the state on entry of a predefined function in + * the kernel/module and which is captured on a kprobe. + * + * When kprobe returns back from exception it will override the end + * of probed function and directly return to the predefined + * function's caller. + */ + instruction_pointer_set(regs, procedure_link_pointer(regs)); +} +NOKPROBE_SYMBOL(override_function_with_return); -- cgit v1.2.3-58-ga151 From 6bd1d0be0e97936d15cdacc71f5c232fbf71293e Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:15 +0100 Subject: arm64: kasan: Switch to using KASAN_SHADOW_OFFSET KASAN_SHADOW_OFFSET is a constant that is supplied to gcc as a command line argument and affects the codegen of the inline address sanitiser. Essentially, for an example memory access: *ptr1 = val; The compiler will insert logic similar to the below: shadowValue = *(ptr1 >> KASAN_SHADOW_SCALE_SHIFT + KASAN_SHADOW_OFFSET) if (somethingWrong(shadowValue)) flagAnError(); This code sequence is inserted into many places, thus KASAN_SHADOW_OFFSET is essentially baked into many places in the kernel text. If we want to run a single kernel binary with multiple address spaces, then we need to do this with KASAN_SHADOW_OFFSET fixed. Thankfully, due to the way the KASAN_SHADOW_OFFSET is used to provide shadow addresses we know that the end of the shadow region is constant w.r.t.
VA space size: KASAN_SHADOW_END = ~0 >> KASAN_SHADOW_SCALE_SHIFT + KASAN_SHADOW_OFFSET This means that if we increase the size of the VA space, the start of the KASAN region expands into lower addresses whilst the end of the KASAN region is fixed. Currently the arm64 code computes KASAN_SHADOW_OFFSET at build time via build scripts with the VA size used as a parameter. (There are build time checks in the C code too to ensure that expected values are being derived). It is sufficient, and indeed is a simplification, to remove the build scripts (and build time checks) entirely and instead provide KASAN_SHADOW_OFFSET values. This patch removes the logic to compute the KASAN_SHADOW_OFFSET in the arm64 Makefile, and instead we adopt the approach used by x86 to supply offset values in Kconfig. To help debug/develop future VA space changes, the Makefile logic has been preserved in a script file in the arm64 Documentation folder. Reviewed-by: Catalin Marinas Signed-off-by: Steve Capper Signed-off-by: Will Deacon --- Documentation/arm64/kasan-offsets.sh | 27 +++++++++++++++++++++++++++ arch/arm64/Kconfig | 15 +++++++++++++++ arch/arm64/Makefile | 8 -------- arch/arm64/include/asm/kasan.h | 11 ++++------- arch/arm64/include/asm/memory.h | 8 +++++--- 5 files changed, 51 insertions(+), 18 deletions(-) create mode 100644 Documentation/arm64/kasan-offsets.sh (limited to 'arch/arm64/Kconfig') diff --git a/Documentation/arm64/kasan-offsets.sh b/Documentation/arm64/kasan-offsets.sh new file mode 100644 index 000000000000..2b7a021db363 --- /dev/null +++ b/Documentation/arm64/kasan-offsets.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +# Print out the KASAN_SHADOW_OFFSETS required to place the KASAN SHADOW +# start address at the mid-point of the kernel VA space + +print_kasan_offset () { + printf "%02d\t" $1 + printf "0x%08x00000000\n" $(( (0xffffffff & (-1 << ($1 - 1 - 32))) \ + (1 << ($1 - 32 - $2)) \ - (1 << (64 - 32 - $2)) )) +} + +echo KASAN_SHADOW_SCALE_SHIFT = 3 +printf "VABITS\tKASAN_SHADOW_OFFSET\n" +print_kasan_offset 48 3 +print_kasan_offset 47 3 +print_kasan_offset 42 3 +print_kasan_offset 39 3 +print_kasan_offset 36 3 +echo +echo KASAN_SHADOW_SCALE_SHIFT = 4 +printf "VABITS\tKASAN_SHADOW_OFFSET\n" +print_kasan_offset 48 4 +print_kasan_offset 47 4 +print_kasan_offset 42 4 +print_kasan_offset 39 4 +print_kasan_offset 36 4 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..f7f23e47c28f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -297,6 +297,21 @@ config ARCH_SUPPORTS_UPROBES config ARCH_PROC_KCORE_TEXT def_bool y +config KASAN_SHADOW_OFFSET + hex + depends on KASAN + default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && !KASAN_SW_TAGS + default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS + default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS + default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS + default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS + default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && KASAN_SW_TAGS + default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS + default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS + default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS + default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS + default 0xffffffffffffffff + source "arch/arm64/Kconfig.platforms" menu "Kernel Features" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 61f7926fdeca..a8d2a241ac58 100644 --- 
a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -126,14 +126,6 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) -# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) -# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) -# in 32-bit arithmetic -KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ - (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 1 - 32))) \ - + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \ - - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) ) - export TEXT_OFFSET GZFLAGS core-y += arch/arm64/kernel/ arch/arm64/mm/ diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index b52aacd2c526..10d2add842da 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -18,11 +18,8 @@ * KASAN_SHADOW_START: beginning of the kernel virtual addresses. * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, * where N = (1 << KASAN_SHADOW_SCALE_SHIFT). - */ -#define KASAN_SHADOW_START (VA_START) -#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) - -/* + * + * KASAN_SHADOW_OFFSET: * This value is used to map an address to the corresponding shadow * address by the following formula: * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET @@ -33,8 +30,8 @@ * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) */ -#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \ - (64 - KASAN_SHADOW_SCALE_SHIFT))) +#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT))) +#define KASAN_SHADOW_START _KASAN_SHADOW_START(VA_BITS) void kasan_init(void); void kasan_copy_shadow(pgd_t *pgdir); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 380594b1a0ba..968659c90f5c 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -42,7 +42,7 @@ #define PAGE_OFFSET (UL(0xffffffffffffffff) - \ (UL(1) << VA_BITS) + 1) #define KIMAGE_VADDR (MODULES_END) -#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE) +#define BPF_JIT_REGION_START (KASAN_SHADOW_END) #define BPF_JIT_REGION_SIZE (SZ_128M) #define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) @@ -68,11 +68,13 @@ * significantly, so double the (minimum) stack size when they are in use. */ #ifdef CONFIG_KASAN -#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) +#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \ + + KASAN_SHADOW_OFFSET) #define KASAN_THREAD_SHIFT 1 #else -#define KASAN_SHADOW_SIZE (0) #define KASAN_THREAD_SHIFT 0 +#define KASAN_SHADOW_END (VA_START) #endif #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) -- cgit v1.2.3-58-ga151 From b6d00d47e81a49f6cf462518c10408f37a3e6785 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:22 +0100 Subject: arm64: mm: Introduce 52-bit Kernel VAs Most of the machinery is now in place to enable 52-bit kernel VAs that are detectable at boot time. This patch adds a Kconfig option for 52-bit user and kernel addresses and plumbs in the requisite CONFIG_ macros as well as sets TCR.T1SZ, physvirt_offset and vmemmap at early boot. 
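(For reference, TCR_EL1.T1SZ encodes 64 minus the kernel VA width, so the early boot code programs T1SZ = 64 - 52 = 12 when 52-bit VAs are usable and T1SZ = 64 - 48 = 16 when it has to fall back to 48 bits.)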
To simplify things this patch also removes the 52-bit user/48-bit kernel kconfig option. Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 20 +++++++++++--------- arch/arm64/include/asm/assembler.h | 13 ++++++++----- arch/arm64/include/asm/memory.h | 7 ++++--- arch/arm64/include/asm/mmu_context.h | 2 +- arch/arm64/include/asm/pgtable-hwdef.h | 2 +- arch/arm64/kernel/head.S | 4 ++-- arch/arm64/mm/init.c | 10 ++++++++++ arch/arm64/mm/proc.S | 3 ++- 8 files changed, 39 insertions(+), 22 deletions(-) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f7f23e47c28f..f5f7cb75a698 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -286,7 +286,7 @@ config PGTABLE_LEVELS int default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36 default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 - default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) + default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47 default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48 @@ -300,12 +300,12 @@ config ARCH_PROC_KCORE_TEXT config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && !KASAN_SW_TAGS + default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS - default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && KASAN_SW_TAGS + default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS @@ -759,13 +759,14 @@ config ARM64_VA_BITS_47 config ARM64_VA_BITS_48 bool "48-bit" -config ARM64_USER_VA_BITS_52 - bool "52-bit (user)" +config ARM64_VA_BITS_52 + bool "52-bit" depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN) help Enable 52-bit virtual addressing for userspace when explicitly - requested via a hint to mmap(). The kernel will continue to - use 48-bit virtual addresses for its own mappings. + requested via a hint to mmap(). The kernel will also use 52-bit + virtual addresses for its own mappings (provided HW support for + this feature is available, otherwise it reverts to 48-bit). 
NOTE: Enabling 52-bit virtual addressing in conjunction with ARMv8.3 Pointer Authentication will result in the PAC being @@ -778,7 +779,7 @@ endchoice config ARM64_FORCE_52BIT bool "Force 52-bit virtual addresses for userspace" - depends on ARM64_USER_VA_BITS_52 && EXPERT + depends on ARM64_VA_BITS_52 && EXPERT help For systems with 52-bit userspace VAs enabled, the kernel will attempt to maintain compatibility with older software by providing 48-bit VAs @@ -795,7 +796,8 @@ config ARM64_VA_BITS default 39 if ARM64_VA_BITS_39 default 42 if ARM64_VA_BITS_42 default 47 if ARM64_VA_BITS_47 - default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52 + default 48 if ARM64_VA_BITS_48 + default 52 if ARM64_VA_BITS_52 choice prompt "Physical address space size" diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index ede368bafa2c..c066fc4976cd 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -349,6 +349,13 @@ alternative_endif bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH .endm +/* + * tcr_set_t1sz - update TCR.T1SZ + */ + .macro tcr_set_t1sz, valreg, t1sz + bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH + .endm + /* * tcr_compute_pa_size - set TCR.(I)PS to the highest supported * ID_AA64MMFR0_EL1.PARange value @@ -539,10 +546,6 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * ttbr: Value of ttbr to set, modified. */ .macro offset_ttbr1, ttbr, tmp -#ifdef CONFIG_ARM64_USER_VA_BITS_52 - orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET -#endif - #ifdef CONFIG_ARM64_VA_BITS_52 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT) @@ -558,7 +561,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * to be nop'ed out when dealing with 52-bit kernel VAs. */ .macro restore_ttbr1, ttbr -#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_VA_BITS_52) +#ifdef CONFIG_ARM64_VA_BITS_52 bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET #endif .endm diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 0204c2006c92..d911d0573460 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -44,8 +44,9 @@ * VA_START - the first kernel virtual address. 
*/ #define VA_BITS (CONFIG_ARM64_VA_BITS) -#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ - (UL(1) << VA_BITS) + 1) +#define _PAGE_OFFSET(va) (UL(0xffffffffffffffff) - \ + (UL(1) << (va)) + 1) +#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) #define BPF_JIT_REGION_START (KASAN_SHADOW_END) #define BPF_JIT_REGION_SIZE (SZ_128M) @@ -68,7 +69,7 @@ #define KERNEL_START _text #define KERNEL_END _end -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 #define MAX_USER_VA_BITS 52 #else #define MAX_USER_VA_BITS VA_BITS diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 670003a55d28..3827ff4040a3 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -63,7 +63,7 @@ extern u64 idmap_ptrs_per_pgd; static inline bool __cpu_uses_extended_idmap(void) { - if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52)) + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52)) return false; return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index db92950bb1a0..3df60f97da1f 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -304,7 +304,7 @@ #define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2) #endif -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */ #define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \ (UL(1) << (48 - PGDIR_SHIFT))) * 8) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a96dc4386c7c..c8446f8c81f5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -308,7 +308,7 @@ __create_page_tables: adrp x0, idmap_pg_dir adrp x3, __idmap_text_start // __pa(__idmap_text_start) -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 mrs_s x6, SYS_ID_AA64MMFR2_EL1 and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT) mov x5, #52 @@ -794,7 +794,7 @@ ENTRY(__enable_mmu) ENDPROC(__enable_mmu) ENTRY(__cpu_secondary_check52bitva) -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 ldr_l x0, vabits_user cmp x0, #52 b.ne 2f diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 2940221e5519..531c497c5758 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -325,6 +325,16 @@ void __init arm64_memblock_init(void) vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)); + /* + * If we are running with a 52-bit kernel VA config on a system that + * does not support it, we have to offset our vmemmap and physvirt_offset + * s.t. we avoid the 52-bit portion of the direct linear map + */ + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) { + vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT; + physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48); + } + /* * Remove the memory that we will not be able to cover with the * linear mapping. 
Take care not to clip the kernel which may be diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 8d289ff7584d..8b021c5c0884 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -438,10 +438,11 @@ ENTRY(__cpu_setup) TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS tcr_clear_errata_bits x10, x9, x5 -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 ldr_l x9, vabits_user sub x9, xzr, x9 add x9, x9, #64 + tcr_set_t1sz x10, x9 #else ldr_l x9, idmap_t0sz #endif -- cgit v1.2.3-58-ga151 From b32baf91f60fb9c7010bff87e68132f2ce31d9a8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 29 Aug 2019 11:52:47 +0100 Subject: arm64: lse: Make ARM64_LSE_ATOMICS depend on JUMP_LABEL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support for LSE atomic instructions (CONFIG_ARM64_LSE_ATOMICS) relies on a static key to select between the legacy LL/SC implementation which is available on all arm64 CPUs and the super-duper LSE implementation which is available on CPUs implementing v8.1 and later. Unfortunately, when building a kernel with CONFIG_JUMP_LABEL disabled (e.g. because the toolchain doesn't support 'asm goto'), the static key inside the atomics code tries to use atomics itself. This results in a mess of circular includes and a build failure: In file included from ./arch/arm64/include/asm/lse.h:11, from ./arch/arm64/include/asm/atomic.h:16, from ./include/linux/atomic.h:7, from ./include/asm-generic/bitops/atomic.h:5, from ./arch/arm64/include/asm/bitops.h:26, from ./include/linux/bitops.h:19, from ./include/linux/kernel.h:12, from ./include/asm-generic/bug.h:18, from ./arch/arm64/include/asm/bug.h:26, from ./include/linux/bug.h:5, from ./include/linux/page-flags.h:10, from kernel/bounds.c:10: ./include/linux/jump_label.h: In function ‘static_key_count’: ./include/linux/jump_label.h:254:9: error: implicit declaration of function ‘atomic_read’ [-Werror=implicit-function-declaration] return atomic_read(&key->enabled); ^~~~~~~~~~~ [ ... more of the same ... ] Since LSE atomic instructions are not critical to the operation of the kernel, make them depend on JUMP_LABEL at compile time. Reviewed-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64/Kconfig') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..27405ac94228 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1263,6 +1263,7 @@ config ARM64_PAN config ARM64_LSE_ATOMICS bool "Atomic instructions" + depends on JUMP_LABEL default y help As part of the Large System Extensions, ARMv8.1 introduces new -- cgit v1.2.3-58-ga151