author     Catalin Marinas <catalin.marinas@arm.com>   2017-08-15 18:40:58 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>   2017-08-15 18:40:58 +0100
commit     df5b95bee1ed7009a2090e9924e7a96e14850d56 (patch)
tree       03606e57fd6c7e8efbff2315dd64ea857448f517 /arch/arm64/include
parent     969ff73e72fe903a2354c51e01c1a1f937c544ca (diff)
parent     872d8327ce8982883b8237b2c320c8666f14e561 (diff)
Merge branch 'arm64/vmap-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux into for-next/core
* 'arm64/vmap-stack' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux:
arm64: add VMAP_STACK overflow detection
arm64: add on_accessible_stack()
arm64: add basic VMAP_STACK support
arm64: use an irq stack pointer
arm64: assembler: allow adr_this_cpu to use the stack pointer
arm64: factor out entry stack manipulation
efi/arm64: add EFI_KIMG_ALIGN
arm64: move SEGMENT_ALIGN to <asm/memory.h>
arm64: clean up irq stack definitions
arm64: clean up THREAD_* definitions
arm64: factor out PAGE_* and CONT_* definitions
arm64: kernel: remove {THREAD,IRQ_STACK}_START_SP
fork: allow arch-override of VMAP stack alignment
arm64: remove __die()'s stack dump
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/assembler.h    |  8
-rw-r--r--  arch/arm64/include/asm/efi.h          |  8
-rw-r--r--  arch/arm64/include/asm/irq.h          | 25
-rw-r--r--  arch/arm64/include/asm/memory.h       | 53
-rw-r--r--  arch/arm64/include/asm/page-def.h     | 34
-rw-r--r--  arch/arm64/include/asm/page.h         | 12
-rw-r--r--  arch/arm64/include/asm/processor.h    |  2
-rw-r--r--  arch/arm64/include/asm/stacktrace.h   | 60
-rw-r--r--  arch/arm64/include/asm/thread_info.h  | 10
9 files changed, 164 insertions, 48 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1ef56837cc6f..d58a6253c6ab 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -230,12 +230,18 @@ lr      .req    x30             // link register
        .endm

        /*
-        * @dst: Result of per_cpu(sym, smp_processor_id())
+        * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
+        *       non-module code
         * @sym: The name of the per-cpu variable
         * @tmp: scratch register
         */
        .macro adr_this_cpu, dst, sym, tmp
+#ifndef MODULE
+       adrp    \tmp, \sym
+       add     \dst, \tmp, #:lo12:\sym
+#else
        adr_l   \dst, \sym
+#endif
        mrs     \tmp, tpidr_el1
        add     \dst, \dst, \tmp
        .endm
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8f3043aba873..2b1e5def2e49 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -4,6 +4,7 @@
 #include <asm/boot.h>
 #include <asm/cpufeature.h>
 #include <asm/io.h>
+#include <asm/memory.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
 #include <asm/ptrace.h>
@@ -48,6 +49,13 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
  */
 #define EFI_FDT_ALIGN   SZ_2M   /* used by allocate_new_fdt_and_exit_boot() */

+/*
+ * In some configurations (e.g. VMAP_STACK && 64K pages), stacks built into the
+ * kernel need greater alignment than we require the segments to be padded to.
+ */
+#define EFI_KIMG_ALIGN \
+       (SEGMENT_ALIGN > THREAD_ALIGN ? SEGMENT_ALIGN : THREAD_ALIGN)
+
 /* on arm64, the FDT may be located anywhere in system RAM */
 static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
 {
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 8ba89c4ca183..5e6f77239064 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,21 +1,12 @@
 #ifndef __ASM_IRQ_H
 #define __ASM_IRQ_H

-#define IRQ_STACK_SIZE          THREAD_SIZE
-#define IRQ_STACK_START_SP      THREAD_START_SP
-
 #ifndef __ASSEMBLER__

-#include <linux/percpu.h>
-#include <linux/sched/task_stack.h>
-
 #include <asm-generic/irq.h>
-#include <asm/thread_info.h>

 struct pt_regs;

-DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
-
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));

 static inline int nr_legacy_irqs(void)
@@ -23,21 +14,5 @@ static inline int nr_legacy_irqs(void)
        return 0;
 }

-static inline bool on_irq_stack(unsigned long sp)
-{
-       unsigned long low = (unsigned long)raw_cpu_ptr(irq_stack);
-       unsigned long high = low + IRQ_STACK_START_SP;
-
-       return (low <= sp && sp <= high);
-}
-
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
-{
-       unsigned long low = (unsigned long)task_stack_page(tsk);
-       unsigned long high = low + THREAD_SIZE;
-
-       return (low <= sp && sp < high);
-}
-
 #endif /* !__ASSEMBLER__ */
 #endif
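An aside on the adr_this_cpu hunk above: ADRP cannot write to SP, but ADD (immediate) can, so materialising the page address in the scratch register first lets \dst be the stack pointer for non-module code. The arithmetic itself is just the symbol's address plus this CPU's offset, which arm64 keeps in TPIDR_EL1. A rough C model (hypothetical names, not kernel API):

    #include <stdint.h>

    /*
     * What adr_this_cpu computes: a per-cpu variable's address is its
     * link-time address plus the current CPU's per-cpu offset, held in
     * the TPIDR_EL1 system register.
     */
    static inline uintptr_t this_cpu_addr(uintptr_t sym, uintptr_t tpidr_el1)
    {
        return sym + tpidr_el1;     /* add \dst, \dst, \tmp */
    }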
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ef39dcb9ca6a..3585a5e26151 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -25,6 +25,7 @@
 #include <linux/const.h>
 #include <linux/types.h>
 #include <asm/bug.h>
+#include <asm/page-def.h>
 #include <asm/sizes.h>

 /*
@@ -103,6 +104,58 @@
 #define KASAN_SHADOW_SIZE       (0)
 #endif

+#define MIN_THREAD_SHIFT        14
+
+/*
+ * VMAP'd stacks are allocated at page granularity, so we must ensure that such
+ * stacks are a multiple of page size.
+ */
+#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#define THREAD_SHIFT            PAGE_SHIFT
+#else
+#define THREAD_SHIFT            MIN_THREAD_SHIFT
+#endif
+
+#if THREAD_SHIFT >= PAGE_SHIFT
+#define THREAD_SIZE_ORDER       (THREAD_SHIFT - PAGE_SHIFT)
+#endif
+
+#define THREAD_SIZE             (UL(1) << THREAD_SHIFT)
+
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN            (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN            THREAD_SIZE
+#endif
+
+#define IRQ_STACK_SIZE          THREAD_SIZE
+
+#define OVERFLOW_STACK_SIZE     SZ_4K
+
+/*
+ * Alignment of kernel segments (e.g. .text, .data).
+ */
+#if defined(CONFIG_DEBUG_ALIGN_RODATA)
+/*
+ *  4 KB granule:   1 level 2 entry
+ * 16 KB granule: 128 level 3 entries, with contiguous bit
+ * 64 KB granule:  32 level 3 entries, with contiguous bit
+ */
+#define SEGMENT_ALIGN           SZ_2M
+#else
+/*
+ *  4 KB granule:  16 level 3 entries, with contiguous bit
+ * 16 KB granule:   4 level 3 entries, without contiguous bit
+ * 64 KB granule:   1 level 3 entry
+ */
+#define SEGMENT_ALIGN           SZ_64K
+#endif
+
 /*
  * Memory types available.
  */
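To make the THREAD_ALIGN comment above concrete: with 4K pages, THREAD_SHIFT is 14, so a 16 KiB stack placed on a 32 KiB boundary has bit 14 clear for every address inside it, while an sp that has run off the low end has bit 14 set; a single-bit test on sp is enough. A minimal user-space sketch of the check (the base address is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SHIFT 14UL                   /* MIN_THREAD_SHIFT */
    #define THREAD_SIZE  (1UL << THREAD_SHIFT)  /* 16 KiB */
    #define THREAD_ALIGN (2 * THREAD_SIZE)      /* 32 KiB */

    /* Valid stack pointers in [base, base + THREAD_SIZE) have bit
     * THREAD_SHIFT clear when base is THREAD_ALIGN-aligned; an sp that
     * has overflowed below base has it set. */
    static int sp_overflowed(uintptr_t sp)
    {
        return (sp & (1UL << THREAD_SHIFT)) != 0;
    }

    int main(void)
    {
        uintptr_t base = 0xffff000008a08000UL;  /* 32 KiB aligned */

        printf("%d\n", sp_overflowed(base + 0x1000)); /* 0: inside stack  */
        printf("%d\n", sp_overflowed(base - 0x10));   /* 1: overflowed    */
        return 0;
    }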
diff --git a/arch/arm64/include/asm/page-def.h b/arch/arm64/include/asm/page-def.h
new file mode 100644
index 000000000000..01591a29dc2e
--- /dev/null
+++ b/arch/arm64/include/asm/page-def.h
@@ -0,0 +1,34 @@
+/*
+ * Based on arch/arm/include/asm/page.h
+ *
+ * Copyright (C) 1995-2003 Russell King
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PAGE_DEF_H
+#define __ASM_PAGE_DEF_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+/* CONT_SHIFT determines the number of pages which can be tracked together */
+#define PAGE_SHIFT              CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT              CONFIG_ARM64_CONT_SHIFT
+#define PAGE_SIZE               (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK               (~(PAGE_SIZE-1))
+
+#define CONT_SIZE               (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT))
+#define CONT_MASK               (~(CONT_SIZE-1))
+
+#endif /* __ASM_PAGE_DEF_H */
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 8472c6def5ef..60d02c81a3a2 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -19,17 +19,7 @@
 #ifndef __ASM_PAGE_H
 #define __ASM_PAGE_H

-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-/* CONT_SHIFT determines the number of pages which can be tracked together */
-#define PAGE_SHIFT              CONFIG_ARM64_PAGE_SHIFT
-#define CONT_SHIFT              CONFIG_ARM64_CONT_SHIFT
-#define PAGE_SIZE               (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK               (~(PAGE_SIZE-1))
-
-#define CONT_SIZE               (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT))
-#define CONT_MASK               (~(CONT_SIZE-1))
+#include <asm/page-def.h>

 #ifndef __ASSEMBLY__

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index b7334f11c4cf..29adab8138c3 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -159,7 +159,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);

 #define task_pt_regs(p) \
-       ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+       ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

 #define KSTK_EIP(tsk)   ((unsigned long)task_pt_regs(tsk)->pc)
 #define KSTK_ESP(tsk)   user_stack_pointer(task_pt_regs(tsk))
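The processor.h hunk follows from the removal of THREAD_START_SP: the initial stack pointer is now simply the top of the stack, and the saved user register frame is carved off that top with no 16-byte gap. A simplified model of the new task_pt_regs() arithmetic (illustrative struct and names, not the kernel's definitions):

    #include <stdint.h>

    /* Stand-ins for the kernel's pt_regs and THREAD_SIZE. */
    struct pt_regs_model { uint64_t regs[31], sp, pc, pstate; };
    #define THREAD_SIZE_MODEL (1UL << 14)

    /* task_pt_regs(), modelled: the frame sits at the very top of the
     * THREAD_SIZE-byte kernel stack. */
    static inline struct pt_regs_model *task_pt_regs_model(void *stack_page)
    {
        return (struct pt_regs_model *)
               ((char *)stack_page + THREAD_SIZE_MODEL) - 1;
    }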
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 3bebab378c72..6ad30776e984 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -16,7 +16,12 @@
 #ifndef __ASM_STACKTRACE_H
 #define __ASM_STACKTRACE_H

-struct task_struct;
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/memory.h>
+#include <asm/ptrace.h>

 struct stackframe {
        unsigned long fp;
@@ -31,4 +36,57 @@ extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                            int (*fn)(struct stackframe *, void *), void *data);
 extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);

+DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
+
+static inline bool on_irq_stack(unsigned long sp)
+{
+       unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
+       unsigned long high = low + IRQ_STACK_SIZE;
+
+       if (!low)
+               return false;
+
+       return (low <= sp && sp < high);
+}
+
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+{
+       unsigned long low = (unsigned long)task_stack_page(tsk);
+       unsigned long high = low + THREAD_SIZE;
+
+       return (low <= sp && sp < high);
+}
+
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+
+static inline bool on_overflow_stack(unsigned long sp)
+{
+       unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
+       unsigned long high = low + OVERFLOW_STACK_SIZE;
+
+       return (low <= sp && sp < high);
+}
+#else
+static inline bool on_overflow_stack(unsigned long sp) { return false; }
+#endif
+
+/*
+ * We can only safely access per-cpu stacks from current in a non-preemptible
+ * context.
+ */
+static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
+{
+       if (on_task_stack(tsk, sp))
+               return true;
+       if (tsk != current || preemptible())
+               return false;
+       if (on_irq_stack(sp))
+               return true;
+       if (on_overflow_stack(sp))
+               return true;
+
+       return false;
+}
+
 #endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 46c3b93cf865..aa04b733b349 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -23,19 +23,11 @@

 #include <linux/compiler.h>

-#ifdef CONFIG_ARM64_4K_PAGES
-#define THREAD_SIZE_ORDER       2
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define THREAD_SIZE_ORDER       0
-#endif
-
-#define THREAD_SIZE             16384
-#define THREAD_START_SP         (THREAD_SIZE - 16)
-
 #ifndef __ASSEMBLY__

 struct task_struct;

+#include <asm/memory.h>
 #include <asm/stack_pointer.h>
 #include <asm/types.h>
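Finally, the worked numbers behind EFI_KIMG_ALIGN in the efi.h hunk, assuming CONFIG_VMAP_STACK=y and CONFIG_DEBUG_ALIGN_RODATA=n (so SEGMENT_ALIGN is SZ_64K): only with 64K pages does THREAD_ALIGN (128 KiB) exceed SEGMENT_ALIGN, which is why the larger of the two is used. A small program reproducing the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long min_thread_shift = 14;
        const unsigned long segment_align = 64 * 1024;     /* SZ_64K */
        const unsigned long page_shift[] = { 12, 14, 16 }; /* 4K/16K/64K */

        for (int i = 0; i < 3; i++) {
            /* THREAD_SHIFT = max(MIN_THREAD_SHIFT, PAGE_SHIFT) with VMAP_STACK */
            unsigned long thread_shift = page_shift[i] > min_thread_shift ?
                                         page_shift[i] : min_thread_shift;
            unsigned long thread_align = 2 * (1UL << thread_shift);
            unsigned long kimg_align = segment_align > thread_align ?
                                       segment_align : thread_align;

            printf("%2luK pages: THREAD_ALIGN=%3luK EFI_KIMG_ALIGN=%3luK\n",
                   1UL << (page_shift[i] - 10), thread_align >> 10,
                   kimg_align >> 10);
        }
        return 0;
    }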