author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-14 16:39:13 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-14 16:39:13 -0700
commit    | 1202f4fdbcb6deeffd3eb39c94b8dc0cc8202b16 (patch)
tree      | b822d71e82e13846d3a77ffd08b8ef6e571d7d37 /arch/arm64/include
parent    | d0055f351e647f33f3b0329bff022213bf8aa085 (diff)
parent    | 3c4d9137eefecf273a520d392071ffc9df0a9a7a (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"A bunch of good stuff in here. Worth noting is that we've pulled in
the x86/mm branch from -tip so that we can make use of the core
ioremap changes which allow us to put down huge mappings in the
vmalloc area without screwing up the TLB. Much of the positive
diffstat is because of the rseq selftest for arm64.
Summary:
- Wire up support for qspinlock, replacing our trusty ticket lock
code
- Add an IPI to flush_icache_range() to ensure that stale
instructions fetched into the pipeline are discarded along with the
I-cache lines (a condensed sketch of the new wrapper follows this
summary)
- Support for the GCC "stackleak" plugin
- Support for restartable sequences, plus an arm64 port for the
selftest
- Kexec/kdump support on systems booting with ACPI
- Rewrite of our syscall entry code in C, which allows us to zero the
GPRs on entry from userspace
- Support for chained PMU counters, allowing 64-bit event counters to
be constructed on current CPUs
- Ensure scheduler topology information is kept up-to-date with CPU
hotplug events
- Re-enable support for huge vmalloc/IO mappings now that the core
code has the correct hooks to use break-before-make sequences
- Miscellaneous, non-critical fixes and cleanups"
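The flush_icache_range() change called out above lands in the asm/cacheflush.h hunk further down: the function becomes an inline wrapper that performs the maintenance via __flush_icache_range() and then IPIs all online CPUs so they take a context synchronization event and refetch the new instructions. A condensed sketch of the new wrapper, with the comments abbreviated from the patch:

```c
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
        __flush_icache_range(start, end);

        /*
         * IPI all online CPUs so that they undergo a context
         * synchronization event and refetch the new instructions.
         */
#ifdef CONFIG_KGDB
        /*
         * KGDB performs its cache maintenance with interrupts disabled,
         * so the IPI would deadlock; KGDB already rounds up the
         * secondary CPUs during patching, so return early instead.
         */
        if (kgdb_connected && irqs_disabled())
                return;
#endif
        kick_all_cpus_sync();
}
```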
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (90 commits)
arm64: alternative: Use true and false for boolean values
arm64: kexec: Add comment to explain use of __flush_icache_range()
arm64: sdei: Mark sdei stack helper functions as static
arm64, kaslr: export offset in VMCOREINFO ELF notes
arm64: perf: Add cap_user_time aarch64
efi/libstub: Only disable stackleak plugin for arm64
arm64: drop unused kernel_neon_begin_partial() macro
arm64: kexec: machine_kexec should call __flush_icache_range
arm64: svc: Ensure hardirq tracing is updated before return
arm64: mm: Export __sync_icache_dcache() for xen-privcmd
drivers/perf: arm-ccn: Use devm_ioremap_resource() to map memory
arm64: Add support for STACKLEAK gcc plugin
arm64: Add stack information to on_accessible_stack
drivers/perf: hisi: update the sccl_id/ccl_id when MT is supported
arm64: fix ACPI dependencies
rseq/selftests: Add support for arm64
arm64: acpi: fix alignment fault in accessing ACPI
efi/arm: map UEFI memory map even w/o runtime services enabled
efi/arm: preserve early mapping of UEFI memory map longer for BGRT
drivers: acpi: add dependency of EFI for arm64
...
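One recurring theme in the diff below is the rename of the COMPAT_PSR_* SPSR bits to PSR_AA32_* (ptrace.h, kvm_emulate.h, processor.h), together with new helpers that move the DIT flag between the COMPAT_PSR_DIT_BIT and PSR_AA32_DIT_BIT positions defined in the ptrace.h hunk. A small stand-alone sketch of that translation; the constants and helper bodies are copied from the hunk, while the main() harness and its sample value are purely illustrative:

```c
#include <stdio.h>

/* Bit positions as defined in the ptrace.h hunk below. */
#define PSR_AA32_DIT_BIT        0x01000000UL    /* DIT in SPSR_ELx view */
#define COMPAT_PSR_DIT_BIT      0x00200000UL    /* DIT in AArch32 CPSR view */

static unsigned long compat_psr_to_pstate(unsigned long psr)
{
        unsigned long pstate = psr & ~COMPAT_PSR_DIT_BIT;

        if (psr & COMPAT_PSR_DIT_BIT)
                pstate |= PSR_AA32_DIT_BIT;
        return pstate;
}

static unsigned long pstate_to_compat_psr(unsigned long pstate)
{
        unsigned long psr = pstate & ~PSR_AA32_DIT_BIT;

        if (pstate & PSR_AA32_DIT_BIT)
                psr |= COMPAT_PSR_DIT_BIT;
        return psr;
}

int main(void)
{
        unsigned long cpsr = 0x00200010UL;      /* USR mode with DIT set */
        unsigned long pstate = compat_psr_to_pstate(cpsr);

        /* Round-trips back to the original CPSR value. */
        printf("cpsr %#lx -> pstate %#lx -> cpsr %#lx\n",
               cpsr, pstate, pstate_to_compat_psr(pstate));
        return 0;
}
```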
Diffstat (limited to 'arch/arm64/include')
24 files changed, 354 insertions, 238 deletions
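Among the hunks that follow, asm/barrier.h gains smp_cond_load_relaxed(), which the generic queued spinlock code relies on for its wait loops: it repeatedly loads *ptr and evaluates the condition, parking the CPU in __cmpwait_relaxed() (exclusive load plus WFE) between polls rather than spinning hot, and returns the loaded value once the condition holds. A minimal, hypothetical caller (wait_for_flag and flag are illustrative, not part of the patch):

```c
/*
 * Wait until another CPU stores a non-zero value to *flag.  'VAL' is the
 * macro's name for the freshly loaded value; no acquire ordering is
 * implied, unlike smp_cond_load_acquire().
 */
static inline int wait_for_flag(int *flag)
{
        return smp_cond_load_relaxed(flag, VAL != 0);
}
```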
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 3a9b84d39d71..6cd5d77b6b44 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += preempt.h generic-y += qrwlock.h +generic-y += qspinlock.h generic-y += rwsem.h generic-y += segment.h generic-y += serial.h diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 0db62a4cbce2..709208dfdc8b 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -12,10 +12,12 @@ #ifndef _ASM_ACPI_H #define _ASM_ACPI_H +#include <linux/efi.h> #include <linux/memblock.h> #include <linux/psci.h> #include <asm/cputype.h> +#include <asm/io.h> #include <asm/smp_plat.h> #include <asm/tlbflush.h> @@ -29,18 +31,22 @@ /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI +pgprot_t __acpi_get_mem_attribute(phys_addr_t addr); + /* ACPI table mapping after acpi_permanent_mmap is set */ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) { + /* For normal memory we already have a cacheable mapping. */ + if (memblock_is_map_memory(phys)) + return (void __iomem *)__phys_to_virt(phys); + /* - * EFI's reserve_regions() call adds memory with the WB attribute - * to memblock via early_init_dt_add_memory_arch(). + * We should still honor the memory's attribute here because + * crash dump kernel possibly excludes some ACPI (reclaim) + * regions from memblock list. */ - if (!memblock_is_memory(phys)) - return ioremap(phys, size); - - return ioremap_cache(phys, size); + return __ioremap(phys, size, __acpi_get_mem_attribute(phys)); } #define acpi_os_ioremap acpi_os_ioremap @@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu) * for compatibility. 
*/ #define acpi_disable_cmcff 1 -pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr); +static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) +{ + return __acpi_get_mem_attribute(addr); +} #endif /* CONFIG_ACPI_APEI */ #ifdef CONFIG_ACPI_NUMA int arm64_acpi_numa_init(void); -int acpi_numa_get_nid(unsigned int cpu, u64 hwid); +int acpi_numa_get_nid(unsigned int cpu); +void acpi_map_cpus_to_nodes(void); #else static inline int arm64_acpi_numa_init(void) { return -ENOSYS; } -static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; } +static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } +static inline void acpi_map_cpus_to_nodes(void) { } #endif /* CONFIG_ACPI_NUMA */ #define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index f11518af96a9..822a9192c551 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -128,6 +128,19 @@ do { \ __u.__val; \ }) +#define smp_cond_load_relaxed(ptr, cond_expr) \ +({ \ + typeof(ptr) __PTR = (ptr); \ + typeof(*ptr) VAL; \ + for (;;) { \ + VAL = READ_ONCE(*__PTR); \ + if (cond_expr) \ + break; \ + __cmpwait_relaxed(__PTR, VAL); \ + } \ + VAL; \ +}) + #define smp_cond_load_acquire(ptr, cond_expr) \ ({ \ typeof(ptr) __PTR = (ptr); \ diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 5df5cfe1c143..5ee5bca8c24b 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -21,12 +21,16 @@ #define CTR_L1IP_SHIFT 14 #define CTR_L1IP_MASK 3 #define CTR_DMINLINE_SHIFT 16 +#define CTR_IMINLINE_SHIFT 0 #define CTR_ERG_SHIFT 20 #define CTR_CWG_SHIFT 24 #define CTR_CWG_MASK 15 #define CTR_IDC_SHIFT 28 #define CTR_DIC_SHIFT 29 +#define CTR_CACHE_MINLINE_MASK \ + (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT) + #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) #define ICACHE_POLICY_VPIPT 0 diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index d264a7274811..19844211a4e6 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -19,6 +19,7 @@ #ifndef __ASM_CACHEFLUSH_H #define __ASM_CACHEFLUSH_H +#include <linux/kgdb.h> #include <linux/mm.h> /* @@ -71,7 +72,7 @@ * - kaddr - page address * - size - region size */ -extern void flush_icache_range(unsigned long start, unsigned long end); +extern void __flush_icache_range(unsigned long start, unsigned long end); extern int invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); extern void __inval_dcache_area(void *addr, size_t len); @@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len); extern long __flush_cache_user_range(unsigned long start, unsigned long end); extern void sync_icache_aliases(void *kaddr, unsigned long len); +static inline void flush_icache_range(unsigned long start, unsigned long end) +{ + __flush_icache_range(start, end); + + /* + * IPI all online CPUs so that they undergo a context synchronization + * event and are forced to refetch the new instructions. + */ +#ifdef CONFIG_KGDB + /* + * KGDB performs cache maintenance with interrupts disabled, so we + * will deadlock trying to IPI the secondary CPUs. In theory, we can + * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that + * just means that KGDB will elide the maintenance altogether! 
As it + * turns out, KGDB uses IPIs to round-up the secondary CPUs during + * the patching operation, so we don't need extra IPIs here anyway. + * In which case, add a KGDB-specific bodge and return early. + */ + if (kgdb_connected && irqs_disabled()) + return; +#endif + kick_all_cpus_sync(); +} + static inline void flush_cache_mm(struct mm_struct *mm) { } diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 8a699c708fc9..be3bf3d08916 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -49,7 +49,8 @@ #define ARM64_HAS_CACHE_DIC 28 #define ARM64_HW_DBM 29 #define ARM64_SSBD 30 +#define ARM64_MISMATCHED_CACHE_TYPE 31 -#define ARM64_NCAPS 31 +#define ARM64_NCAPS 32 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index fa92747a49c8..dd1ad3950ef5 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -16,13 +16,15 @@ #ifndef __ASM_FP_H #define __ASM_FP_H -#include <asm/ptrace.h> #include <asm/errno.h> +#include <asm/ptrace.h> #include <asm/processor.h> #include <asm/sigcontext.h> +#include <asm/sysreg.h> #ifndef __ASSEMBLY__ +#include <linux/build_bug.h> #include <linux/cache.h> #include <linux/init.h> #include <linux/stddef.h> @@ -102,6 +104,16 @@ extern int sve_set_vector_length(struct task_struct *task, extern int sve_set_current_vl(unsigned long arg); extern int sve_get_current_vl(void); +static inline void sve_user_disable(void) +{ + sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0); +} + +static inline void sve_user_enable(void) +{ + sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN); +} + /* * Probing and setup functions. * Calls to these functions must be serialised with one another. @@ -128,6 +140,9 @@ static inline int sve_get_current_vl(void) return -EINVAL; } +static inline void sve_user_disable(void) { BUILD_BUG(); } +static inline void sve_user_enable(void) { BUILD_BUG(); } + static inline void sve_init_vq_map(void) { } static inline void sve_update_vq_map(void) { } static inline int sve_verify_vq_map(void) { return 0; } diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index f62c56b1793f..c6802dea6cab 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base, s32 aarch64_get_branch_offset(u32 insn); u32 aarch64_set_branch_offset(u32 insn, s32 offset); -bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); - int aarch64_insn_patch_text_nosync(void *addr, u32 insn); int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 1dab3a984608..0c97e45d1dc3 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) { - *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; + *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT; } /* @@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) u32 mode; if (vcpu_mode_is_32bit(vcpu)) { - mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; - return mode > COMPAT_PSR_MODE_USR; + mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; + return mode > PSR_AA32_MODE_USR; } mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; @@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct 
kvm_vcpu *vcpu) static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) { - *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT; + *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; } else { u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); sctlr |= (1 << 25); @@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) - return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT); + return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT); return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); } diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h index f922eaf780f9..fb9d137256a6 100644 --- a/arch/arm64/include/asm/neon.h +++ b/arch/arm64/include/asm/neon.h @@ -19,11 +19,4 @@ void kernel_neon_begin(void); void kernel_neon_end(void); -/* - * Temporary macro to allow the crypto code to compile. Note that the - * semantics of kernel_neon_begin_partial() are now different from the - * original as it does not allow being called in an interrupt context. - */ -#define kernel_neon_begin_partial(num_regs) kernel_neon_begin() - #endif /* ! __ASM_NEON_H */ diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h index 01bc46d5b43a..626ad01e83bf 100644 --- a/arch/arm64/include/asm/numa.h +++ b/arch/arm64/include/asm/numa.h @@ -35,10 +35,14 @@ void __init numa_set_distance(int from, int to, int distance); void __init numa_free_distance(void); void __init early_map_cpu_to_node(unsigned int cpu, int nid); void numa_store_cpu_info(unsigned int cpu); +void numa_add_cpu(unsigned int cpu); +void numa_remove_cpu(unsigned int cpu); #else /* CONFIG_NUMA */ static inline void numa_store_cpu_info(unsigned int cpu) { } +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } static inline void arm64_numa_init(void) { } static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index a73ae1e49200..79657ad91397 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { start_thread_common(regs, pc); - regs->pstate = COMPAT_PSR_MODE_USR; + regs->pstate = PSR_AA32_MODE_USR; if (pc & 1) - regs->pstate |= COMPAT_PSR_T_BIT; + regs->pstate |= PSR_AA32_T_BIT; #ifdef __AARCH64EB__ - regs->pstate |= COMPAT_PSR_E_BIT; + regs->pstate |= PSR_AA32_E_BIT; #endif regs->compat_sp = sp; @@ -266,5 +266,20 @@ extern void __init minsigstksz_setup(void); #define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_GET_VL() sve_get_current_vl() +/* + * For CONFIG_GCC_PLUGIN_STACKLEAK + * + * These need to be macros because otherwise we get stuck in a nightmare + * of header definitions for the use of task_stack_page. 
+ */ + +#define current_top_of_stack() \ +({ \ + struct stack_info _info; \ + BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \ + _info.high; \ +}) +#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL)) + #endif /* __ASSEMBLY__ */ #endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 6069d66e0bc2..177b851ca6d9 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -35,36 +35,39 @@ #define COMPAT_PTRACE_GETHBPREGS 29 #define COMPAT_PTRACE_SETHBPREGS 30 -/* AArch32 CPSR bits */ -#define COMPAT_PSR_MODE_MASK 0x0000001f -#define COMPAT_PSR_MODE_USR 0x00000010 -#define COMPAT_PSR_MODE_FIQ 0x00000011 -#define COMPAT_PSR_MODE_IRQ 0x00000012 -#define COMPAT_PSR_MODE_SVC 0x00000013 -#define COMPAT_PSR_MODE_ABT 0x00000017 -#define COMPAT_PSR_MODE_HYP 0x0000001a -#define COMPAT_PSR_MODE_UND 0x0000001b -#define COMPAT_PSR_MODE_SYS 0x0000001f -#define COMPAT_PSR_T_BIT 0x00000020 -#define COMPAT_PSR_F_BIT 0x00000040 -#define COMPAT_PSR_I_BIT 0x00000080 -#define COMPAT_PSR_A_BIT 0x00000100 -#define COMPAT_PSR_E_BIT 0x00000200 -#define COMPAT_PSR_J_BIT 0x01000000 -#define COMPAT_PSR_Q_BIT 0x08000000 -#define COMPAT_PSR_V_BIT 0x10000000 -#define COMPAT_PSR_C_BIT 0x20000000 -#define COMPAT_PSR_Z_BIT 0x40000000 -#define COMPAT_PSR_N_BIT 0x80000000 -#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ -#define COMPAT_PSR_GE_MASK 0x000f0000 +/* SPSR_ELx bits for exceptions taken from AArch32 */ +#define PSR_AA32_MODE_MASK 0x0000001f +#define PSR_AA32_MODE_USR 0x00000010 +#define PSR_AA32_MODE_FIQ 0x00000011 +#define PSR_AA32_MODE_IRQ 0x00000012 +#define PSR_AA32_MODE_SVC 0x00000013 +#define PSR_AA32_MODE_ABT 0x00000017 +#define PSR_AA32_MODE_HYP 0x0000001a +#define PSR_AA32_MODE_UND 0x0000001b +#define PSR_AA32_MODE_SYS 0x0000001f +#define PSR_AA32_T_BIT 0x00000020 +#define PSR_AA32_F_BIT 0x00000040 +#define PSR_AA32_I_BIT 0x00000080 +#define PSR_AA32_A_BIT 0x00000100 +#define PSR_AA32_E_BIT 0x00000200 +#define PSR_AA32_DIT_BIT 0x01000000 +#define PSR_AA32_Q_BIT 0x08000000 +#define PSR_AA32_V_BIT 0x10000000 +#define PSR_AA32_C_BIT 0x20000000 +#define PSR_AA32_Z_BIT 0x40000000 +#define PSR_AA32_N_BIT 0x80000000 +#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */ +#define PSR_AA32_GE_MASK 0x000f0000 #ifdef CONFIG_CPU_BIG_ENDIAN -#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT +#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT #else -#define COMPAT_PSR_ENDSTATE 0 +#define PSR_AA32_ENDSTATE 0 #endif +/* AArch32 CPSR bits, as seen in AArch32 */ +#define COMPAT_PSR_DIT_BIT 0x00200000 + /* * These are 'magic' values for PTRACE_PEEKUSR that return info about where a * process is located in memory. @@ -111,6 +114,30 @@ #define compat_sp_fiq regs[29] #define compat_lr_fiq regs[30] +static inline unsigned long compat_psr_to_pstate(const unsigned long psr) +{ + unsigned long pstate; + + pstate = psr & ~COMPAT_PSR_DIT_BIT; + + if (psr & COMPAT_PSR_DIT_BIT) + pstate |= PSR_AA32_DIT_BIT; + + return pstate; +} + +static inline unsigned long pstate_to_compat_psr(const unsigned long pstate) +{ + unsigned long psr; + + psr = pstate & ~PSR_AA32_DIT_BIT; + + if (pstate & PSR_AA32_DIT_BIT) + psr |= COMPAT_PSR_DIT_BIT; + + return psr; +} + /* * This struct defines the way the registers are stored on the stack during an * exception. 
Note that sizeof(struct pt_regs) has to be a multiple of 16 (for @@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs) #ifdef CONFIG_COMPAT #define compat_thumb_mode(regs) \ - (((regs)->pstate & COMPAT_PSR_T_BIT)) + (((regs)->pstate & PSR_AA32_T_BIT)) #else #define compat_thumb_mode(regs) (0) #endif diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h index e073e6886685..ffe47d766c25 100644 --- a/arch/arm64/include/asm/sdei.h +++ b/arch/arm64/include/asm/sdei.h @@ -40,15 +40,18 @@ asmlinkage unsigned long __sdei_handler(struct pt_regs *regs, unsigned long sdei_arch_get_entry_point(int conduit); #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x) -bool _on_sdei_stack(unsigned long sp); -static inline bool on_sdei_stack(unsigned long sp) +struct stack_info; + +bool _on_sdei_stack(unsigned long sp, struct stack_info *info); +static inline bool on_sdei_stack(unsigned long sp, + struct stack_info *info) { if (!IS_ENABLED(CONFIG_VMAP_STACK)) return false; if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) return false; if (in_nmi()) - return _on_sdei_stack(sp); + return _on_sdei_stack(sp, info); return false; } diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 26c5bd7d88d8..38116008d18b 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -16,123 +16,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H -#include <asm/lse.h> -#include <asm/spinlock_types.h> -#include <asm/processor.h> - -/* - * Spinlock implementation. - * - * The memory barriers are implicit with the load-acquire and store-release - * instructions. - */ - -static inline void arch_spin_lock(arch_spinlock_t *lock) -{ - unsigned int tmp; - arch_spinlock_t lockval, newval; - - asm volatile( - /* Atomically increment the next ticket. */ - ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ -" prfm pstl1strm, %3\n" -"1: ldaxr %w0, %3\n" -" add %w1, %w0, %w5\n" -" stxr %w2, %w1, %3\n" -" cbnz %w2, 1b\n", - /* LSE atomics */ -" mov %w2, %w5\n" -" ldadda %w2, %w0, %3\n" - __nops(3) - ) - - /* Did we get the lock? */ -" eor %w1, %w0, %w0, ror #16\n" -" cbz %w1, 3f\n" - /* - * No: spin on the owner. Send a local event to avoid missing an - * unlock before the exclusive load. - */ -" sevl\n" -"2: wfe\n" -" ldaxrh %w2, %4\n" -" eor %w1, %w2, %w0, lsr #16\n" -" cbnz %w1, 2b\n" - /* We got the lock. Critical section starts here. 
*/ -"3:" - : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock) - : "Q" (lock->owner), "I" (1 << TICKET_SHIFT) - : "memory"); -} - -static inline int arch_spin_trylock(arch_spinlock_t *lock) -{ - unsigned int tmp; - arch_spinlock_t lockval; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " prfm pstl1strm, %2\n" - "1: ldaxr %w0, %2\n" - " eor %w1, %w0, %w0, ror #16\n" - " cbnz %w1, 2f\n" - " add %w0, %w0, %3\n" - " stxr %w1, %w0, %2\n" - " cbnz %w1, 1b\n" - "2:", - /* LSE atomics */ - " ldr %w0, %2\n" - " eor %w1, %w0, %w0, ror #16\n" - " cbnz %w1, 1f\n" - " add %w1, %w0, %3\n" - " casa %w0, %w1, %2\n" - " sub %w1, %w1, %3\n" - " eor %w1, %w1, %w0\n" - "1:") - : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) - : "I" (1 << TICKET_SHIFT) - : "memory"); - - return !tmp; -} - -static inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - unsigned long tmp; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - " ldrh %w1, %0\n" - " add %w1, %w1, #1\n" - " stlrh %w1, %0", - /* LSE atomics */ - " mov %w1, #1\n" - " staddlh %w1, %0\n" - __nops(1)) - : "=Q" (lock->owner), "=&r" (tmp) - : - : "memory"); -} - -static inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - return lock.owner == lock.next; -} - -static inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - return !arch_spin_value_unlocked(READ_ONCE(*lock)); -} - -static inline int arch_spin_is_contended(arch_spinlock_t *lock) -{ - arch_spinlock_t lockval = READ_ONCE(*lock); - return (lockval.next - lockval.owner) > 1; -} -#define arch_spin_is_contended arch_spin_is_contended - #include <asm/qrwlock.h> +#include <asm/qspinlock.h> /* See include/linux/spinlock.h */ #define smp_mb__after_spinlock() smp_mb() diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h index 6b856012c51b..a157ff465e27 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -20,22 +20,7 @@ # error "please don't include this file directly" #endif -#include <linux/types.h> - -#define TICKET_SHIFT 16 - -typedef struct { -#ifdef __AARCH64EB__ - u16 next; - u16 owner; -#else - u16 owner; - u16 next; -#endif -} __aligned(4) arch_spinlock_t; - -#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 } - +#include <asm-generic/qspinlock_types.h> #include <asm-generic/qrwlock_types.h> #endif diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 902f9edacbea..e86737b7c924 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -32,6 +32,21 @@ struct stackframe { #endif }; +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, + STACK_TYPE_IRQ, + STACK_TYPE_OVERFLOW, + STACK_TYPE_SDEI_NORMAL, + STACK_TYPE_SDEI_CRITICAL, +}; + +struct stack_info { + unsigned long low; + unsigned long high; + enum stack_type type; +}; + extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame, int (*fn)(struct stackframe *, void *), void *data); @@ -39,7 +54,8 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk); DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); -static inline bool on_irq_stack(unsigned long sp) +static inline bool on_irq_stack(unsigned long sp, + struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); unsigned long high = low + IRQ_STACK_SIZE; @@ -47,46 +63,79 @@ static inline bool on_irq_stack(unsigned long sp) if (!low) return false; - return 
(low <= sp && sp < high); + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_IRQ; + } + + return true; } -static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp) +static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, + struct stack_info *info) { unsigned long low = (unsigned long)task_stack_page(tsk); unsigned long high = low + THREAD_SIZE; - return (low <= sp && sp < high); + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_TASK; + } + + return true; } #ifdef CONFIG_VMAP_STACK DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); -static inline bool on_overflow_stack(unsigned long sp) +static inline bool on_overflow_stack(unsigned long sp, + struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); unsigned long high = low + OVERFLOW_STACK_SIZE; - return (low <= sp && sp < high); + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_OVERFLOW; + } + + return true; } #else -static inline bool on_overflow_stack(unsigned long sp) { return false; } +static inline bool on_overflow_stack(unsigned long sp, + struct stack_info *info) { return false; } #endif + /* * We can only safely access per-cpu stacks from current in a non-preemptible * context. */ -static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp) +static inline bool on_accessible_stack(struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) { - if (on_task_stack(tsk, sp)) + if (on_task_stack(tsk, sp, info)) return true; if (tsk != current || preemptible()) return false; - if (on_irq_stack(sp)) + if (on_irq_stack(sp, info)) return true; - if (on_overflow_stack(sp)) + if (on_overflow_stack(sp, info)) return true; - if (on_sdei_stack(sp)) + if (on_sdei_stack(sp, info)) return true; return false; diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 709a574468f0..ad8be16a39c9 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -20,7 +20,13 @@ #include <linux/compat.h> #include <linux/err.h> -extern const void *sys_call_table[]; +typedef long (*syscall_fn_t)(struct pt_regs *regs); + +extern const syscall_fn_t sys_call_table[]; + +#ifdef CONFIG_COMPAT +extern const syscall_fn_t compat_sys_call_table[]; +#endif static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h new file mode 100644 index 000000000000..a4477e515b79 --- /dev/null +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - arm64 specific wrappers to syscall definitions + * + * Based on arch/x86/include/asm_syscall_wrapper.h + */ + +#ifndef __ASM_SYSCALL_WRAPPER_H +#define __ASM_SYSCALL_WRAPPER_H + +#define SC_ARM64_REGS_TO_ARGS(x, ...) \ + __MAP(x,__SC_ARGS \ + ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \ + ,,regs->regs[3],,regs->regs[4],,regs->regs[5]) + +#ifdef CONFIG_COMPAT + +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) 
\ + asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__arm64_compat_sys##name, ERRNO); \ + static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs) \ + { \ + return __se_compat_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \ + } \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#define COMPAT_SYSCALL_DEFINE0(sname) \ + asmlinkage long __arm64_compat_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \ + asmlinkage long __arm64_compat_sys_##sname(void) + +#define COND_SYSCALL_COMPAT(name) \ + cond_syscall(__arm64_compat_sys_##name); + +#define COMPAT_SYS_NI(name) \ + SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers); + +#endif /* CONFIG_COMPAT */ + +#define __SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#ifndef SYSCALL_DEFINE0 +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __arm64_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \ + asmlinkage long __arm64_sys_##sname(void) +#endif + +#ifndef COND_SYSCALL +#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name) +#endif + +#ifndef SYS_NI +#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers); +#endif + +#endif /* __ASM_SYSCALL_WRAPPER_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index a8f84812c6e8..e205ec8489e9 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -436,7 +436,8 @@ #define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \ - (1 << 27) | (1 << 30) | (1 << 31)) + (1 << 27) | (1 << 30) | (1 << 31) | \ + (0xffffffffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL2 SCTLR_ELx_EE @@ -452,9 +453,9 @@ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) -/* Check all the bits are accounted for */ -#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0) - +#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff +#error "Inconsistent SCTLR_EL2 set/clear bits" +#endif /* SCTLR_EL1 specific flags. 
*/ #define SCTLR_EL1_UCI (1 << 26) @@ -473,7 +474,8 @@ #define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \ (1 << 29)) #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \ - (1 << 27) | (1 << 30) | (1 << 31)) + (1 << 27) | (1 << 30) | (1 << 31) | \ + (0xffffffffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) @@ -492,8 +494,9 @@ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ SCTLR_EL1_RES0) -/* Check all the bits are accounted for */ -#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0) +#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff +#error "Inconsistent SCTLR_EL1 set/clear bits" +#endif /* id_aa64isar0 */ #define ID_AA64ISAR0_TS_SHIFT 52 @@ -739,19 +742,6 @@ asm( write_sysreg(__scs_new, sysreg); \ } while (0) -static inline void config_sctlr_el1(u32 clear, u32 set) -{ - u32 val; - - SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS; - SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS; - - val = read_sysreg(sctlr_el1); - val &= ~clear; - val |= set; - write_sysreg(val, sctlr_el1); -} - #endif #endif /* __ASM_SYSREG_H */ diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index dfc61d73f740..a4a1901140ee 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -218,6 +218,13 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm, dsb(ish); } +static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) +{ + unsigned long addr = __TLBI_VADDR(kaddr, 0); + + __tlbi(vaae1is, addr); + dsb(ish); +} #endif #endif diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index df48212f767b..49a0fee4f89b 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -11,7 +11,7 @@ struct cpu_topology { int llc_id; cpumask_t thread_sibling; cpumask_t core_sibling; - cpumask_t llc_siblings; + cpumask_t llc_sibling; }; extern struct cpu_topology cpu_topology[NR_CPUS]; @@ -20,9 +20,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS]; #define topology_core_id(cpu) (cpu_topology[cpu].core_id) #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); +void remove_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); #ifdef CONFIG_NUMA diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index a0baa9af5487..e0d0f5b856e7 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -43,7 +43,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 398 +#define __NR_compat_syscalls 399 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index ef292160748c..2cd6dcf8d246 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -260,7 +260,7 @@ __SYSCALL(117, sys_ni_syscall) #define __NR_fsync 118 __SYSCALL(__NR_fsync, sys_fsync) #define __NR_sigreturn 119 -__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper) +__SYSCALL(__NR_sigreturn, compat_sys_sigreturn) #define __NR_clone 120 __SYSCALL(__NR_clone, sys_clone) #define __NR_setdomainname 121 @@ -368,7 +368,7 @@ 
__SYSCALL(__NR_getresgid, sys_getresgid16) #define __NR_prctl 172 __SYSCALL(__NR_prctl, sys_prctl) #define __NR_rt_sigreturn 173 -__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper) +__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn) #define __NR_rt_sigaction 174 __SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction) #define __NR_rt_sigprocmask 175 @@ -382,9 +382,9 @@ __SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo) #define __NR_rt_sigsuspend 179 __SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend) #define __NR_pread64 180 -__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper) +__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64) #define __NR_pwrite64 181 -__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper) +__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64) #define __NR_chown 182 __SYSCALL(__NR_chown, sys_chown16) #define __NR_getcwd 183 @@ -406,11 +406,11 @@ __SYSCALL(__NR_vfork, sys_vfork) #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */ #define __NR_mmap2 192 -__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper) +__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2) #define __NR_truncate64 193 -__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper) +__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64) #define __NR_ftruncate64 194 -__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper) +__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64) #define __NR_stat64 195 __SYSCALL(__NR_stat64, sys_stat64) #define __NR_lstat64 196 @@ -472,7 +472,7 @@ __SYSCALL(223, sys_ni_syscall) #define __NR_gettid 224 __SYSCALL(__NR_gettid, sys_gettid) #define __NR_readahead 225 -__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper) +__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead) #define __NR_setxattr 226 __SYSCALL(__NR_setxattr, sys_setxattr) #define __NR_lsetxattr 227 @@ -554,15 +554,15 @@ __SYSCALL(__NR_clock_getres, compat_sys_clock_getres) #define __NR_clock_nanosleep 265 __SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep) #define __NR_statfs64 266 -__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper) +__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64) #define __NR_fstatfs64 267 -__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper) +__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64) #define __NR_tgkill 268 __SYSCALL(__NR_tgkill, sys_tgkill) #define __NR_utimes 269 __SYSCALL(__NR_utimes, compat_sys_utimes) #define __NR_arm_fadvise64_64 270 -__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper) +__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64) #define __NR_pciconfig_iobase 271 __SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase) #define __NR_pciconfig_read 272 @@ -704,7 +704,7 @@ __SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list) #define __NR_splice 340 __SYSCALL(__NR_splice, sys_splice) #define __NR_sync_file_range2 341 -__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper) +__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2) #define __NR_tee 342 __SYSCALL(__NR_tee, sys_tee) #define __NR_vmsplice 343 @@ -726,7 +726,7 @@ __SYSCALL(__NR_timerfd_create, sys_timerfd_create) #define __NR_eventfd 351 __SYSCALL(__NR_eventfd, sys_eventfd) #define __NR_fallocate 352 -__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper) +__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate) #define __NR_timerfd_settime 353 __SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime) #define 
__NR_timerfd_gettime 354 @@ -817,6 +817,8 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) __SYSCALL(__NR_pkey_free, sys_pkey_free) #define __NR_statx 397 __SYSCALL(__NR_statx, sys_statx) +#define __NR_rseq 398 +__SYSCALL(__NR_rseq, sys_rseq) /* * Please add new compat syscalls above this comment and update |
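The new asm/syscall_wrapper.h above is what lets the C syscall entry code pass only a struct pt_regs pointer and zero the remaining GPRs: each syscall is emitted as a __arm64_sys_* function that unpacks its arguments from regs->regs[0..5]. A simplified, illustrative expansion of SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) under that macro; SYSCALL_METADATA, ALLOW_ERROR_INJECTION and the __SC_TEST/__PROTECT plumbing are omitted here:

```c
asmlinkage long __arm64_sys_dup2(const struct pt_regs *regs);
static long __se_sys_dup2(long oldfd, long newfd);
static inline long __do_sys_dup2(unsigned int oldfd, unsigned int newfd);

asmlinkage long __arm64_sys_dup2(const struct pt_regs *regs)
{
        /* Arguments are unpacked from the GPRs saved at kernel entry. */
        return __se_sys_dup2(regs->regs[0], regs->regs[1]);
}

static long __se_sys_dup2(long oldfd, long newfd)
{
        return __do_sys_dup2((unsigned int)oldfd, (unsigned int)newfd);
}

static inline long __do_sys_dup2(unsigned int oldfd, unsigned int newfd)
{
        /* ...syscall body supplied by the SYSCALL_DEFINE2() user... */
        return 0;
}
```

The entry code only ever dispatches to the pt_regs-based __arm64_sys_* symbol, so the original argument registers never need to be preserved across the call.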