Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/Kbuild                      |    8
-rw-r--r--  arch/arm64/include/asm/acpi.h                      |   12
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h                |   15
-rw-r--r--  arch/arm64/include/asm/arch_timer.h                |    2
-rw-r--r--  arch/arm64/include/asm/arm_pmuv3.h                 |    2
-rw-r--r--  arch/arm64/include/asm/asm-extable.h               |    3
-rw-r--r--  arch/arm64/include/asm/cpucaps.h                   |    2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h                |    4
-rw-r--r--  arch/arm64/include/asm/cputype.h                   |    6
-rw-r--r--  arch/arm64/include/asm/el2_setup.h                 |    6
-rw-r--r--  arch/arm64/include/asm/esr.h                       |   33
-rw-r--r--  arch/arm64/include/asm/io.h                        |   36
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h                   |    6
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h               |   71
-rw-r--r--  arch/arm64/include/asm/kvm_host.h                  |   25
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h                   |    4
-rw-r--r--  arch/arm64/include/asm/kvm_pkvm.h                  |    9
-rw-r--r--  arch/arm64/include/asm/mmu_context.h               |    4
-rw-r--r--  arch/arm64/include/asm/mte.h                       |    4
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h             |    1
-rw-r--r--  arch/arm64/include/asm/ptrace.h                    |   35
-rw-r--r--  arch/arm64/include/asm/runtime-const.h             |   88
-rw-r--r--  arch/arm64/include/asm/seccomp.h                   |   13
-rw-r--r--  arch/arm64/include/asm/smp.h                       |   13
-rw-r--r--  arch/arm64/include/asm/sysreg.h                    |    4
-rw-r--r--  arch/arm64/include/asm/uaccess.h                   |  169
-rw-r--r--  arch/arm64/include/asm/unistd.h                    |   22
-rw-r--r--  arch/arm64/include/asm/unistd32.h                  |  939
-rw-r--r--  arch/arm64/include/asm/vdso/compat_gettimeofday.h  |   12
-rw-r--r--  arch/arm64/include/asm/word-at-a-time.h            |   11
30 files changed, 434 insertions(+), 1125 deletions(-)
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 4b6d2d52053e..7d7d97ad3cd5 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,4 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
+syscall-y += syscall_table_32.h
+syscall-y += syscall_table_64.h
+
+# arm32 syscall table used by lib/compat_audit.c:
+syscall-y += unistd_32.h
+# same constants with prefixes, used by vdso, seccomp and sigreturn:
+syscall-y += unistd_compat_32.h
+
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 6792a1f83f2a..a407f9cd549e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -119,6 +119,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
return acpi_cpu_get_madt_gicc(cpu)->uid;
}
+static inline int get_cpu_for_acpi_id(u32 uid)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (acpi_cpu_get_madt_gicc(cpu) &&
+ uid == get_acpi_id_for_cpu(cpu))
+ return cpu;
+
+ return -EINVAL;
+}
+
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
int apei_claim_sea(struct pt_regs *regs);
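[Not part of the patch: a minimal sketch of how the new get_cpu_for_acpi_id() helper is typically used. Given a UID taken from a MADT GICC entry, it returns the matching logical CPU or -EINVAL; uid_to_cpu() below is a made-up wrapper.]

	/* Hypothetical caller: map a firmware-provided UID back to a logical CPU. */
	static int uid_to_cpu(u32 uid)
	{
		int cpu = get_cpu_for_acpi_id(uid);

		if (cpu < 0)
			pr_warn("no MADT GICC entry matches ACPI UID %u\n", uid);
		return cpu;
	}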
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 5f172611654b..9e96f024b2f1 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -175,21 +175,6 @@ static inline bool gic_prio_masking_enabled(void)
static inline void gic_pmr_mask_irqs(void)
{
- BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF |
- GIC_PRIO_PSR_I_SET));
- BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
- /*
- * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
- * and non-secure PMR accesses are not subject to the shifts that
- * are applied to IRQ priorities
- */
- BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
- /*
- * Same situation as above, but now we make sure that we can mask
- * regular interrupts.
- */
- BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS |
- GIC_PRIO_PSR_I_SET));
gic_write_pmr(GIC_PRIO_IRQOFF);
}
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 934c658ee947..f5794d50f51d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -15,7 +15,7 @@
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
-#include <linux/smp.h>
+#include <linux/percpu.h>
#include <linux/types.h>
#include <clocksource/arm_arch_timer.h>
diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
index c27404fa4418..a4697a0b6835 100644
--- a/arch/arm64/include/asm/arm_pmuv3.h
+++ b/arch/arm64/include/asm/arm_pmuv3.h
@@ -6,7 +6,7 @@
#ifndef __ASM_PMUV3_H
#define __ASM_PMUV3_H
-#include <linux/kvm_host.h>
+#include <asm/kvm_host.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index 980d1dd8e1a3..b8a5861dc7b7 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -112,6 +112,9 @@
#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
+#define _ASM_EXTABLE_KACCESS(insn, fixup) \
+ _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 7529c0263933..a6e5b07b64fd 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -59,7 +59,7 @@ cpucap_is_possible(const unsigned int cap)
case ARM64_WORKAROUND_REPEAT_TLBI:
return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
case ARM64_WORKAROUND_SPECULATIVE_SSBS:
- return IS_ENABLED(CONFIG_ARM64_WORKAROUND_SPECULATIVE_SSBS);
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
}
return true;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8b904a757bd3..558434267271 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -588,14 +588,14 @@ static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL1_AARCH32;
}
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
- return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
+ return val == ID_AA64PFR0_EL1_EL0_AARCH32;
}
static inline bool id_aa64pfr0_sve(u64 pfr0)
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7b32b99023a2..1cb0704c6163 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,9 +86,12 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
+#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
+#define ARM_CPU_PART_CORTEX_X925 0xD85
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -162,9 +165,12 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
+#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
+#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index e4546b29dd0c..fd87c4b8f984 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -146,7 +146,7 @@
/* Coprocessor traps */
.macro __init_el2_cptr
__check_hvhe .LnVHE_\@, x1
- mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
+ mov x0, #CPACR_ELx_FPEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
.LnVHE_\@:
@@ -277,7 +277,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SVE traps
- orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+ orr x0, x0, #CPACR_ELx_ZEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
@@ -298,7 +298,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SME traps
- orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
+ orr x0, x0, #CPACR_ELx_SMEN
msr cpacr_el1, x0
b .Lskip_set_cptr_sme_\@
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7abf09df7033..3f482500f71f 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -121,6 +121,14 @@
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
+/* Status codes for individual page table levels */
+#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + n)
+#define ESR_ELx_FSC_PERM_L(n) (ESR_ELx_FSC_PERM + n)
+
+#define ESR_ELx_FSC_FAULT_nL (0x2C)
+#define ESR_ELx_FSC_FAULT_L(n) (((n) < 0 ? ESR_ELx_FSC_FAULT_nL : \
+ ESR_ELx_FSC_FAULT) + (n))
+
/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
@@ -388,20 +396,33 @@ static inline bool esr_is_data_abort(unsigned long esr)
static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
- /* Translation fault, level -1 */
- if ((esr & ESR_ELx_FSC) == 0b101011)
- return true;
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_FAULT_L(3)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(2)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(1)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(0)) ||
+ (esr == ESR_ELx_FSC_FAULT_L(-1));
}
static inline bool esr_fsc_is_permission_fault(unsigned long esr)
{
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_PERM_L(3)) ||
+ (esr == ESR_ELx_FSC_PERM_L(2)) ||
+ (esr == ESR_ELx_FSC_PERM_L(1)) ||
+ (esr == ESR_ELx_FSC_PERM_L(0));
}
static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
{
- return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS;
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_ACCESS_L(3)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(2)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(1)) ||
+ (esr == ESR_ELx_FSC_ACCESS_L(0));
}
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
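[Not part of the patch: a quick sanity check on the new per-level macros, using the values defined above. ESR_ELx_FSC_FAULT_nL is 0x2C, so ESR_ELx_FSC_FAULT_L(-1) evaluates to 0x2B, i.e. the 0b101011 "translation fault, level -1" code that the old esr_fsc_is_translation_fault() matched explicitly, while ESR_ELx_FSC_FAULT_L(0..3) still cover the classic 0x04..0x07 codes. One could verify this locally with something like:]

	static inline void esr_fsc_macro_selftest(void)
	{
		/* Hypothetical checks; the values follow from the macros above. */
		BUILD_BUG_ON(ESR_ELx_FSC_FAULT_L(-1) != 0x2B);	/* old 0b101011 case */
		BUILD_BUG_ON(ESR_ELx_FSC_FAULT_L(0)  != ESR_ELx_FSC_FAULT);
		BUILD_BUG_ON(ESR_ELx_FSC_PERM_L(3)   != ESR_ELx_FSC_PERM + 3);
	}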
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4ff0ae3f6d66..41fd90895dfc 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
* emit the large TLP from the CPU.
*/
-static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
- const u32 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
- if (count == 8 || count == 4 || count == 2 || count == 1) {
+ if (__builtin_constant_p(count) &&
+ (count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned32(to, from, count);
dgh();
} else {
__iowrite32_copy_full(to, from, count);
}
}
+#define __iowrite32_copy __iowrite32_copy
-#define __iowrite32_copy(to, from, count) \
- (__builtin_constant_p(count) ? \
- __const_iowrite32_copy(to, from, count) : \
- __iowrite32_copy_full(to, from, count))
-
-static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
- const u64 *from, size_t count)
+static __always_inline void
+__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
+ size_t count)
{
switch (count) {
case 8:
@@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
-static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
- size_t count)
+static __always_inline void
+__iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
- if (count == 8 || count == 4 || count == 2 || count == 1) {
+ if (__builtin_constant_p(count) &&
+ (count == 8 || count == 4 || count == 2 || count == 1)) {
__const_memcpy_toio_aligned64(to, from, count);
dgh();
} else {
__iowrite64_copy_full(to, from, count);
}
}
-
-#define __iowrite64_copy(to, from, count) \
- (__builtin_constant_p(count) ? \
- __const_iowrite64_copy(to, from, count) : \
- __iowrite64_copy_full(to, from, count))
+#define __iowrite64_copy __iowrite64_copy
/*
* I/O memory mapping functions.
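[Not part of the patch: a hedged sketch of what the __iowrite32_copy() rework means for callers. The __builtin_constant_p(count) test now lives inside an __always_inline function, so a small constant count compiles to the unrolled __const_memcpy_toio_aligned32() sequence followed by dgh(), while anything else falls back to __iowrite32_copy_full(). The helper names below are made up.]

	/* Illustration only: push a fixed 8-word descriptor to device memory. */
	static void push_descriptor(void __iomem *dest, const u32 *buf)
	{
		/* constant count: inlines the unrolled path and ends with dgh() */
		__iowrite32_copy(dest, buf, 8);
	}

	/* A runtime-variable count takes the out-of-line __iowrite32_copy_full() path. */
	static void push_words(void __iomem *dest, const u32 *buf, size_t n)
	{
		__iowrite32_copy(dest, buf, n);
	}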
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index e01bb5ca13b7..b2adc2c6c82a 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -305,6 +305,12 @@
GENMASK(19, 14) | \
BIT(11))
+#define CPTR_VHE_EL2_RES0 (GENMASK(63, 32) | \
+ GENMASK(27, 26) | \
+ GENMASK(23, 22) | \
+ GENMASK(19, 18) | \
+ GENMASK(15, 0))
+
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_E2TB_MASK (UL(0x3))
#define MDCR_EL2_E2TB_SHIFT (UL(24))
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 501e3e019c93..21650e7924d4 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -557,6 +557,68 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu_set_flag((v), e); \
} while (0)
+#define __build_check_all_or_none(r, bits) \
+ BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
+
+#define __cpacr_to_cptr_clr(clr, set) \
+ ({ \
+ u64 cptr = 0; \
+ \
+ if ((set) & CPACR_ELx_FPEN) \
+ cptr |= CPTR_EL2_TFP; \
+ if ((set) & CPACR_ELx_ZEN) \
+ cptr |= CPTR_EL2_TZ; \
+ if ((set) & CPACR_ELx_SMEN) \
+ cptr |= CPTR_EL2_TSM; \
+ if ((clr) & CPACR_ELx_TTA) \
+ cptr |= CPTR_EL2_TTA; \
+ if ((clr) & CPTR_EL2_TAM) \
+ cptr |= CPTR_EL2_TAM; \
+ if ((clr) & CPTR_EL2_TCPAC) \
+ cptr |= CPTR_EL2_TCPAC; \
+ \
+ cptr; \
+ })
+
+#define __cpacr_to_cptr_set(clr, set) \
+ ({ \
+ u64 cptr = 0; \
+ \
+ if ((clr) & CPACR_ELx_FPEN) \
+ cptr |= CPTR_EL2_TFP; \
+ if ((clr) & CPACR_ELx_ZEN) \
+ cptr |= CPTR_EL2_TZ; \
+ if ((clr) & CPACR_ELx_SMEN) \
+ cptr |= CPTR_EL2_TSM; \
+ if ((set) & CPACR_ELx_TTA) \
+ cptr |= CPTR_EL2_TTA; \
+ if ((set) & CPTR_EL2_TAM) \
+ cptr |= CPTR_EL2_TAM; \
+ if ((set) & CPTR_EL2_TCPAC) \
+ cptr |= CPTR_EL2_TCPAC; \
+ \
+ cptr; \
+ })
+
+#define cpacr_clear_set(clr, set) \
+ do { \
+ BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
+ BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \
+ __build_check_all_or_none((clr), CPACR_ELx_FPEN); \
+ __build_check_all_or_none((set), CPACR_ELx_FPEN); \
+ __build_check_all_or_none((clr), CPACR_ELx_ZEN); \
+ __build_check_all_or_none((set), CPACR_ELx_ZEN); \
+ __build_check_all_or_none((clr), CPACR_ELx_SMEN); \
+ __build_check_all_or_none((set), CPACR_ELx_SMEN); \
+ \
+ if (has_vhe() || has_hvhe()) \
+ sysreg_clear_set(cpacr_el1, clr, set); \
+ else \
+ sysreg_clear_set(cptr_el2, \
+ __cpacr_to_cptr_clr(clr, set), \
+ __cpacr_to_cptr_set(clr, set));\
+ } while (0)
+
static __always_inline void kvm_write_cptr_el2(u64 val)
{
if (has_vhe() || has_hvhe())
@@ -570,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
u64 val;
if (has_vhe()) {
- val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
- CPACR_EL1_ZEN_EL1EN);
+ val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_EL1_SMEN_EL1EN;
} else if (has_hvhe()) {
- val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+ val = CPACR_ELx_FPEN;
if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
- val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+ val |= CPACR_ELx_ZEN;
if (cpus_have_final_cap(ARM64_SME))
- val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
+ val |= CPACR_ELx_SMEN;
} else {
val = CPTR_NVHE_EL2_RES1;
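[Not part of the patch: a hedged usage sketch of the new cpacr_clear_set() helper. Callers express intent in CPACR_ELx terms; on VHE/hVHE this is a plain sysreg_clear_set(cpacr_el1, ...), while on nVHE the macro translates to the inverted CPTR_EL2 trap bits via __cpacr_to_cptr_clr()/__cpacr_to_cptr_set(). Illustration only; the real call sites live in the KVM hyp code.]

	/* Stop trapping FP/SIMD and SVE accesses (clears CPTR_EL2_TFP/TZ on nVHE): */
	cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN);

	/* ... and re-arm the traps on the way back to the host: */
	cpacr_clear_set(CPACR_ELx_FPEN | CPACR_ELx_ZEN, 0);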
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8170c04fde91..36b8e97bf49e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
extern unsigned int __ro_after_init kvm_sve_max_vl;
+extern unsigned int __ro_after_init kvm_host_sve_max_vl;
int __init kvm_arm_init_sve(void);
u32 __attribute_const__ kvm_target_cpu(void);
@@ -521,6 +522,20 @@ struct kvm_cpu_context {
u64 *vncr_array;
};
+struct cpu_sve_state {
+ __u64 zcr_el1;
+
+ /*
+ * Ordering is important since __sve_save_state/__sve_restore_state
+ * relies on it.
+ */
+ __u32 fpsr;
+ __u32 fpcr;
+
+ /* Must be SVE_VQ_BYTES (128 bit) aligned. */
+ __u8 sve_regs[];
+};
+
/*
* This structure is instantiated on a per-CPU basis, and contains
* data that is:
@@ -534,7 +549,15 @@ struct kvm_cpu_context {
*/
struct kvm_host_data {
struct kvm_cpu_context host_ctxt;
- struct user_fpsimd_state *fpsimd_state; /* hyp VA */
+
+ /*
+ * All pointers in this union are hyp VA.
+ * sve_state is only used in pKVM and if system_supports_sve().
+ */
+ union {
+ struct user_fpsimd_state *fpsimd_state;
+ struct cpu_sve_state *sve_state;
+ };
/* Ownership of the FP regs */
enum {
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 3e80464f8953..b05bceca3385 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -111,7 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-void __sve_restore_state(void *sve_pffr, u32 *fpsr);
+void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr);
u64 __guest_enter(struct kvm_vcpu *vcpu);
@@ -142,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
+extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index ad9cfb5c1ff4..cd56acd9a842 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
+static inline size_t pkvm_host_sve_state_size(void)
+{
+ if (!system_supports_sve())
+ return 0;
+
+ return size_add(sizeof(struct cpu_sve_state),
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
+}
+
#endif /* __ARM64_KVM_PKVM_H__ */
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c768d16b81a4..bd19f4c758b7 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -72,11 +72,11 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
unsigned long tcr = read_sysreg(tcr_el1);
- if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
+ if ((tcr & TCR_T0SZ_MASK) == t0sz)
return;
tcr &= ~TCR_T0SZ_MASK;
- tcr |= t0sz << TCR_T0SZ_OFFSET;
+ tcr |= t0sz;
write_sysreg(tcr, tcr_el1);
isb();
}
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 91fbd5c8a391..0f84518632b4 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -182,7 +182,7 @@ void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
- if (!system_supports_mte())
+ if (!kasan_hw_tags_enabled())
return;
mte_check_tfsr_el1();
@@ -190,7 +190,7 @@ static inline void mte_check_tfsr_entry(void)
static inline void mte_check_tfsr_exit(void)
{
- if (!system_supports_mte())
+ if (!kasan_hw_tags_enabled())
return;
/*
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 9943ff0af4c9..1f60aa1bc750 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -170,6 +170,7 @@
#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
+#define PTE_SWBITS_MASK _AT(pteval_t, (BIT(63) | GENMASK(58, 55)))
#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (50 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 47ec58031f11..0abe975d68a8 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,35 +21,12 @@
#define INIT_PSTATE_EL2 \
(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h)
-/*
- * PMR values used to mask/unmask interrupts.
- *
- * GIC priority masking works as follows: if an IRQ's priority is a higher value
- * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
- * means masking more IRQs (or at least that the same IRQs remain masked).
- *
- * To mask interrupts, we clear the most significant bit of PMR.
- *
- * Some code sections either automatically switch back to PSR.I or explicitly
- * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
- * in the priority mask, it indicates that PSR.I should be set and
- * interrupt disabling temporarily does not rely on IRQ priorities.
- */
-#define GIC_PRIO_IRQON 0xe0
-#define __GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
-#define __GIC_PRIO_IRQOFF_NS 0xa0
-#define GIC_PRIO_PSR_I_SET (1 << 4)
-
-#define GIC_PRIO_IRQOFF \
- ({ \
- extern struct static_key_false gic_nonsecure_priorities;\
- u8 __prio = __GIC_PRIO_IRQOFF; \
- \
- if (static_branch_unlikely(&gic_nonsecure_priorities)) \
- __prio = __GIC_PRIO_IRQOFF_NS; \
- \
- __prio; \
- })
+#include <linux/irqchip/arm-gic-v3-prio.h>
+
+#define GIC_PRIO_IRQON GICV3_PRIO_UNMASKED
+#define GIC_PRIO_IRQOFF GICV3_PRIO_IRQ
+
+#define GIC_PRIO_PSR_I_SET GICV3_PRIO_PSR_I_SET
/* Additional SPSR bits not exposed in the UABI */
#define PSR_MODE_THREAD_BIT (1 << 0)
diff --git a/arch/arm64/include/asm/runtime-const.h b/arch/arm64/include/asm/runtime-const.h
new file mode 100644
index 000000000000..be5915669d23
--- /dev/null
+++ b/arch/arm64/include/asm/runtime-const.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+#include <asm/cacheflush.h>
+
+/* Sigh. You can still run arm64 in BE mode */
+#include <asm/byteorder.h>
+
+#define runtime_const_ptr(sym) ({ \
+ typeof(sym) __ret; \
+ asm_inline("1:\t" \
+ "movz %0, #0xcdef\n\t" \
+ "movk %0, #0x89ab, lsl #16\n\t" \
+ "movk %0, #0x4567, lsl #32\n\t" \
+ "movk %0, #0x0123, lsl #48\n\t" \
+ ".pushsection runtime_ptr_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ :"=r" (__ret)); \
+ __ret; })
+
+#define runtime_const_shift_right_32(val, sym) ({ \
+ unsigned long __ret; \
+ asm_inline("1:\t" \
+ "lsr %w0,%w1,#12\n\t" \
+ ".pushsection runtime_shift_" #sym ",\"a\"\n\t" \
+ ".long 1b - .\n\t" \
+ ".popsection" \
+ :"=r" (__ret) \
+ :"r" (0u+(val))); \
+ __ret; })
+
+#define runtime_const_init(type, sym) do { \
+ extern s32 __start_runtime_##type##_##sym[]; \
+ extern s32 __stop_runtime_##type##_##sym[]; \
+ runtime_const_fixup(__runtime_fixup_##type, \
+ (unsigned long)(sym), \
+ __start_runtime_##type##_##sym, \
+ __stop_runtime_##type##_##sym); \
+} while (0)
+
+/* 16-bit immediate for wide move (movz and movk) in bits 5..20 */
+static inline void __runtime_fixup_16(__le32 *p, unsigned int val)
+{
+ u32 insn = le32_to_cpu(*p);
+ insn &= 0xffe0001f;
+ insn |= (val & 0xffff) << 5;
+ *p = cpu_to_le32(insn);
+}
+
+static inline void __runtime_fixup_caches(void *where, unsigned int insns)
+{
+ unsigned long va = (unsigned long)where;
+ caches_clean_inval_pou(va, va + 4*insns);
+}
+
+static inline void __runtime_fixup_ptr(void *where, unsigned long val)
+{
+ __le32 *p = lm_alias(where);
+ __runtime_fixup_16(p, val);
+ __runtime_fixup_16(p+1, val >> 16);
+ __runtime_fixup_16(p+2, val >> 32);
+ __runtime_fixup_16(p+3, val >> 48);
+ __runtime_fixup_caches(where, 4);
+}
+
+/* Immediate value is 6 bits starting at bit #16 */
+static inline void __runtime_fixup_shift(void *where, unsigned long val)
+{
+ __le32 *p = lm_alias(where);
+ u32 insn = le32_to_cpu(*p);
+ insn &= 0xffc0ffff;
+ insn |= (val & 63) << 16;
+ *p = cpu_to_le32(insn);
+ __runtime_fixup_caches(where, 1);
+}
+
+static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
+ unsigned long val, s32 *start, s32 *end)
+{
+ while (start < end) {
+ fn(*start + (void *)start, val);
+ start++;
+ }
+}
+
+#endif
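[Not part of the patch: a hedged sketch of how the runtime_const_* pieces fit together. The symbols my_table/my_shift are made up, and the runtime_ptr_*/runtime_shift_* sections are assumed to be collected by the linker script, as the generic runtime-const users arrange. The movz/movk (and lsr) instructions are emitted with placeholder immediates and later patched in place with the real values.]

	static unsigned long *my_table __ro_after_init;
	static unsigned int my_shift __ro_after_init;

	static inline unsigned long *my_bucket(u32 hash)
	{
		/* compiles to movz/movk + lsr with placeholder immediates */
		return runtime_const_ptr(my_table) +
		       runtime_const_shift_right_32(hash, my_shift);
	}

	static void __init my_table_init(void)
	{
		my_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);	/* placeholder allocation */
		my_shift = 24;
		/* patch the recorded instructions with the real values */
		runtime_const_init(ptr, my_table);
		runtime_const_init(shift, my_shift);
	}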
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
index 30256233788b..b83975555314 100644
--- a/arch/arm64/include/asm/seccomp.h
+++ b/arch/arm64/include/asm/seccomp.h
@@ -8,13 +8,13 @@
#ifndef _ASM_SECCOMP_H
#define _ASM_SECCOMP_H
-#include <asm/unistd.h>
+#include <asm/unistd_compat_32.h>
#ifdef CONFIG_COMPAT
-#define __NR_seccomp_read_32 __NR_compat_read
-#define __NR_seccomp_write_32 __NR_compat_write
-#define __NR_seccomp_exit_32 __NR_compat_exit
-#define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn
+#define __NR_seccomp_read_32 __NR_compat32_read
+#define __NR_seccomp_write_32 __NR_compat32_write
+#define __NR_seccomp_exit_32 __NR_compat32_exit
+#define __NR_seccomp_sigreturn_32 __NR_compat32_rt_sigreturn
#endif /* CONFIG_COMPAT */
#include <asm-generic/seccomp.h>
@@ -23,8 +23,9 @@
#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
#define SECCOMP_ARCH_NATIVE_NAME "aarch64"
#ifdef CONFIG_COMPAT
+#include <asm/unistd_compat_32.h>
# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_ARM
-# define SECCOMP_ARCH_COMPAT_NR __NR_compat_syscalls
+# define SECCOMP_ARCH_COMPAT_NR __NR_compat32_syscalls
# define SECCOMP_ARCH_COMPAT_NAME "arm"
#endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index efb13112b408..2510eec026f7 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -25,22 +25,11 @@
#ifndef __ASSEMBLY__
-#include <asm/percpu.h>
-
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
-
-/*
- * We don't use this_cpu_read(cpu_number) as that has implicit writes to
- * preempt_count, and associated (compiler) barriers, that we'd like to avoid
- * the expense of. If we're preemptible, the value can be stale at use anyway.
- * And we can't use this_cpu_ptr() either, as that winds up recursing back
- * here under CONFIG_DEBUG_PREEMPT=y.
- */
-#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
+#define raw_smp_processor_id() (current_thread_info()->cpu)
/*
* Logical CPU mapping.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index af3b206fa423..1b6e436dbb55 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -872,10 +872,6 @@
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
-/* id_aa64pfr0 */
-#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1
-#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2
-
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0
#define ID_AA64MMFR0_EL1_TGRAN4_LPA2 ID_AA64MMFR0_EL1_TGRAN4_52_BIT
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 14be5000c5a0..28f665e0975a 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -184,29 +184,40 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_mem_asm(load, reg, x, addr, err, type) \
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_mem_asm(load, reg, x, addr, label, type) \
+ asm_goto_output( \
+ "1: " load " " reg "0, [%1]\n" \
+ _ASM_EXTABLE_##type##ACCESS_ERR(1b, %l2, %w0) \
+ : "=r" (x) \
+ : "r" (addr) : : label)
+#else
+#define __get_mem_asm(load, reg, x, addr, label, type) do { \
+ int __gma_err = 0; \
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
_ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
- : "+r" (err), "=r" (x) \
- : "r" (addr))
+ : "+r" (__gma_err), "=r" (x) \
+ : "r" (addr)); \
+ if (__gma_err) goto label; } while (0)
+#endif
-#define __raw_get_mem(ldr, x, ptr, err, type) \
+#define __raw_get_mem(ldr, x, ptr, label, type) \
do { \
unsigned long __gu_val; \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), label, type); \
break; \
case 2: \
- __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), label, type); \
break; \
case 4: \
- __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr, "%w", __gu_val, (ptr), label, type); \
break; \
case 8: \
- __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type); \
+ __get_mem_asm(ldr, "%x", __gu_val, (ptr), label, type); \
break; \
default: \
BUILD_BUG(); \
@@ -219,27 +230,34 @@ do { \
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
* we must evaluate these outside of the critical section.
*/
-#define __raw_get_user(x, ptr, err) \
+#define __raw_get_user(x, ptr, label) \
do { \
__typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
__typeof__(x) __rgu_val; \
__chk_user_ptr(ptr); \
- \
- uaccess_ttbr0_enable(); \
- __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U); \
- uaccess_ttbr0_disable(); \
- \
- (x) = __rgu_val; \
+ do { \
+ __label__ __rgu_failed; \
+ uaccess_ttbr0_enable(); \
+ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, __rgu_failed, U); \
+ uaccess_ttbr0_disable(); \
+ (x) = __rgu_val; \
+ break; \
+ __rgu_failed: \
+ uaccess_ttbr0_disable(); \
+ goto label; \
+ } while (0); \
} while (0)
#define __get_user_error(x, ptr, err) \
do { \
+ __label__ __gu_failed; \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __raw_get_user((x), __p, (err)); \
+ __raw_get_user((x), __p, __gu_failed); \
} else { \
+ __gu_failed: \
(x) = (__force __typeof__(x))0; (err) = -EFAULT; \
} \
} while (0)
@@ -262,40 +280,42 @@ do { \
do { \
__typeof__(dst) __gkn_dst = (dst); \
__typeof__(src) __gkn_src = (src); \
- int __gkn_err = 0; \
- \
- __mte_enable_tco_async(); \
- __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
- (__force type *)(__gkn_src), __gkn_err, K); \
- __mte_disable_tco_async(); \
+ do { \
+ __label__ __gkn_label; \
\
- if (unlikely(__gkn_err)) \
+ __mte_enable_tco_async(); \
+ __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
+ (__force type *)(__gkn_src), __gkn_label, K); \
+ __mte_disable_tco_async(); \
+ break; \
+ __gkn_label: \
+ __mte_disable_tco_async(); \
goto err_label; \
+ } while (0); \
} while (0)
-#define __put_mem_asm(store, reg, x, addr, err, type) \
- asm volatile( \
- "1: " store " " reg "1, [%2]\n" \
+#define __put_mem_asm(store, reg, x, addr, label, type) \
+ asm goto( \
+ "1: " store " " reg "0, [%1]\n" \
"2:\n" \
- _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
- : "+r" (err) \
- : "rZ" (x), "r" (addr))
+ _ASM_EXTABLE_##type##ACCESS(1b, %l2) \
+ : : "rZ" (x), "r" (addr) : : label)
-#define __raw_put_mem(str, x, ptr, err, type) \
+#define __raw_put_mem(str, x, ptr, label, type) \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
switch (sizeof(*(ptr))) { \
case 1: \
- __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str "b", "%w", __pu_val, (ptr), label, type); \
break; \
case 2: \
- __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str "h", "%w", __pu_val, (ptr), label, type); \
break; \
case 4: \
- __put_mem_asm(str, "%w", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str, "%w", __pu_val, (ptr), label, type); \
break; \
case 8: \
- __put_mem_asm(str, "%x", __pu_val, (ptr), (err), type); \
+ __put_mem_asm(str, "%x", __pu_val, (ptr), label, type); \
break; \
default: \
BUILD_BUG(); \
@@ -307,25 +327,34 @@ do { \
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
* we must evaluate these outside of the critical section.
*/
-#define __raw_put_user(x, ptr, err) \
+#define __raw_put_user(x, ptr, label) \
do { \
+ __label__ __rpu_failed; \
__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
__typeof__(*(ptr)) __rpu_val = (x); \
__chk_user_ptr(__rpu_ptr); \
\
- uaccess_ttbr0_enable(); \
- __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U); \
- uaccess_ttbr0_disable(); \
+ do { \
+ uaccess_ttbr0_enable(); \
+ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, __rpu_failed, U); \
+ uaccess_ttbr0_disable(); \
+ break; \
+ __rpu_failed: \
+ uaccess_ttbr0_disable(); \
+ goto label; \
+ } while (0); \
} while (0)
#define __put_user_error(x, ptr, err) \
do { \
+ __label__ __pu_failed; \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
- __raw_put_user((x), __p, (err)); \
+ __raw_put_user((x), __p, __pu_failed); \
} else { \
+ __pu_failed: \
(err) = -EFAULT; \
} \
} while (0)
@@ -348,15 +377,18 @@ do { \
do { \
__typeof__(dst) __pkn_dst = (dst); \
__typeof__(src) __pkn_src = (src); \
- int __pkn_err = 0; \
\
- __mte_enable_tco_async(); \
- __raw_put_mem("str", *((type *)(__pkn_src)), \
- (__force type *)(__pkn_dst), __pkn_err, K); \
- __mte_disable_tco_async(); \
- \
- if (unlikely(__pkn_err)) \
+ do { \
+ __label__ __pkn_err; \
+ __mte_enable_tco_async(); \
+ __raw_put_mem("str", *((type *)(__pkn_src)), \
+ (__force type *)(__pkn_dst), __pkn_err, K); \
+ __mte_disable_tco_async(); \
+ break; \
+ __pkn_err: \
+ __mte_disable_tco_async(); \
goto err_label; \
+ } while (0); \
} while(0)
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
@@ -381,6 +413,51 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
__actu_ret; \
})
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr,len)))
+ return 0;
+ uaccess_ttbr0_enable();
+ return 1;
+}
+#define user_access_begin(a,b) user_access_begin(a,b)
+#define user_access_end() uaccess_ttbr0_disable()
+#define unsafe_put_user(x, ptr, label) \
+ __raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
+#define unsafe_get_user(x, ptr, label) \
+ __raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
+
+/*
+ * KCSAN uses these to save and restore ttbr state.
+ * We do not support KCSAN with ARM64_SW_TTBR0_PAN, so
+ * they are no-ops.
+ */
+static inline unsigned long user_access_save(void) { return 0; }
+static inline void user_access_restore(unsigned long enabled) { }
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label) \
+ while (len >= sizeof(type)) { \
+ unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
+
+#define unsafe_copy_to_user(_dst,_src,_len,label) \
+do { \
+ char __user *__ucu_dst = (_dst); \
+ const char *__ucu_src = (_src); \
+ size_t __ucu_len = (_len); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
+} while (0)
+
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
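[Not part of the patch: a minimal sketch of the pattern the new user_access_begin()/unsafe_put_user() support enables on arm64. write_pair_to_user() is a made-up function; the shape mirrors the generic unsafe-accessor users.]

	/* Write two words to userspace inside one user-access window. */
	static int write_pair_to_user(u32 __user *uaddr, u32 a, u32 b)
	{
		if (!user_access_begin(uaddr, 2 * sizeof(u32)))
			return -EFAULT;
		unsafe_put_user(a, &uaddr[0], efault);
		unsafe_put_user(b, &uaddr[1], efault);
		user_access_end();
		return 0;
	efault:
		user_access_end();
		return -EFAULT;
	}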
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 1346579f802f..80618c9bbcd8 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -17,35 +17,17 @@
#define __ARCH_WANT_SYS_VFORK
/*
- * Compat syscall numbers used by the AArch64 kernel.
- */
-#define __NR_compat_restart_syscall 0
-#define __NR_compat_exit 1
-#define __NR_compat_read 3
-#define __NR_compat_write 4
-#define __NR_compat_gettimeofday 78
-#define __NR_compat_sigreturn 119
-#define __NR_compat_rt_sigreturn 173
-#define __NR_compat_clock_gettime 263
-#define __NR_compat_clock_getres 264
-#define __NR_compat_clock_gettime64 403
-#define __NR_compat_clock_getres_time64 406
-
-/*
* The following SVCs are ARM private.
*/
#define __ARM_NR_COMPAT_BASE 0x0f0000
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-
-#define __NR_compat_syscalls 463
#endif
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_NEW_STAT
-#ifndef __COMPAT_SYSCALL_NR
-#include <uapi/asm/unistd.h>
-#endif
+#include <asm/unistd_64.h>
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 266b96acc014..e0b1a0b57f75 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -1,938 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AArch32 (compat) system call definitions.
- *
- * Copyright (C) 2001-2005 Russell King
- * Copyright (C) 2012 ARM Ltd.
- */
+#ifndef _UAPI__ASM_ARM_UNISTD_H
+#define _UAPI__ASM_ARM_UNISTD_H
-#ifndef __SYSCALL
-#define __SYSCALL(x, y)
-#endif
+#include <asm/unistd_32.h>
-#define __NR_restart_syscall 0
-__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
-#define __NR_exit 1
-__SYSCALL(__NR_exit, sys_exit)
-#define __NR_fork 2
-__SYSCALL(__NR_fork, sys_fork)
-#define __NR_read 3
-__SYSCALL(__NR_read, sys_read)
-#define __NR_write 4
-__SYSCALL(__NR_write, sys_write)
-#define __NR_open 5
-__SYSCALL(__NR_open, compat_sys_open)
-#define __NR_close 6
-__SYSCALL(__NR_close, sys_close)
- /* 7 was sys_waitpid */
-__SYSCALL(7, sys_ni_syscall)
-#define __NR_creat 8
-__SYSCALL(__NR_creat, sys_creat)
-#define __NR_link 9
-__SYSCALL(__NR_link, sys_link)
-#define __NR_unlink 10
-__SYSCALL(__NR_unlink, sys_unlink)
-#define __NR_execve 11
-__SYSCALL(__NR_execve, compat_sys_execve)
-#define __NR_chdir 12
-__SYSCALL(__NR_chdir, sys_chdir)
- /* 13 was sys_time */
-__SYSCALL(13, sys_ni_syscall)
-#define __NR_mknod 14
-__SYSCALL(__NR_mknod, sys_mknod)
-#define __NR_chmod 15
-__SYSCALL(__NR_chmod, sys_chmod)
-#define __NR_lchown 16
-__SYSCALL(__NR_lchown, sys_lchown16)
- /* 17 was sys_break */
-__SYSCALL(17, sys_ni_syscall)
- /* 18 was sys_stat */
-__SYSCALL(18, sys_ni_syscall)
-#define __NR_lseek 19
-__SYSCALL(__NR_lseek, compat_sys_lseek)
-#define __NR_getpid 20
-__SYSCALL(__NR_getpid, sys_getpid)
-#define __NR_mount 21
-__SYSCALL(__NR_mount, sys_mount)
- /* 22 was sys_umount */
-__SYSCALL(22, sys_ni_syscall)
-#define __NR_setuid 23
-__SYSCALL(__NR_setuid, sys_setuid16)
-#define __NR_getuid 24
-__SYSCALL(__NR_getuid, sys_getuid16)
- /* 25 was sys_stime */
-__SYSCALL(25, sys_ni_syscall)
-#define __NR_ptrace 26
-__SYSCALL(__NR_ptrace, compat_sys_ptrace)
- /* 27 was sys_alarm */
-__SYSCALL(27, sys_ni_syscall)
- /* 28 was sys_fstat */
-__SYSCALL(28, sys_ni_syscall)
-#define __NR_pause 29
-__SYSCALL(__NR_pause, sys_pause)
- /* 30 was sys_utime */
-__SYSCALL(30, sys_ni_syscall)
- /* 31 was sys_stty */
-__SYSCALL(31, sys_ni_syscall)
- /* 32 was sys_gtty */
-__SYSCALL(32, sys_ni_syscall)
-#define __NR_access 33
-__SYSCALL(__NR_access, sys_access)
-#define __NR_nice 34
-__SYSCALL(__NR_nice, sys_nice)
- /* 35 was sys_ftime */
-__SYSCALL(35, sys_ni_syscall)
-#define __NR_sync 36
-__SYSCALL(__NR_sync, sys_sync)
-#define __NR_kill 37
-__SYSCALL(__NR_kill, sys_kill)
-#define __NR_rename 38
-__SYSCALL(__NR_rename, sys_rename)
-#define __NR_mkdir 39
-__SYSCALL(__NR_mkdir, sys_mkdir)
-#define __NR_rmdir 40
-__SYSCALL(__NR_rmdir, sys_rmdir)
-#define __NR_dup 41
-__SYSCALL(__NR_dup, sys_dup)
-#define __NR_pipe 42
-__SYSCALL(__NR_pipe, sys_pipe)
-#define __NR_times 43
-__SYSCALL(__NR_times, compat_sys_times)
- /* 44 was sys_prof */
-__SYSCALL(44, sys_ni_syscall)
-#define __NR_brk 45
-__SYSCALL(__NR_brk, sys_brk)
-#define __NR_setgid 46
-__SYSCALL(__NR_setgid, sys_setgid16)
-#define __NR_getgid 47
-__SYSCALL(__NR_getgid, sys_getgid16)
- /* 48 was sys_signal */
-__SYSCALL(48, sys_ni_syscall)
-#define __NR_geteuid 49
-__SYSCALL(__NR_geteuid, sys_geteuid16)
-#define __NR_getegid 50
-__SYSCALL(__NR_getegid, sys_getegid16)
-#define __NR_acct 51
-__SYSCALL(__NR_acct, sys_acct)
-#define __NR_umount2 52
-__SYSCALL(__NR_umount2, sys_umount)
- /* 53 was sys_lock */
-__SYSCALL(53, sys_ni_syscall)
-#define __NR_ioctl 54
-__SYSCALL(__NR_ioctl, compat_sys_ioctl)
-#define __NR_fcntl 55
-__SYSCALL(__NR_fcntl, compat_sys_fcntl)
- /* 56 was sys_mpx */
-__SYSCALL(56, sys_ni_syscall)
-#define __NR_setpgid 57
-__SYSCALL(__NR_setpgid, sys_setpgid)
- /* 58 was sys_ulimit */
-__SYSCALL(58, sys_ni_syscall)
- /* 59 was sys_olduname */
-__SYSCALL(59, sys_ni_syscall)
-#define __NR_umask 60
-__SYSCALL(__NR_umask, sys_umask)
-#define __NR_chroot 61
-__SYSCALL(__NR_chroot, sys_chroot)
-#define __NR_ustat 62
-__SYSCALL(__NR_ustat, compat_sys_ustat)
-#define __NR_dup2 63
-__SYSCALL(__NR_dup2, sys_dup2)
-#define __NR_getppid 64
-__SYSCALL(__NR_getppid, sys_getppid)
-#define __NR_getpgrp 65
-__SYSCALL(__NR_getpgrp, sys_getpgrp)
-#define __NR_setsid 66
-__SYSCALL(__NR_setsid, sys_setsid)
-#define __NR_sigaction 67
-__SYSCALL(__NR_sigaction, compat_sys_sigaction)
- /* 68 was sys_sgetmask */
-__SYSCALL(68, sys_ni_syscall)
- /* 69 was sys_ssetmask */
-__SYSCALL(69, sys_ni_syscall)
-#define __NR_setreuid 70
-__SYSCALL(__NR_setreuid, sys_setreuid16)
-#define __NR_setregid 71
-__SYSCALL(__NR_setregid, sys_setregid16)
-#define __NR_sigsuspend 72
-__SYSCALL(__NR_sigsuspend, sys_sigsuspend)
-#define __NR_sigpending 73
-__SYSCALL(__NR_sigpending, compat_sys_sigpending)
-#define __NR_sethostname 74
-__SYSCALL(__NR_sethostname, sys_sethostname)
-#define __NR_setrlimit 75
-__SYSCALL(__NR_setrlimit, compat_sys_setrlimit)
- /* 76 was compat_sys_getrlimit */
-__SYSCALL(76, sys_ni_syscall)
-#define __NR_getrusage 77
-__SYSCALL(__NR_getrusage, compat_sys_getrusage)
-#define __NR_gettimeofday 78
-__SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday)
-#define __NR_settimeofday 79
-__SYSCALL(__NR_settimeofday, compat_sys_settimeofday)
-#define __NR_getgroups 80
-__SYSCALL(__NR_getgroups, sys_getgroups16)
-#define __NR_setgroups 81
-__SYSCALL(__NR_setgroups, sys_setgroups16)
- /* 82 was compat_sys_select */
-__SYSCALL(82, sys_ni_syscall)
-#define __NR_symlink 83
-__SYSCALL(__NR_symlink, sys_symlink)
- /* 84 was sys_lstat */
-__SYSCALL(84, sys_ni_syscall)
-#define __NR_readlink 85
-__SYSCALL(__NR_readlink, sys_readlink)
-#define __NR_uselib 86
-__SYSCALL(__NR_uselib, sys_uselib)
-#define __NR_swapon 87
-__SYSCALL(__NR_swapon, sys_swapon)
-#define __NR_reboot 88
-__SYSCALL(__NR_reboot, sys_reboot)
- /* 89 was sys_readdir */
-__SYSCALL(89, sys_ni_syscall)
- /* 90 was sys_mmap */
-__SYSCALL(90, sys_ni_syscall)
-#define __NR_munmap 91
-__SYSCALL(__NR_munmap, sys_munmap)
-#define __NR_truncate 92
-__SYSCALL(__NR_truncate, compat_sys_truncate)
-#define __NR_ftruncate 93
-__SYSCALL(__NR_ftruncate, compat_sys_ftruncate)
-#define __NR_fchmod 94
-__SYSCALL(__NR_fchmod, sys_fchmod)
-#define __NR_fchown 95
-__SYSCALL(__NR_fchown, sys_fchown16)
-#define __NR_getpriority 96
-__SYSCALL(__NR_getpriority, sys_getpriority)
-#define __NR_setpriority 97
-__SYSCALL(__NR_setpriority, sys_setpriority)
- /* 98 was sys_profil */
-__SYSCALL(98, sys_ni_syscall)
-#define __NR_statfs 99
-__SYSCALL(__NR_statfs, compat_sys_statfs)
-#define __NR_fstatfs 100
-__SYSCALL(__NR_fstatfs, compat_sys_fstatfs)
- /* 101 was sys_ioperm */
-__SYSCALL(101, sys_ni_syscall)
- /* 102 was sys_socketcall */
-__SYSCALL(102, sys_ni_syscall)
-#define __NR_syslog 103
-__SYSCALL(__NR_syslog, sys_syslog)
-#define __NR_setitimer 104
-__SYSCALL(__NR_setitimer, compat_sys_setitimer)
-#define __NR_getitimer 105
-__SYSCALL(__NR_getitimer, compat_sys_getitimer)
-#define __NR_stat 106
-__SYSCALL(__NR_stat, compat_sys_newstat)
-#define __NR_lstat 107
-__SYSCALL(__NR_lstat, compat_sys_newlstat)
-#define __NR_fstat 108
-__SYSCALL(__NR_fstat, compat_sys_newfstat)
- /* 109 was sys_uname */
-__SYSCALL(109, sys_ni_syscall)
- /* 110 was sys_iopl */
-__SYSCALL(110, sys_ni_syscall)
-#define __NR_vhangup 111
-__SYSCALL(__NR_vhangup, sys_vhangup)
- /* 112 was sys_idle */
-__SYSCALL(112, sys_ni_syscall)
- /* 113 was sys_syscall */
-__SYSCALL(113, sys_ni_syscall)
-#define __NR_wait4 114
-__SYSCALL(__NR_wait4, compat_sys_wait4)
-#define __NR_swapoff 115
-__SYSCALL(__NR_swapoff, sys_swapoff)
-#define __NR_sysinfo 116
-__SYSCALL(__NR_sysinfo, compat_sys_sysinfo)
- /* 117 was sys_ipc */
-__SYSCALL(117, sys_ni_syscall)
-#define __NR_fsync 118
-__SYSCALL(__NR_fsync, sys_fsync)
-#define __NR_sigreturn 119
-__SYSCALL(__NR_sigreturn, compat_sys_sigreturn)
-#define __NR_clone 120
-__SYSCALL(__NR_clone, sys_clone)
-#define __NR_setdomainname 121
-__SYSCALL(__NR_setdomainname, sys_setdomainname)
-#define __NR_uname 122
-__SYSCALL(__NR_uname, sys_newuname)
- /* 123 was sys_modify_ldt */
-__SYSCALL(123, sys_ni_syscall)
-#define __NR_adjtimex 124
-__SYSCALL(__NR_adjtimex, sys_adjtimex_time32)
-#define __NR_mprotect 125
-__SYSCALL(__NR_mprotect, sys_mprotect)
-#define __NR_sigprocmask 126
-__SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask)
- /* 127 was sys_create_module */
-__SYSCALL(127, sys_ni_syscall)
-#define __NR_init_module 128
-__SYSCALL(__NR_init_module, sys_init_module)
-#define __NR_delete_module 129
-__SYSCALL(__NR_delete_module, sys_delete_module)
- /* 130 was sys_get_kernel_syms */
-__SYSCALL(130, sys_ni_syscall)
-#define __NR_quotactl 131
-__SYSCALL(__NR_quotactl, sys_quotactl)
-#define __NR_getpgid 132
-__SYSCALL(__NR_getpgid, sys_getpgid)
-#define __NR_fchdir 133
-__SYSCALL(__NR_fchdir, sys_fchdir)
-#define __NR_bdflush 134
-__SYSCALL(__NR_bdflush, sys_ni_syscall)
-#define __NR_sysfs 135
-__SYSCALL(__NR_sysfs, sys_sysfs)
-#define __NR_personality 136
-__SYSCALL(__NR_personality, sys_personality)
- /* 137 was sys_afs_syscall */
-__SYSCALL(137, sys_ni_syscall)
-#define __NR_setfsuid 138
-__SYSCALL(__NR_setfsuid, sys_setfsuid16)
-#define __NR_setfsgid 139
-__SYSCALL(__NR_setfsgid, sys_setfsgid16)
-#define __NR__llseek 140
-__SYSCALL(__NR__llseek, sys_llseek)
-#define __NR_getdents 141
-__SYSCALL(__NR_getdents, compat_sys_getdents)
-#define __NR__newselect 142
-__SYSCALL(__NR__newselect, compat_sys_select)
-#define __NR_flock 143
-__SYSCALL(__NR_flock, sys_flock)
-#define __NR_msync 144
-__SYSCALL(__NR_msync, sys_msync)
-#define __NR_readv 145
-__SYSCALL(__NR_readv, sys_readv)
-#define __NR_writev 146
-__SYSCALL(__NR_writev, sys_writev)
-#define __NR_getsid 147
-__SYSCALL(__NR_getsid, sys_getsid)
-#define __NR_fdatasync 148
-__SYSCALL(__NR_fdatasync, sys_fdatasync)
- /* 149 was sys_sysctl */
-__SYSCALL(149, sys_ni_syscall)
-#define __NR_mlock 150
-__SYSCALL(__NR_mlock, sys_mlock)
-#define __NR_munlock 151
-__SYSCALL(__NR_munlock, sys_munlock)
-#define __NR_mlockall 152
-__SYSCALL(__NR_mlockall, sys_mlockall)
-#define __NR_munlockall 153
-__SYSCALL(__NR_munlockall, sys_munlockall)
-#define __NR_sched_setparam 154
-__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
-#define __NR_sched_getparam 155
-__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
-#define __NR_sched_setscheduler 156
-__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
-#define __NR_sched_getscheduler 157
-__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
-#define __NR_sched_yield 158
-__SYSCALL(__NR_sched_yield, sys_sched_yield)
-#define __NR_sched_get_priority_max 159
-__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
-#define __NR_sched_get_priority_min 160
-__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
-#define __NR_sched_rr_get_interval 161
-__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32)
-#define __NR_nanosleep 162
-__SYSCALL(__NR_nanosleep, sys_nanosleep_time32)
-#define __NR_mremap 163
-__SYSCALL(__NR_mremap, sys_mremap)
-#define __NR_setresuid 164
-__SYSCALL(__NR_setresuid, sys_setresuid16)
-#define __NR_getresuid 165
-__SYSCALL(__NR_getresuid, sys_getresuid16)
- /* 166 was sys_vm86 */
-__SYSCALL(166, sys_ni_syscall)
- /* 167 was sys_query_module */
-__SYSCALL(167, sys_ni_syscall)
-#define __NR_poll 168
-__SYSCALL(__NR_poll, sys_poll)
-#define __NR_nfsservctl 169
-__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-#define __NR_setresgid 170
-__SYSCALL(__NR_setresgid, sys_setresgid16)
-#define __NR_getresgid 171
-__SYSCALL(__NR_getresgid, sys_getresgid16)
-#define __NR_prctl 172
-__SYSCALL(__NR_prctl, sys_prctl)
-#define __NR_rt_sigreturn 173
-__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn)
-#define __NR_rt_sigaction 174
-__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
-#define __NR_rt_sigprocmask 175
-__SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
-#define __NR_rt_sigpending 176
-__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
-#define __NR_rt_sigtimedwait 177
-__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
-#define __NR_rt_sigqueueinfo 178
-__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
-#define __NR_rt_sigsuspend 179
-__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
-#define __NR_pread64 180
-__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64)
-#define __NR_pwrite64 181
-__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64)
-#define __NR_chown 182
-__SYSCALL(__NR_chown, sys_chown16)
-#define __NR_getcwd 183
-__SYSCALL(__NR_getcwd, sys_getcwd)
-#define __NR_capget 184
-__SYSCALL(__NR_capget, sys_capget)
-#define __NR_capset 185
-__SYSCALL(__NR_capset, sys_capset)
-#define __NR_sigaltstack 186
-__SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack)
-#define __NR_sendfile 187
-__SYSCALL(__NR_sendfile, compat_sys_sendfile)
- /* 188 reserved */
-__SYSCALL(188, sys_ni_syscall)
- /* 189 reserved */
-__SYSCALL(189, sys_ni_syscall)
-#define __NR_vfork 190
-__SYSCALL(__NR_vfork, sys_vfork)
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2)
-#define __NR_truncate64 193
-__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64)
-#define __NR_ftruncate64 194
-__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64)
-#define __NR_stat64 195
-__SYSCALL(__NR_stat64, sys_stat64)
-#define __NR_lstat64 196
-__SYSCALL(__NR_lstat64, sys_lstat64)
-#define __NR_fstat64 197
-__SYSCALL(__NR_fstat64, sys_fstat64)
-#define __NR_lchown32 198
-__SYSCALL(__NR_lchown32, sys_lchown)
-#define __NR_getuid32 199
-__SYSCALL(__NR_getuid32, sys_getuid)
-#define __NR_getgid32 200
-__SYSCALL(__NR_getgid32, sys_getgid)
-#define __NR_geteuid32 201
-__SYSCALL(__NR_geteuid32, sys_geteuid)
-#define __NR_getegid32 202
-__SYSCALL(__NR_getegid32, sys_getegid)
-#define __NR_setreuid32 203
-__SYSCALL(__NR_setreuid32, sys_setreuid)
-#define __NR_setregid32 204
-__SYSCALL(__NR_setregid32, sys_setregid)
-#define __NR_getgroups32 205
-__SYSCALL(__NR_getgroups32, sys_getgroups)
-#define __NR_setgroups32 206
-__SYSCALL(__NR_setgroups32, sys_setgroups)
-#define __NR_fchown32 207
-__SYSCALL(__NR_fchown32, sys_fchown)
-#define __NR_setresuid32 208
-__SYSCALL(__NR_setresuid32, sys_setresuid)
-#define __NR_getresuid32 209
-__SYSCALL(__NR_getresuid32, sys_getresuid)
-#define __NR_setresgid32 210
-__SYSCALL(__NR_setresgid32, sys_setresgid)
-#define __NR_getresgid32 211
-__SYSCALL(__NR_getresgid32, sys_getresgid)
-#define __NR_chown32 212
-__SYSCALL(__NR_chown32, sys_chown)
-#define __NR_setuid32 213
-__SYSCALL(__NR_setuid32, sys_setuid)
-#define __NR_setgid32 214
-__SYSCALL(__NR_setgid32, sys_setgid)
-#define __NR_setfsuid32 215
-__SYSCALL(__NR_setfsuid32, sys_setfsuid)
-#define __NR_setfsgid32 216
-__SYSCALL(__NR_setfsgid32, sys_setfsgid)
-#define __NR_getdents64 217
-__SYSCALL(__NR_getdents64, sys_getdents64)
-#define __NR_pivot_root 218
-__SYSCALL(__NR_pivot_root, sys_pivot_root)
-#define __NR_mincore 219
-__SYSCALL(__NR_mincore, sys_mincore)
-#define __NR_madvise 220
-__SYSCALL(__NR_madvise, sys_madvise)
-#define __NR_fcntl64 221
-__SYSCALL(__NR_fcntl64, compat_sys_fcntl64)
- /* 222 for tux */
-__SYSCALL(222, sys_ni_syscall)
- /* 223 is unused */
-__SYSCALL(223, sys_ni_syscall)
-#define __NR_gettid 224
-__SYSCALL(__NR_gettid, sys_gettid)
-#define __NR_readahead 225
-__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead)
-#define __NR_setxattr 226
-__SYSCALL(__NR_setxattr, sys_setxattr)
-#define __NR_lsetxattr 227
-__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
-#define __NR_fsetxattr 228
-__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
-#define __NR_getxattr 229
-__SYSCALL(__NR_getxattr, sys_getxattr)
-#define __NR_lgetxattr 230
-__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
-#define __NR_fgetxattr 231
-__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
-#define __NR_listxattr 232
-__SYSCALL(__NR_listxattr, sys_listxattr)
-#define __NR_llistxattr 233
-__SYSCALL(__NR_llistxattr, sys_llistxattr)
-#define __NR_flistxattr 234
-__SYSCALL(__NR_flistxattr, sys_flistxattr)
-#define __NR_removexattr 235
-__SYSCALL(__NR_removexattr, sys_removexattr)
-#define __NR_lremovexattr 236
-__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
-#define __NR_fremovexattr 237
-__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-#define __NR_tkill 238
-__SYSCALL(__NR_tkill, sys_tkill)
-#define __NR_sendfile64 239
-__SYSCALL(__NR_sendfile64, sys_sendfile64)
-#define __NR_futex 240
-__SYSCALL(__NR_futex, sys_futex_time32)
-#define __NR_sched_setaffinity 241
-__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
-#define __NR_sched_getaffinity 242
-__SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity)
-#define __NR_io_setup 243
-__SYSCALL(__NR_io_setup, compat_sys_io_setup)
-#define __NR_io_destroy 244
-__SYSCALL(__NR_io_destroy, sys_io_destroy)
-#define __NR_io_getevents 245
-__SYSCALL(__NR_io_getevents, sys_io_getevents_time32)
-#define __NR_io_submit 246
-__SYSCALL(__NR_io_submit, compat_sys_io_submit)
-#define __NR_io_cancel 247
-__SYSCALL(__NR_io_cancel, sys_io_cancel)
-#define __NR_exit_group 248
-__SYSCALL(__NR_exit_group, sys_exit_group)
- /* 249 was lookup_dcookie */
-__SYSCALL(249, sys_ni_syscall)
-#define __NR_epoll_create 250
-__SYSCALL(__NR_epoll_create, sys_epoll_create)
-#define __NR_epoll_ctl 251
-__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
-#define __NR_epoll_wait 252
-__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
-#define __NR_remap_file_pages 253
-__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
- /* 254 for set_thread_area */
-__SYSCALL(254, sys_ni_syscall)
- /* 255 for get_thread_area */
-__SYSCALL(255, sys_ni_syscall)
-#define __NR_set_tid_address 256
-__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
-#define __NR_timer_create 257
-__SYSCALL(__NR_timer_create, compat_sys_timer_create)
-#define __NR_timer_settime 258
-__SYSCALL(__NR_timer_settime, sys_timer_settime32)
-#define __NR_timer_gettime 259
-__SYSCALL(__NR_timer_gettime, sys_timer_gettime32)
-#define __NR_timer_getoverrun 260
-__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
-#define __NR_timer_delete 261
-__SYSCALL(__NR_timer_delete, sys_timer_delete)
-#define __NR_clock_settime 262
-__SYSCALL(__NR_clock_settime, sys_clock_settime32)
-#define __NR_clock_gettime 263
-__SYSCALL(__NR_clock_gettime, sys_clock_gettime32)
-#define __NR_clock_getres 264
-__SYSCALL(__NR_clock_getres, sys_clock_getres_time32)
-#define __NR_clock_nanosleep 265
-__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep_time32)
-#define __NR_statfs64 266
-__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
-#define __NR_fstatfs64 267
-__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
-#define __NR_tgkill 268
-__SYSCALL(__NR_tgkill, sys_tgkill)
-#define __NR_utimes 269
-__SYSCALL(__NR_utimes, sys_utimes_time32)
-#define __NR_arm_fadvise64_64 270
-__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
-#define __NR_pciconfig_iobase 271
-__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
-#define __NR_pciconfig_read 272
-__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read)
-#define __NR_pciconfig_write 273
-__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write)
-#define __NR_mq_open 274
-__SYSCALL(__NR_mq_open, compat_sys_mq_open)
-#define __NR_mq_unlink 275
-__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
-#define __NR_mq_timedsend 276
-__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend_time32)
-#define __NR_mq_timedreceive 277
-__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive_time32)
-#define __NR_mq_notify 278
-__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
-#define __NR_mq_getsetattr 279
-__SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr)
-#define __NR_waitid 280
-__SYSCALL(__NR_waitid, compat_sys_waitid)
-#define __NR_socket 281
-__SYSCALL(__NR_socket, sys_socket)
-#define __NR_bind 282
-__SYSCALL(__NR_bind, sys_bind)
-#define __NR_connect 283
-__SYSCALL(__NR_connect, sys_connect)
-#define __NR_listen 284
-__SYSCALL(__NR_listen, sys_listen)
-#define __NR_accept 285
-__SYSCALL(__NR_accept, sys_accept)
-#define __NR_getsockname 286
-__SYSCALL(__NR_getsockname, sys_getsockname)
-#define __NR_getpeername 287
-__SYSCALL(__NR_getpeername, sys_getpeername)
-#define __NR_socketpair 288
-__SYSCALL(__NR_socketpair, sys_socketpair)
-#define __NR_send 289
-__SYSCALL(__NR_send, sys_send)
-#define __NR_sendto 290
-__SYSCALL(__NR_sendto, sys_sendto)
-#define __NR_recv 291
-__SYSCALL(__NR_recv, compat_sys_recv)
-#define __NR_recvfrom 292
-__SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
-#define __NR_shutdown 293
-__SYSCALL(__NR_shutdown, sys_shutdown)
-#define __NR_setsockopt 294
-__SYSCALL(__NR_setsockopt, sys_setsockopt)
-#define __NR_getsockopt 295
-__SYSCALL(__NR_getsockopt, sys_getsockopt)
-#define __NR_sendmsg 296
-__SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
-#define __NR_recvmsg 297
-__SYSCALL(__NR_recvmsg, compat_sys_recvmsg)
-#define __NR_semop 298
-__SYSCALL(__NR_semop, sys_semop)
-#define __NR_semget 299
-__SYSCALL(__NR_semget, sys_semget)
-#define __NR_semctl 300
-__SYSCALL(__NR_semctl, compat_sys_old_semctl)
-#define __NR_msgsnd 301
-__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
-#define __NR_msgrcv 302
-__SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
-#define __NR_msgget 303
-__SYSCALL(__NR_msgget, sys_msgget)
-#define __NR_msgctl 304
-__SYSCALL(__NR_msgctl, compat_sys_old_msgctl)
-#define __NR_shmat 305
-__SYSCALL(__NR_shmat, compat_sys_shmat)
-#define __NR_shmdt 306
-__SYSCALL(__NR_shmdt, sys_shmdt)
-#define __NR_shmget 307
-__SYSCALL(__NR_shmget, sys_shmget)
-#define __NR_shmctl 308
-__SYSCALL(__NR_shmctl, compat_sys_old_shmctl)
-#define __NR_add_key 309
-__SYSCALL(__NR_add_key, sys_add_key)
-#define __NR_request_key 310
-__SYSCALL(__NR_request_key, sys_request_key)
-#define __NR_keyctl 311
-__SYSCALL(__NR_keyctl, compat_sys_keyctl)
-#define __NR_semtimedop 312
-__SYSCALL(__NR_semtimedop, sys_semtimedop_time32)
-#define __NR_vserver 313
-__SYSCALL(__NR_vserver, sys_ni_syscall)
-#define __NR_ioprio_set 314
-__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
-#define __NR_ioprio_get 315
-__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-#define __NR_inotify_init 316
-__SYSCALL(__NR_inotify_init, sys_inotify_init)
-#define __NR_inotify_add_watch 317
-__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
-#define __NR_inotify_rm_watch 318
-__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-#define __NR_mbind 319
-__SYSCALL(__NR_mbind, sys_mbind)
-#define __NR_get_mempolicy 320
-__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
-#define __NR_set_mempolicy 321
-__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
-#define __NR_openat 322
-__SYSCALL(__NR_openat, compat_sys_openat)
-#define __NR_mkdirat 323
-__SYSCALL(__NR_mkdirat, sys_mkdirat)
-#define __NR_mknodat 324
-__SYSCALL(__NR_mknodat, sys_mknodat)
-#define __NR_fchownat 325
-__SYSCALL(__NR_fchownat, sys_fchownat)
-#define __NR_futimesat 326
-__SYSCALL(__NR_futimesat, sys_futimesat_time32)
-#define __NR_fstatat64 327
-__SYSCALL(__NR_fstatat64, sys_fstatat64)
-#define __NR_unlinkat 328
-__SYSCALL(__NR_unlinkat, sys_unlinkat)
-#define __NR_renameat 329
-__SYSCALL(__NR_renameat, sys_renameat)
-#define __NR_linkat 330
-__SYSCALL(__NR_linkat, sys_linkat)
-#define __NR_symlinkat 331
-__SYSCALL(__NR_symlinkat, sys_symlinkat)
-#define __NR_readlinkat 332
-__SYSCALL(__NR_readlinkat, sys_readlinkat)
-#define __NR_fchmodat 333
-__SYSCALL(__NR_fchmodat, sys_fchmodat)
-#define __NR_faccessat 334
-__SYSCALL(__NR_faccessat, sys_faccessat)
-#define __NR_pselect6 335
-__SYSCALL(__NR_pselect6, compat_sys_pselect6_time32)
-#define __NR_ppoll 336
-__SYSCALL(__NR_ppoll, compat_sys_ppoll_time32)
-#define __NR_unshare 337
-__SYSCALL(__NR_unshare, sys_unshare)
-#define __NR_set_robust_list 338
-__SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list)
-#define __NR_get_robust_list 339
-__SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
-#define __NR_splice 340
-__SYSCALL(__NR_splice, sys_splice)
-#define __NR_sync_file_range2 341
-__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2)
-#define __NR_tee 342
-__SYSCALL(__NR_tee, sys_tee)
-#define __NR_vmsplice 343
-__SYSCALL(__NR_vmsplice, sys_vmsplice)
-#define __NR_move_pages 344
-__SYSCALL(__NR_move_pages, sys_move_pages)
-#define __NR_getcpu 345
-__SYSCALL(__NR_getcpu, sys_getcpu)
-#define __NR_epoll_pwait 346
-__SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
-#define __NR_kexec_load 347
-__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
-#define __NR_utimensat 348
-__SYSCALL(__NR_utimensat, sys_utimensat_time32)
-#define __NR_signalfd 349
-__SYSCALL(__NR_signalfd, compat_sys_signalfd)
-#define __NR_timerfd_create 350
-__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
-#define __NR_eventfd 351
-__SYSCALL(__NR_eventfd, sys_eventfd)
-#define __NR_fallocate 352
-__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
-#define __NR_timerfd_settime 353
-__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime32)
-#define __NR_timerfd_gettime 354
-__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime32)
-#define __NR_signalfd4 355
-__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
-#define __NR_eventfd2 356
-__SYSCALL(__NR_eventfd2, sys_eventfd2)
-#define __NR_epoll_create1 357
-__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
-#define __NR_dup3 358
-__SYSCALL(__NR_dup3, sys_dup3)
-#define __NR_pipe2 359
-__SYSCALL(__NR_pipe2, sys_pipe2)
-#define __NR_inotify_init1 360
-__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
-#define __NR_preadv 361
-__SYSCALL(__NR_preadv, compat_sys_preadv)
-#define __NR_pwritev 362
-__SYSCALL(__NR_pwritev, compat_sys_pwritev)
-#define __NR_rt_tgsigqueueinfo 363
-__SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
-#define __NR_perf_event_open 364
-__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
-#define __NR_recvmmsg 365
-__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg_time32)
-#define __NR_accept4 366
-__SYSCALL(__NR_accept4, sys_accept4)
-#define __NR_fanotify_init 367
-__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
-#define __NR_fanotify_mark 368
-__SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark)
-#define __NR_prlimit64 369
-__SYSCALL(__NR_prlimit64, sys_prlimit64)
-#define __NR_name_to_handle_at 370
-__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
-#define __NR_open_by_handle_at 371
-__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
-#define __NR_clock_adjtime 372
-__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime32)
-#define __NR_syncfs 373
-__SYSCALL(__NR_syncfs, sys_syncfs)
-#define __NR_sendmmsg 374
-__SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg)
-#define __NR_setns 375
-__SYSCALL(__NR_setns, sys_setns)
-#define __NR_process_vm_readv 376
-__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
-#define __NR_process_vm_writev 377
-__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
-#define __NR_kcmp 378
-__SYSCALL(__NR_kcmp, sys_kcmp)
-#define __NR_finit_module 379
-__SYSCALL(__NR_finit_module, sys_finit_module)
-#define __NR_sched_setattr 380
-__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
-#define __NR_sched_getattr 381
-__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
-#define __NR_renameat2 382
-__SYSCALL(__NR_renameat2, sys_renameat2)
-#define __NR_seccomp 383
-__SYSCALL(__NR_seccomp, sys_seccomp)
-#define __NR_getrandom 384
-__SYSCALL(__NR_getrandom, sys_getrandom)
-#define __NR_memfd_create 385
-__SYSCALL(__NR_memfd_create, sys_memfd_create)
-#define __NR_bpf 386
-__SYSCALL(__NR_bpf, sys_bpf)
-#define __NR_execveat 387
-__SYSCALL(__NR_execveat, compat_sys_execveat)
-#define __NR_userfaultfd 388
-__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
-#define __NR_membarrier 389
-__SYSCALL(__NR_membarrier, sys_membarrier)
-#define __NR_mlock2 390
-__SYSCALL(__NR_mlock2, sys_mlock2)
-#define __NR_copy_file_range 391
-__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
-#define __NR_preadv2 392
-__SYSCALL(__NR_preadv2, compat_sys_preadv2)
-#define __NR_pwritev2 393
-__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
-#define __NR_pkey_mprotect 394
-__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
-#define __NR_pkey_alloc 395
-__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
-#define __NR_pkey_free 396
-__SYSCALL(__NR_pkey_free, sys_pkey_free)
-#define __NR_statx 397
-__SYSCALL(__NR_statx, sys_statx)
-#define __NR_rseq 398
-__SYSCALL(__NR_rseq, sys_rseq)
-#define __NR_io_pgetevents 399
-__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
-#define __NR_migrate_pages 400
-__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
-#define __NR_kexec_file_load 401
-__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
-/* 402 is unused */
-#define __NR_clock_gettime64 403
-__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
-#define __NR_clock_settime64 404
-__SYSCALL(__NR_clock_settime64, sys_clock_settime)
-#define __NR_clock_adjtime64 405
-__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
-#define __NR_clock_getres_time64 406
-__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
-#define __NR_clock_nanosleep_time64 407
-__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
-#define __NR_timer_gettime64 408
-__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
-#define __NR_timer_settime64 409
-__SYSCALL(__NR_timer_settime64, sys_timer_settime)
-#define __NR_timerfd_gettime64 410
-__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
-#define __NR_timerfd_settime64 411
-__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
-#define __NR_utimensat_time64 412
-__SYSCALL(__NR_utimensat_time64, sys_utimensat)
-#define __NR_pselect6_time64 413
-__SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64)
-#define __NR_ppoll_time64 414
-__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64)
-#define __NR_io_pgetevents_time64 416
-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
-#define __NR_recvmmsg_time64 417
-__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64)
-#define __NR_mq_timedsend_time64 418
-__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
-#define __NR_mq_timedreceive_time64 419
-__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
-#define __NR_semtimedop_time64 420
-__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
-#define __NR_rt_sigtimedwait_time64 421
-__SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
-#define __NR_futex_time64 422
-__SYSCALL(__NR_futex_time64, sys_futex)
-#define __NR_sched_rr_get_interval_time64 423
-__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
-#define __NR_pidfd_send_signal 424
-__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
-#define __NR_io_uring_setup 425
-__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
-#define __NR_io_uring_enter 426
-__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
-#define __NR_io_uring_register 427
-__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
-#define __NR_open_tree 428
-__SYSCALL(__NR_open_tree, sys_open_tree)
-#define __NR_move_mount 429
-__SYSCALL(__NR_move_mount, sys_move_mount)
-#define __NR_fsopen 430
-__SYSCALL(__NR_fsopen, sys_fsopen)
-#define __NR_fsconfig 431
-__SYSCALL(__NR_fsconfig, sys_fsconfig)
-#define __NR_fsmount 432
-__SYSCALL(__NR_fsmount, sys_fsmount)
-#define __NR_fspick 433
-__SYSCALL(__NR_fspick, sys_fspick)
-#define __NR_pidfd_open 434
-__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
-#define __NR_clone3 435
-__SYSCALL(__NR_clone3, sys_clone3)
-#define __NR_close_range 436
-__SYSCALL(__NR_close_range, sys_close_range)
-#define __NR_openat2 437
-__SYSCALL(__NR_openat2, sys_openat2)
-#define __NR_pidfd_getfd 438
-__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
-#define __NR_faccessat2 439
-__SYSCALL(__NR_faccessat2, sys_faccessat2)
-#define __NR_process_madvise 440
-__SYSCALL(__NR_process_madvise, sys_process_madvise)
-#define __NR_epoll_pwait2 441
-__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
-#define __NR_mount_setattr 442
-__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
-#define __NR_quotactl_fd 443
-__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
-#define __NR_landlock_create_ruleset 444
-__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
-#define __NR_landlock_add_rule 445
-__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
-#define __NR_landlock_restrict_self 446
-__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
-#define __NR_process_mrelease 448
-__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
-#define __NR_futex_waitv 449
-__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
-#define __NR_set_mempolicy_home_node 450
-__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
-#define __NR_cachestat 451
-__SYSCALL(__NR_cachestat, sys_cachestat)
-#define __NR_fchmodat2 452
-__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
-#define __NR_map_shadow_stack 453
-__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
-#define __NR_futex_wake 454
-__SYSCALL(__NR_futex_wake, sys_futex_wake)
-#define __NR_futex_wait 455
-__SYSCALL(__NR_futex_wait, sys_futex_wait)
-#define __NR_futex_requeue 456
-__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
-#define __NR_statmount 457
-__SYSCALL(__NR_statmount, sys_statmount)
-#define __NR_listmount 458
-__SYSCALL(__NR_listmount, sys_listmount)
-#define __NR_lsm_get_self_attr 459
-__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
-#define __NR_lsm_set_self_attr 460
-__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
-#define __NR_lsm_list_modules 461
-__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
-#define __NR_mseal 462
-__SYSCALL(__NR_mseal, sys_mseal)
-/*
- * Please add new compat syscalls above this comment and update
- * __NR_compat_syscalls in asm/unistd.h.
- */
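Every entry deleted above follows the same two-line shape: a numeric "#define __NR_x N" followed by "__SYSCALL(__NR_x, handler)". For readers unfamiliar with that convention, the standalone sketch below shows roughly how such an entry list is typically consumed: the includer defines __SYSCALL() to emit a designated array initializer after defaulting every slot to a not-implemented handler. All demo_* names are hypothetical; this is an illustration of the macro pattern, not the kernel's actual compat dispatch code.

#include <stdio.h>

static long demo_sys_ni(void)     { return -38; }   /* -38 == -ENOSYS on Linux */
static long demo_sys_getpid(void) { return 1234; }
static long demo_sys_gettid(void) { return 5678; }

/* The includer defines __SYSCALL() to emit one designated initializer per entry. */
#define __SYSCALL(nr, handler) [nr] = handler,

static long (*const demo_call_table[3])(void) = {
	[0 ... 2] = demo_sys_ni,       /* default all slots (GNU range initializer) */
	__SYSCALL(1, demo_sys_getpid)  /* same shape as the removed entries */
	__SYSCALL(2, demo_sys_gettid)
};

int main(void)
{
	printf("%ld\n", demo_call_table[2]());  /* dispatches demo_sys_gettid -> 5678 */
	printf("%ld\n", demo_call_table[0]());  /* unassigned slot -> -38 */
	return 0;
}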
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index ecb6fd4c3c64..778c1202bbbf 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -8,7 +8,7 @@
#ifndef __ASSEMBLY__
#include <asm/barrier.h>
-#include <asm/unistd.h>
+#include <asm/unistd_compat_32.h>
#include <asm/errno.h>
#include <asm/vdso/compat_barrier.h>
@@ -24,7 +24,7 @@ int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
register struct timezone *tz asm("r1") = _tz;
register struct __kernel_old_timeval *tv asm("r0") = _tv;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_gettimeofday;
+ register long nr asm("r7") = __NR_compat32_gettimeofday;
asm volatile(
" swi #0\n"
@@ -41,7 +41,7 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
register struct __kernel_timespec *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_gettime64;
+ register long nr asm("r7") = __NR_compat32_clock_gettime64;
asm volatile(
" swi #0\n"
@@ -58,7 +58,7 @@ long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
register struct old_timespec32 *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_gettime;
+ register long nr asm("r7") = __NR_compat32_clock_gettime;
asm volatile(
" swi #0\n"
@@ -75,7 +75,7 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
register struct __kernel_timespec *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_getres_time64;
+ register long nr asm("r7") = __NR_compat32_clock_getres_time64;
asm volatile(
" swi #0\n"
@@ -92,7 +92,7 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
register struct old_timespec32 *ts asm("r1") = _ts;
register clockid_t clkid asm("r0") = _clkid;
register long ret asm ("r0");
- register long nr asm("r7") = __NR_compat_clock_getres;
+ register long nr asm("r7") = __NR_compat32_clock_getres;
asm volatile(
" swi #0\n"
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 14251abee23c..824ca6987a51 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -27,20 +27,15 @@ static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
}
#define prep_zero_mask(a, bits, c) (bits)
+#define create_zero_mask(bits) (bits)
+#define find_zero(bits) (__ffs(bits) >> 3)
-static inline unsigned long create_zero_mask(unsigned long bits)
+static inline unsigned long zero_bytemask(unsigned long bits)
{
bits = (bits - 1) & ~bits;
return bits >> 7;
}
-static inline unsigned long find_zero(unsigned long mask)
-{
- return fls64(mask) >> 3;
-}
-
-#define zero_bytemask(mask) (mask)
-
#else /* __AARCH64EB__ */
#include <asm-generic/word-at-a-time.h>
#endif
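The helpers changed above implement the kernel's word-at-a-time zero-byte search: has_zero() flags a word containing a NUL byte, and create_zero_mask()/find_zero() turn that flag word into a byte index (here, for big-endian arm64, via __ffs(bits) >> 3). The following is a rough, hedged illustration of the underlying technique only, written for a little-endian user-space host with hypothetical demo_* names; it is not the arm64 big-endian code from this hunk.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL

/* Nonzero iff some byte of x is zero: the classic (x - 1) & ~x & 0x80.. trick. */
static uint64_t demo_has_zero_byte(uint64_t x)
{
	return (x - ONES) & ~x & HIGHS;
}

/* Index of the first zero byte, given a nonzero mask from demo_has_zero_byte(). */
static unsigned int demo_find_zero_byte(uint64_t mask)
{
	return __builtin_ctzll(mask) >> 3;	/* analogous to find_zero(): __ffs() >> 3 */
}

static size_t demo_strlen(const char *s)
{
	size_t len = 0;
	uint64_t word, mask;

	for (;;) {
		memcpy(&word, s + len, sizeof(word));	/* unaligned-safe word load */
		mask = demo_has_zero_byte(word);
		if (mask)
			return len + demo_find_zero_byte(mask);
		len += sizeof(word);
	}
}

int main(void)
{
	char buf[32] = "word-at-a-time";	/* padded so whole-word reads stay in bounds */

	printf("%zu\n", demo_strlen(buf));	/* prints 14 */
	return 0;
}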