author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-09-09 14:25:11 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-09-09 14:25:11 -0700
commit    | 1b37a0a2d46f0c5fa5eee170ddeeb83342faa117 (patch)
tree      | 162022b849419b0f3606c0388f6dc15e4e0925e5 /arch
parent    | 2a5a4326e58339a26cd1510259e7310b8c0980ff (diff)
parent    | c6a906cce61a8015b622707f9c12003f90673399 (diff)
Merge tag 'riscv-for-linus-6.6-mw2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull more RISC-V updates from Palmer Dabbelt:
- The kernel now dynamically probes for misaligned access speed, as
opposed to relying on a table of known implementations.
- Support for non-coherent devices on systems using the Andes AX45MP
  core, including the Renesas RZ/Five SoC.
- Support for the V extension in ptrace(), again.
- Support for KASLR.
- Support for the BPF prog pack allocator in RISC-V.
- A handful of bug fixes and cleanups.
* tag 'riscv-for-linus-6.6-mw2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (25 commits)
soc: renesas: Kconfig: For ARCH_R9A07G043 select the required configs if dependencies are met
riscv: Kconfig.errata: Add dependency for RISCV_SBI in ERRATA_ANDES config
riscv: Kconfig.errata: Drop dependency for MMU in ERRATA_ANDES_CMO config
riscv: Kconfig: Select DMA_DIRECT_REMAP only if MMU is enabled
bpf, riscv: use prog pack allocator in the BPF JIT
riscv: implement a memset like function for text
riscv: extend patch_text_nosync() for multiple pages
bpf: make bpf_prog_pack allocator portable
riscv: libstub: Implement KASLR by using generic functions
libstub: Fix compilation warning for rv32
arm64: libstub: Move KASLR handling functions to kaslr.c
riscv: Dump out kernel offset information on panic
riscv: Introduce virtual kernel mapping KASLR
RISC-V: Add ptrace support for vectors
soc: renesas: Kconfig: Select the required configs for RZ/Five SoC
cache: Add L2 cache management for Andes AX45MP RISC-V core
dt-bindings: cache: andestech,ax45mp-cache: Add DT binding documentation for L2 cache controller
riscv: mm: dma-noncoherent: nonstandard cache operations support
riscv: errata: Add Andes alternative ports
riscv: asm: vendorid_list: Add Andes Technology to the vendors list
...
Diffstat (limited to 'arch')
35 files changed, 909 insertions, 82 deletions
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index f482b994c608..bcd5622aa096 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -156,4 +156,6 @@ static inline void efi_capsule_flush_cache_range(void *addr, int size)
 
 efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f);
 
+void efi_icache_sync(unsigned long start, unsigned long end);
+
 #endif /* _ASM_EFI_H */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 5138dce1a0b4..d607ab0f7c6d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -273,7 +273,14 @@ config RISCV_DMA_NONCOHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB
-	select DMA_DIRECT_REMAP
+	select DMA_DIRECT_REMAP if MMU
+
+config RISCV_NONSTANDARD_CACHE_OPS
+	bool
+	depends on RISCV_DMA_NONCOHERENT
+	help
+	  This enables function pointer support for non-standard noncoherent
+	  systems to handle cache management.
 
 config AS_HAS_INSN
 	def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)
@@ -713,6 +720,25 @@ config RELOCATABLE
 
 	  If unsure, say N.
 
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	select RELOCATABLE
+	depends on MMU && 64BIT && !XIP_KERNEL
+	help
+	  Randomizes the virtual address at which the kernel image is
+	  loaded, as a security feature that deters exploit attempts
+	  relying on knowledge of the location of kernel internals.
+
+	  It is the bootloader's job to provide entropy, by passing a
+	  random u64 value in /chosen/kaslr-seed at kernel entry.
+
+	  When booting via the UEFI stub, it will invoke the firmware's
+	  EFI_RNG_PROTOCOL implementation (if available) to supply entropy
+	  to the kernel proper. In addition, it will randomise the physical
+	  location of the kernel Image as well.
+
+	  If unsure, say N.
+
 endmenu # "Kernel features"
 
 menu "Boot options"
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 0c8f4652cd82..566bcefeab50 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -1,5 +1,26 @@
 menu "CPU errata selection"
 
+config ERRATA_ANDES
+	bool "Andes AX45MP errata"
+	depends on RISCV_ALTERNATIVE && RISCV_SBI
+	help
+	  All Andes errata Kconfig depend on this Kconfig. Disabling
+	  this Kconfig will disable all Andes errata. Please say "Y"
+	  here if your platform uses Andes CPU cores.
+
+	  Otherwise, please say "N" here to avoid unnecessary overhead.
+
+config ERRATA_ANDES_CMO
+	bool "Apply Andes cache management errata"
+	depends on ERRATA_ANDES && ARCH_R9A07G043
+	select RISCV_DMA_NONCOHERENT
+	default y
+	help
+	  This will apply the cache management errata to handle the
+	  non-standard handling on non-coherent operations on Andes cores.
+
+	  If you don't know what to do here, say "Y".
+
 config ERRATA_SIFIVE
 	bool "SiFive errata"
 	depends on RISCV_ALTERNATIVE
diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile
index 7b2637c8c332..8a2739485123 100644
--- a/arch/riscv/errata/Makefile
+++ b/arch/riscv/errata/Makefile
@@ -2,5 +2,6 @@ ifdef CONFIG_RELOCATABLE
 KBUILD_CFLAGS += -fno-pie
 endif
 
+obj-$(CONFIG_ERRATA_ANDES) += andes/
 obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
 obj-$(CONFIG_ERRATA_THEAD) += thead/
diff --git a/arch/riscv/errata/andes/Makefile b/arch/riscv/errata/andes/Makefile
new file mode 100644
index 000000000000..2d644e19caef
--- /dev/null
+++ b/arch/riscv/errata/andes/Makefile
@@ -0,0 +1 @@
+obj-y += errata.o
diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c
new file mode 100644
index 000000000000..197db68cc8da
--- /dev/null
+++ b/arch/riscv/errata/andes/errata.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Erratas to be applied for Andes CPU cores
+ *
+ * Copyright (C) 2023 Renesas Electronics Corporation.
+ *
+ * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ */
+
+#include <linux/memory.h>
+#include <linux/module.h>
+
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/errata_list.h>
+#include <asm/patch.h>
+#include <asm/processor.h>
+#include <asm/sbi.h>
+#include <asm/vendorid_list.h>
+
+#define ANDESTECH_AX45MP_MARCHID	0x8000000000008a45UL
+#define ANDESTECH_AX45MP_MIMPID		0x500UL
+#define ANDESTECH_SBI_EXT_ANDES		0x0900031E
+
+#define ANDES_SBI_EXT_IOCP_SW_WORKAROUND	1
+
+static long ax45mp_iocp_sw_workaround(void)
+{
+	struct sbiret ret;
+
+	/*
+	 * ANDES_SBI_EXT_IOCP_SW_WORKAROUND SBI EXT checks if the IOCP is missing and
+	 * cache is controllable only then CMO will be applied to the platform.
+	 */
+	ret = sbi_ecall(ANDESTECH_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND,
+			0, 0, 0, 0, 0, 0);
+
+	return ret.error ? 0 : ret.value;
+}
+
+static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
+{
+	if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
+		return false;
+
+	if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
+		return false;
+
+	if (!ax45mp_iocp_sw_workaround())
+		return false;
+
+	/* Set this just to make core cbo code happy */
+	riscv_cbom_block_size = 1;
+	riscv_noncoherent_supported();
+
+	return true;
+}
+
+void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+					      unsigned long archid, unsigned long impid,
+					      unsigned int stage)
+{
+	errata_probe_iocp(stage, archid, impid);
+
+	/* we have nothing to patch here ATM so just return back */
+}
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index be84b14f0118..0554ed4bf087 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -120,11 +120,3 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
 	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
 		local_flush_icache_all();
 }
-
-void thead_feature_probe_func(unsigned int cpu,
-			      unsigned long archid,
-			      unsigned long impid)
-{
-	if ((archid == 0) && (impid == 0))
-		per_cpu(misaligned_access_speed, cpu) = RISCV_HWPROBE_MISALIGNED_FAST;
-}
diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h
index 6a41537826a7..3c2b59b25017 100644
--- a/arch/riscv/include/asm/alternative.h
+++ b/arch/riscv/include/asm/alternative.h
@@ -30,7 +30,6 @@
 #define ALT_OLD_PTR(a)		__ALT_PTR(a, old_offset)
 #define ALT_ALT_PTR(a)		__ALT_PTR(a, alt_offset)
 
-void probe_vendor_features(unsigned int cpu);
 void __init apply_boot_alternatives(void);
 void __init apply_early_boot_alternatives(void);
 void apply_module_alternatives(void *start, size_t length);
@@ -46,6 +45,9 @@ struct alt_entry {
 	u32 patch_id;		/* The patch ID (erratum ID or cpufeature ID) */
 };
 
+void andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+			     unsigned long archid, unsigned long impid,
+			     unsigned int stage);
 void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
 			      unsigned long archid, unsigned long impid,
 			      unsigned int stage);
@@ -53,15 +55,11 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
 			     unsigned long archid, unsigned long impid,
 			     unsigned int stage);
 
-void thead_feature_probe_func(unsigned int cpu, unsigned long archid,
-			      unsigned long impid);
-
 void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
 				 unsigned int stage);
 
 #else /* CONFIG_RISCV_ALTERNATIVE */
 
-static inline void probe_vendor_features(unsigned int cpu) { }
 static inline void apply_boot_alternatives(void) { }
 static inline void apply_early_boot_alternatives(void) { }
 static inline void apply_module_alternatives(void *start, size_t length) { }
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 23fed53b8815..d0345bd659c9 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -30,4 +30,6 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
+void check_unaligned_access(int cpu);
+
 #endif
diff --git a/arch/riscv/include/asm/dma-noncoherent.h b/arch/riscv/include/asm/dma-noncoherent.h
new file mode 100644
index 000000000000..312cfa0858fb
--- /dev/null
+++ b/arch/riscv/include/asm/dma-noncoherent.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#ifndef __ASM_DMA_NONCOHERENT_H
+#define __ASM_DMA_NONCOHERENT_H
+
+#include <linux/dma-direct.h>
+
+/*
+ * struct riscv_nonstd_cache_ops - Structure for non-standard CMO function pointers
+ *
+ * @wback: Function pointer for cache writeback
+ * @inv: Function pointer for invalidating cache
+ * @wback_inv: Function pointer for flushing the cache (writeback + invalidating)
+ */
+struct riscv_nonstd_cache_ops {
+	void (*wback)(phys_addr_t paddr, size_t size);
+	void (*inv)(phys_addr_t paddr, size_t size);
+	void (*wback_inv)(phys_addr_t paddr, size_t size);
+};
+
+extern struct riscv_nonstd_cache_ops noncoherent_cache_ops;
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops);
+
+#endif	/* __ASM_DMA_NONCOHERENT_H */
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 8a6a128ec57f..46a355913b27 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -45,4 +45,6 @@ void arch_efi_call_virt_teardown(void);
 
 unsigned long stext_offset(void);
 
+void efi_icache_sync(unsigned long start, unsigned long end);
+
 #endif /* _ASM_EFI_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index fb1a810f3d8c..e2ecd01bfac7 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -11,6 +11,11 @@
 #include <asm/hwcap.h>
 #include <asm/vendorid_list.h>
 
+#ifdef CONFIG_ERRATA_ANDES
+#define ERRATA_ANDESTECH_NO_IOCP	0
+#define ERRATA_ANDESTECH_NUMBER		1
+#endif
+
 #ifdef CONFIG_ERRATA_SIFIVE
 #define	ERRATA_SIFIVE_CIP_453 0
 #define	ERRATA_SIFIVE_CIP_1200 1
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index b55ba20903ec..5488ecc337b6 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -106,6 +106,7 @@ typedef struct page *pgtable_t;
 struct kernel_mapping {
 	unsigned long page_offset;
 	unsigned long virt_addr;
+	unsigned long virt_offset;
 	uintptr_t phys_addr;
 	uintptr_t size;
 	/* Offset between linear mapping virtual address and kernel load address */
@@ -185,6 +186,8 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 
 #define sym_to_pfn(x)	__phys_to_pfn(__pa_symbol(x))
 
+unsigned long kaslr_offset(void);
+
 #endif /* __ASSEMBLY__ */
 
 #define virt_addr_valid(vaddr)	({						\
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index 63c98833d510..e88b52d39eac 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -7,6 +7,7 @@
 #define _ASM_RISCV_PATCH_H
 
 int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text_set_nosync(void *addr, u8 c, size_t len);
 int patch_text(void *addr, u32 *insns, int ninsns);
 
 extern int riscv_patch_in_stop_machine;
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index cb89af3f0704..e55407ace0c3 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -5,6 +5,7 @@
 #ifndef ASM_VENDOR_LIST_H
 #define ASM_VENDOR_LIST_H
 
+#define ANDESTECH_VENDOR_ID	0x31e
 #define SIFIVE_VENDOR_ID	0x489
 #define THEAD_VENDOR_ID		0x5b7
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
index 6d2d9afaabea..a38268b19c3d 100644
--- a/arch/riscv/include/uapi/asm/ptrace.h
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -108,13 +108,18 @@ struct __riscv_v_ext_state {
 	 * In signal handler, datap will be set a correct user stack offset
 	 * and vector registers will be copied to the address of datap
 	 * pointer.
-	 *
-	 * In ptrace syscall, datap will be set to zero and the vector
-	 * registers will be copied to the address right after this
-	 * structure.
 	 */
 };
 
+struct __riscv_v_regset_state {
+	unsigned long vstart;
+	unsigned long vl;
+	unsigned long vtype;
+	unsigned long vcsr;
+	unsigned long vlenb;
+	char vreg[];
+};
+
 /*
  * According to spec: The number of bits in a single vector register,
  * VLEN >= ELEN, which must be a power of 2, and must be no greater than
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 6ac56af42f4a..95cf25d48405 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -38,6 +38,7 @@ extra-y += vmlinux.lds
 obj-y	+= head.o
 obj-y	+= soc.o
 obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
+obj-y	+= copy-unaligned.o
 obj-y	+= cpu.o
 obj-y	+= cpufeature.o
 obj-y	+= entry.o
diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c
index 6b75788c18e6..319a1da0358b 100644
--- a/arch/riscv/kernel/alternative.c
+++ b/arch/riscv/kernel/alternative.c
@@ -27,8 +27,6 @@ struct cpu_manufacturer_info_t {
 	void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
 			   unsigned long archid, unsigned long impid,
 			   unsigned int stage);
-	void (*feature_probe_func)(unsigned int cpu, unsigned long archid,
-				   unsigned long impid);
 };
 
 static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
@@ -43,8 +41,12 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
 	cpu_mfr_info->imp_id = sbi_get_mimpid();
 #endif
 
-	cpu_mfr_info->feature_probe_func = NULL;
 	switch (cpu_mfr_info->vendor_id) {
+#ifdef CONFIG_ERRATA_ANDES
+	case ANDESTECH_VENDOR_ID:
+		cpu_mfr_info->patch_func = andes_errata_patch_func;
+		break;
+#endif
#ifdef CONFIG_ERRATA_SIFIVE
 	case SIFIVE_VENDOR_ID:
 		cpu_mfr_info->patch_func = sifive_errata_patch_func;
@@ -53,7 +55,6 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info
 #ifdef CONFIG_ERRATA_THEAD
 	case THEAD_VENDOR_ID:
 		cpu_mfr_info->patch_func = thead_errata_patch_func;
-		cpu_mfr_info->feature_probe_func = thead_feature_probe_func;
 		break;
 #endif
 	default:
@@ -143,20 +144,6 @@ void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
 	}
 }
 
-/* Called on each CPU as it starts */
-void probe_vendor_features(unsigned int cpu)
-{
-	struct cpu_manufacturer_info_t cpu_mfr_info;
-
-	riscv_fill_cpu_mfr_info(&cpu_mfr_info);
-	if (!cpu_mfr_info.feature_probe_func)
-		return;
-
-	cpu_mfr_info.feature_probe_func(cpu,
-					cpu_mfr_info.arch_id,
-					cpu_mfr_info.imp_id);
-}
-
 /*
  * This is called very early in the boot process (directly after we run
  * a feature detect on the boot CPU). No need to worry about other CPUs
@@ -211,7 +198,6 @@ void __init apply_boot_alternatives(void)
 	/* If called on non-boot cpu things could go wrong */
 	WARN_ON(smp_processor_id() != 0);
 
-	probe_vendor_features(0);
 	_apply_alternatives((struct alt_entry *)__alt_start,
 			    (struct alt_entry *)__alt_end,
 			    RISCV_ALTERNATIVES_BOOT);
diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
new file mode 100644
index 000000000000..cfdecfbaad62
--- /dev/null
+++ b/arch/riscv/kernel/copy-unaligned.S
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023 Rivos Inc. */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+	.text
+
+/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using word loads and stores. */
+/* Note: The size is truncated to a multiple of 8 * SZREG */
+ENTRY(__riscv_copy_words_unaligned)
+	andi  a4, a2, ~((8*SZREG)-1)
+	beqz  a4, 2f
+	add   a3, a1, a4
+1:
+	REG_L a4,       0(a1)
+	REG_L a5,   SZREG(a1)
+	REG_L a6, 2*SZREG(a1)
+	REG_L a7, 3*SZREG(a1)
+	REG_L t0, 4*SZREG(a1)
+	REG_L t1, 5*SZREG(a1)
+	REG_L t2, 6*SZREG(a1)
+	REG_L t3, 7*SZREG(a1)
+	REG_S a4,       0(a0)
+	REG_S a5,   SZREG(a0)
+	REG_S a6, 2*SZREG(a0)
+	REG_S a7, 3*SZREG(a0)
+	REG_S t0, 4*SZREG(a0)
+	REG_S t1, 5*SZREG(a0)
+	REG_S t2, 6*SZREG(a0)
+	REG_S t3, 7*SZREG(a0)
+	addi  a0, a0, 8*SZREG
+	addi  a1, a1, 8*SZREG
+	bltu  a1, a3, 1b
+
+2:
+	ret
+END(__riscv_copy_words_unaligned)
+
+/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using only byte accesses. */
+/* Note: The size is truncated to a multiple of 8 */
+ENTRY(__riscv_copy_bytes_unaligned)
+	andi a4, a2, ~(8-1)
+	beqz a4, 2f
+	add  a3, a1, a4
+1:
+	lb   a4, 0(a1)
+	lb   a5, 1(a1)
+	lb   a6, 2(a1)
+	lb   a7, 3(a1)
+	lb   t0, 4(a1)
+	lb   t1, 5(a1)
+	lb   t2, 6(a1)
+	lb   t3, 7(a1)
+	sb   a4, 0(a0)
+	sb   a5, 1(a0)
+	sb   a6, 2(a0)
+	sb   a7, 3(a0)
+	sb   t0, 4(a0)
+	sb   t1, 5(a0)
+	sb   t2, 6(a0)
+	sb   t3, 7(a0)
+	addi a0, a0, 8
+	addi a1, a1, 8
+	bltu a1, a3, 1b
+
+2:
+	ret
+END(__riscv_copy_bytes_unaligned)
diff --git a/arch/riscv/kernel/copy-unaligned.h b/arch/riscv/kernel/copy-unaligned.h
new file mode 100644
index 000000000000..e3d70d35b708
--- /dev/null
+++ b/arch/riscv/kernel/copy-unaligned.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos, Inc.
+ */
+#ifndef __RISCV_KERNEL_COPY_UNALIGNED_H
+#define __RISCV_KERNEL_COPY_UNALIGNED_H
+
+#include <linux/types.h>
+
+void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
+void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
+
+#endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */
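The assembly above is kept deliberately simple so its timing reflects only load/store behaviour. For readers who do not speak RISC-V assembly, here is a rough C model of the word-copy loop — a sketch only: in portable C the misaligned word dereference the kernel performs is undefined behaviour, which is precisely why the real routine is written in assembly, so this model is exercised on aligned buffers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* C model of __riscv_copy_words_unaligned: eight register-wide loads, then
 * eight stores, per iteration; the size is truncated to a multiple of
 * 8 * sizeof(uintptr_t), exactly as the assembly truncates to 8 * SZREG. */
static void copy_words(void *dst, const void *src, size_t size)
{
	uintptr_t *d = dst;
	const uintptr_t *s = src;
	const uintptr_t *end =
		s + (size & ~(8 * sizeof(uintptr_t) - 1)) / sizeof(uintptr_t);

	while (s < end) {
		uintptr_t w0 = s[0], w1 = s[1], w2 = s[2], w3 = s[3];
		uintptr_t w4 = s[4], w5 = s[5], w6 = s[6], w7 = s[7];

		d[0] = w0; d[1] = w1; d[2] = w2; d[3] = w3;
		d[4] = w4; d[5] = w5; d[6] = w6; d[7] = w7;
		d += 8;
		s += 8;
	}
}

int main(void)
{
	char src[128] = "the quick brown fox", dst[128] = { 0 };

	copy_words(dst, src, sizeof(src));
	puts(dst);
	return 0;
}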
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index ef7b4fd9e876..1cfbba65d11a 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -18,12 +18,19 @@
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/hwcap.h>
+#include <asm/hwprobe.h>
 #include <asm/patch.h>
 #include <asm/processor.h>
 #include <asm/vector.h>
 
+#include "copy-unaligned.h"
+
 #define NUM_ALPHA_EXTS ('z' - 'a' + 1)
 
+#define MISALIGNED_ACCESS_JIFFIES_LG2 1
+#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
 unsigned long elf_hwcap __read_mostly;
 
 /* Host ISA bitmap */
@@ -549,6 +556,103 @@ unsigned long riscv_get_elf_hwcap(void)
 	return hwcap;
 }
 
+void check_unaligned_access(int cpu)
+{
+	u64 start_cycles, end_cycles;
+	u64 word_cycles;
+	u64 byte_cycles;
+	int ratio;
+	unsigned long start_jiffies, now;
+	struct page *page;
+	void *dst;
+	void *src;
+	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
+	page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+	if (!page) {
+		pr_warn("Can't alloc pages to measure memcpy performance");
+		return;
+	}
+
+	/* Make an unaligned destination buffer. */
+	dst = (void *)((unsigned long)page_address(page) | 0x1);
+	/* Unalign src as well, but differently (off by 1 + 2 = 3). */
+	src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+	src += 2;
+	word_cycles = -1ULL;
+	/* Do a warmup. */
+	__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+	preempt_disable();
+	start_jiffies = jiffies;
+	while ((now = jiffies) == start_jiffies)
+		cpu_relax();
+
+	/*
+	 * For a fixed amount of time, repeatedly try the function, and take
+	 * the best time in cycles as the measurement.
+	 */
+	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+		start_cycles = get_cycles64();
+		/* Ensure the CSR read can't reorder WRT to the copy. */
+		mb();
+		__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+		/* Ensure the copy ends before the end time is snapped. */
+		mb();
+		end_cycles = get_cycles64();
+		if ((end_cycles - start_cycles) < word_cycles)
+			word_cycles = end_cycles - start_cycles;
+	}
+
+	byte_cycles = -1ULL;
+	__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+	start_jiffies = jiffies;
+	while ((now = jiffies) == start_jiffies)
+		cpu_relax();
+
+	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+		start_cycles = get_cycles64();
+		mb();
+		__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+		mb();
+		end_cycles = get_cycles64();
+		if ((end_cycles - start_cycles) < byte_cycles)
+			byte_cycles = end_cycles - start_cycles;
+	}
+
+	preempt_enable();
+
+	/* Don't divide by zero. */
+	if (!word_cycles || !byte_cycles) {
+		pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+			cpu);
+
+		goto out;
+	}
+
+	if (word_cycles < byte_cycles)
+		speed = RISCV_HWPROBE_MISALIGNED_FAST;
+
+	ratio = div_u64((byte_cycles * 100), word_cycles);
+	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
+		cpu,
+		ratio / 100,
+		ratio % 100,
+		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+	per_cpu(misaligned_access_speed, cpu) = speed;
+
+out:
+	__free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+}
+
+static int check_unaligned_access_boot_cpu(void)
+{
+	check_unaligned_access(0);
+	return 0;
+}
+
+arch_initcall(check_unaligned_access_boot_cpu);
+
 #ifdef CONFIG_RISCV_ALTERNATIVE
 /*
  * Alternative patch sites consider 48 bits when determining when to patch
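The per-CPU speed recorded here is what userspace sees through the riscv_hwprobe() syscall's RISCV_HWPROBE_CPUPERF_0 key. A minimal userspace sketch, assuming kernel headers new enough to provide <asm/hwprobe.h> and the syscall number; error handling is trimmed:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

	/* A zero-sized NULL cpuset asks about all online CPUs. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	switch (pair.value & RISCV_HWPROBE_MISALIGNED_MASK) {
	case RISCV_HWPROBE_MISALIGNED_FAST:
		puts("unaligned accesses: fast");
		break;
	case RISCV_HWPROBE_MISALIGNED_SLOW:
		puts("unaligned accesses: slow");
		break;
	default:
		puts("unaligned accesses: unknown/emulated");
	}
	return 0;
}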
"fast" : "slow"); + + per_cpu(misaligned_access_speed, cpu) = speed; + +out: + __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE)); +} + +static int check_unaligned_access_boot_cpu(void) +{ + check_unaligned_access(0); + return 0; +} + +arch_initcall(check_unaligned_access_boot_cpu); + #ifdef CONFIG_RISCV_ALTERNATIVE /* * Alternative patch sites consider 48 bits when determining when to patch diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h index 15616155008c..ea1a10355ce9 100644 --- a/arch/riscv/kernel/image-vars.h +++ b/arch/riscv/kernel/image-vars.h @@ -27,6 +27,7 @@ __efistub__start = _start; __efistub__start_kernel = _start_kernel; __efistub__end = _end; __efistub__edata = _edata; +__efistub___init_text_end = __init_text_end; __efistub_screen_info = screen_info; #endif diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 575e71d6c8ae..13ee7bf589a1 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -6,6 +6,7 @@ #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memory.h> +#include <linux/string.h> #include <linux/uaccess.h> #include <linux/stop_machine.h> #include <asm/kprobes.h> @@ -53,13 +54,52 @@ static void patch_unmap(int fixmap) } NOKPROBE_SYMBOL(patch_unmap); -static int patch_insn_write(void *addr, const void *insn, size_t len) +static int __patch_insn_set(void *addr, u8 c, size_t len) +{ + void *waddr = addr; + bool across_pages = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE; + + /* + * Only two pages can be mapped at a time for writing. + */ + if (len + offset_in_page(addr) > 2 * PAGE_SIZE) + return -EINVAL; + /* + * Before reaching here, it was expected to lock the text_mutex + * already, so we don't need to give another lock here and could + * ensure that it was safe between each cores. + */ + lockdep_assert_held(&text_mutex); + + if (across_pages) + patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1); + + waddr = patch_map(addr, FIX_TEXT_POKE0); + + memset(waddr, c, len); + + patch_unmap(FIX_TEXT_POKE0); + + if (across_pages) + patch_unmap(FIX_TEXT_POKE1); + + return 0; +} +NOKPROBE_SYMBOL(__patch_insn_set); + +static int __patch_insn_write(void *addr, const void *insn, size_t len) { void *waddr = addr; bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE; int ret; /* + * Only two pages can be mapped at a time for writing. + */ + if (len + offset_in_page(addr) > 2 * PAGE_SIZE) + return -EINVAL; + + /* * Before reaching here, it was expected to lock the text_mutex * already, so we don't need to give another lock here and could * ensure that it was safe between each cores. 
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 7b593d44c712..07915dc9279e 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -35,5 +35,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
 $(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
 	$(call if_changed_rule,cc_o_c)
 
-obj-y		:= cmdline_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
+obj-y		:= cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
 extra-y		:= $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index 05652d13c746..68e786c84c94 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -14,6 +14,7 @@ static char early_cmdline[COMMAND_LINE_SIZE];
  * LLVM complain because the function is actually unused in this file).
  */
 u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa);
+bool set_nokaslr_from_cmdline(uintptr_t dtb_pa);
 
 static char *get_early_cmdline(uintptr_t dtb_pa)
 {
@@ -60,3 +61,15 @@ u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa)
 
 	return match_noXlvl(cmdline);
 }
+
+static bool match_nokaslr(char *cmdline)
+{
+	return strstr(cmdline, "nokaslr");
+}
+
+bool set_nokaslr_from_cmdline(uintptr_t dtb_pa)
+{
+	char *cmdline = get_early_cmdline(dtb_pa);
+
+	return match_nokaslr(cmdline);
+}
diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c
new file mode 100644
index 000000000000..899610e042ab
--- /dev/null
+++ b/arch/riscv/kernel/pi/fdt_early.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/libfdt.h>
+
+/*
+ * Declare the functions that are exported (but prefixed) here so that LLVM
+ * does not complain it lacks the 'static' keyword (which, if added, makes
+ * LLVM complain because the function is actually unused in this file).
+ */
+u64 get_kaslr_seed(uintptr_t dtb_pa);
+
+u64 get_kaslr_seed(uintptr_t dtb_pa)
+{
+	int node, len;
+	fdt64_t *prop;
+	u64 ret;
+
+	node = fdt_path_offset((void *)dtb_pa, "/chosen");
+	if (node < 0)
+		return 0;
+
+	prop = fdt_getprop_w((void *)dtb_pa, node, "kaslr-seed", &len);
+	if (!prop || len != sizeof(u64))
+		return 0;
+
+	ret = fdt64_to_cpu(*prop);
+	*prop = 0;
+	return ret;
+}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 487303e3ef22..2afe460de16a 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -25,6 +25,9 @@ enum riscv_regset {
 #ifdef CONFIG_FPU
 	REGSET_F,
 #endif
+#ifdef CONFIG_RISCV_ISA_V
+	REGSET_V,
+#endif
 };
 
 static int riscv_gpr_get(struct task_struct *target,
@@ -81,6 +84,71 @@ static int riscv_fpr_set(struct task_struct *target,
 }
 #endif
 
+#ifdef CONFIG_RISCV_ISA_V
+static int riscv_vr_get(struct task_struct *target,
+			const struct user_regset *regset,
+			struct membuf to)
+{
+	struct __riscv_v_ext_state *vstate = &target->thread.vstate;
+	struct __riscv_v_regset_state ptrace_vstate;
+
+	if (!riscv_v_vstate_query(task_pt_regs(target)))
+		return -EINVAL;
+
+	/*
+	 * Ensure the vector registers have been saved to the memory before
+	 * copying them to membuf.
+	 */
+	if (target == current)
+		riscv_v_vstate_save(current, task_pt_regs(current));
+
+	ptrace_vstate.vstart = vstate->vstart;
+	ptrace_vstate.vl = vstate->vl;
+	ptrace_vstate.vtype = vstate->vtype;
+	ptrace_vstate.vcsr = vstate->vcsr;
+	ptrace_vstate.vlenb = vstate->vlenb;
+
+	/* Copy vector header from vstate. */
+	membuf_write(&to, &ptrace_vstate, sizeof(struct __riscv_v_regset_state));
+
+	/* Copy all the vector registers from vstate. */
+	return membuf_write(&to, vstate->datap, riscv_v_vsize);
+}
+
+static int riscv_vr_set(struct task_struct *target,
+			const struct user_regset *regset,
+			unsigned int pos, unsigned int count,
+			const void *kbuf, const void __user *ubuf)
+{
+	int ret;
+	struct __riscv_v_ext_state *vstate = &target->thread.vstate;
+	struct __riscv_v_regset_state ptrace_vstate;
+
+	if (!riscv_v_vstate_query(task_pt_regs(target)))
+		return -EINVAL;
+
+	/* Copy rest of the vstate except datap */
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ptrace_vstate, 0,
+				 sizeof(struct __riscv_v_regset_state));
+	if (unlikely(ret))
+		return ret;
+
+	if (vstate->vlenb != ptrace_vstate.vlenb)
+		return -EINVAL;
+
+	vstate->vstart = ptrace_vstate.vstart;
+	vstate->vl = ptrace_vstate.vl;
+	vstate->vtype = ptrace_vstate.vtype;
+	vstate->vcsr = ptrace_vstate.vcsr;
+
+	/* Copy all the vector registers. */
+	pos = 0;
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap,
+				 0, riscv_v_vsize);
+	return ret;
+}
+#endif
+
 static const struct user_regset riscv_user_regset[] = {
 	[REGSET_X] = {
 		.core_note_type = NT_PRSTATUS,
@@ -100,6 +168,17 @@ static const struct user_regset riscv_user_regset[] = {
 		.set = riscv_fpr_set,
 	},
 #endif
+#ifdef CONFIG_RISCV_ISA_V
+	[REGSET_V] = {
+		.core_note_type = NT_RISCV_VECTOR,
+		.align = 16,
+		.n = ((32 * RISCV_MAX_VLENB) +
+		      sizeof(struct __riscv_v_regset_state)) / sizeof(__u32),
+		.size = sizeof(__u32),
+		.regset_get = riscv_vr_get,
+		.set = riscv_vr_set,
+	},
+#endif
 };
 
 static const struct user_regset_view riscv_user_native_view = {
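Userspace reaches the new regset through PTRACE_GETREGSET with the NT_RISCV_VECTOR note type. A hypothetical tracer snippet — dump_vstate() is an invented helper, and the buffer is sized for an assumed architectural ceiling of VLENB = 8192 bytes rather than a probed value:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>
#include <asm/ptrace.h>

static void dump_vstate(pid_t pid)	/* pid must be a stopped tracee */
{
	/* Header plus 32 vector registers of at most 8192 bytes each. */
	size_t bufsz = sizeof(struct __riscv_v_regset_state) + 32 * 8192;
	struct __riscv_v_regset_state *vr = malloc(bufsz);
	struct iovec iov = { .iov_base = vr, .iov_len = bufsz };

	if (ptrace(PTRACE_GETREGSET, pid, NT_RISCV_VECTOR, &iov) == 0)
		printf("vlenb=%lu vl=%lu vtype=%#lx\n",
		       vr->vlenb, vr->vl, vr->vtype);

	free(vr);
}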
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 32c2e1eb71bd..e600aab116a4 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -21,6 +21,7 @@
 #include <linux/smp.h>
 #include <linux/efi.h>
 #include <linux/crash_dump.h>
+#include <linux/panic_notifier.h>
 
 #include <asm/acpi.h>
 #include <asm/alternative.h>
@@ -347,3 +348,27 @@ void free_initmem(void)
 
 	free_initmem_default(POISON_FREE_INITMEM);
 }
+
+static int dump_kernel_offset(struct notifier_block *self,
+			      unsigned long v, void *p)
+{
+	pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+		 kernel_map.virt_offset,
+		 KERNEL_LINK_ADDR);
+
+	return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+	.notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		atomic_notifier_chain_register(&panic_notifier_list,
+					       &kernel_offset_notifier);
+
+	return 0;
+}
+device_initcall(register_kernel_offset_dumper);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index f4d6acb38dd0..1b8da4e40a4d 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -26,6 +26,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
 #include <asm/cpu_ops.h>
+#include <asm/cpufeature.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
@@ -245,7 +246,7 @@ asmlinkage __visible void smp_callin(void)
 	numa_add_cpu(curr_cpuid);
 	set_cpu_online(curr_cpuid, 1);
 
-	probe_vendor_features(curr_cpuid);
+	check_unaligned_access(curr_cpuid);
 
 	if (has_vector()) {
 		if (riscv_v_setup_vsize())
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index 7270b4d8c05b..b76e7e192eb1 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -9,26 +9,93 @@
 #include <linux/dma-map-ops.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
+#include <asm/dma-noncoherent.h>
 
 static bool noncoherent_supported __ro_after_init;
 int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
 EXPORT_SYMBOL_GPL(dma_cache_alignment);
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-			      enum dma_data_direction dir)
+struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
+	.wback = NULL,
+	.inv = NULL,
+	.wback_inv = NULL,
+};
+
+static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
+{
+	void *vaddr = phys_to_virt(paddr);
+
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+	if (unlikely(noncoherent_cache_ops.wback)) {
+		noncoherent_cache_ops.wback(paddr, size);
+		return;
+	}
+#endif
+	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
+{
+	void *vaddr = phys_to_virt(paddr);
+
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+	if (unlikely(noncoherent_cache_ops.inv)) {
+		noncoherent_cache_ops.inv(paddr, size);
+		return;
+	}
+#endif
+
+	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
 {
 	void *vaddr = phys_to_virt(paddr);
 
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+	if (unlikely(noncoherent_cache_ops.wback_inv)) {
+		noncoherent_cache_ops.wback_inv(paddr, size);
+		return;
+	}
+#endif
+
+	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline bool arch_sync_dma_clean_before_fromdevice(void)
+{
+	return true;
+}
+
+static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
+{
+	return true;
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+			      enum dma_data_direction dir)
+{
 	switch (dir) {
 	case DMA_TO_DEVICE:
-		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+		arch_dma_cache_wback(paddr, size);
 		break;
+
 	case DMA_FROM_DEVICE:
-		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
-		break;
+		if (!arch_sync_dma_clean_before_fromdevice()) {
+			arch_dma_cache_inv(paddr, size);
+			break;
+		}
+		fallthrough;
+
 	case DMA_BIDIRECTIONAL:
-		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+		/* Skip the invalidate here if it's done later */
+		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
+		    arch_sync_dma_cpu_needs_post_dma_flush())
+			arch_dma_cache_wback(paddr, size);
+		else
+			arch_dma_cache_wback_inv(paddr, size);
 		break;
+
 	default:
 		break;
 	}
@@ -37,15 +104,17 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 			   enum dma_data_direction dir)
 {
-	void *vaddr = phys_to_virt(paddr);
-
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		break;
+
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+		if (arch_sync_dma_cpu_needs_post_dma_flush())
+			arch_dma_cache_inv(paddr, size);
 		break;
+
 	default:
 		break;
 	}
@@ -55,6 +124,13 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 {
 	void *flush_addr = page_address(page);
 
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+	if (unlikely(noncoherent_cache_ops.wback_inv)) {
+		noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
+		return;
+	}
+#endif
+
 	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
 }
 
@@ -86,3 +162,12 @@ void __init riscv_set_dma_cache_alignment(void)
 	if (!noncoherent_supported)
 		dma_cache_alignment = 1;
 }
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
+{
+	if (!ops)
+		return;
+
+	noncoherent_cache_ops = *ops;
+}
+EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
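A platform driver opts into this path by filling in the ops table and registering it; the in-tree consumer is the AX45MP L2 cache driver added under drivers/cache/, which falls outside this arch-limited diffstat. A hypothetical registration sketch — all my_l2_* names are invented, and the MMIO/CCTL details are the vendor's:

#include <asm/dma-noncoherent.h>

static void my_l2_wback(phys_addr_t paddr, size_t size)
{
	/* issue the vendor-specific writeback (clean) operation here */
}

static void my_l2_inv(phys_addr_t paddr, size_t size)
{
	/* issue the vendor-specific invalidate operation here */
}

static void my_l2_wback_inv(phys_addr_t paddr, size_t size)
{
	/* flush = writeback followed by invalidate */
	my_l2_wback(paddr, size);
	my_l2_inv(paddr, size);
}

static const struct riscv_nonstd_cache_ops my_l2_ops = {
	.wback		= my_l2_wback,
	.inv		= my_l2_inv,
	.wback_inv	= my_l2_wback_inv,
};

static int my_l2_probe(void)	/* called from the driver's probe path */
{
	riscv_noncoherent_register_cache_ops(&my_l2_ops);
	return 0;
}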
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 943c18d6ef4d..0798bd861dcb 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1014,11 +1014,45 @@ static void __init pt_ops_set_late(void)
 #endif
 }
 
+#ifdef CONFIG_RANDOMIZE_BASE
+extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
+
+static int __init print_nokaslr(char *p)
+{
+	pr_info("Disabled KASLR");
+	return 0;
+}
+early_param("nokaslr", print_nokaslr);
+
+unsigned long kaslr_offset(void)
+{
+	return kernel_map.virt_offset;
+}
+#endif
+
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
 	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
 
-	kernel_map.virt_addr = KERNEL_LINK_ADDR;
+#ifdef CONFIG_RANDOMIZE_BASE
+	if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
+		u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
+		u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
+		u32 nr_pos;
+
+		/*
+		 * Compute the number of positions available: we are limited
+		 * by the early page table that only has one PUD and we must
+		 * be aligned on PMD_SIZE.
+		 */
+		nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
+
+		kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
+	}
+#endif
+
+	kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
 	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
 
 #ifdef CONFIG_XIP_KERNEL
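To make the slot arithmetic concrete, here is a small standalone illustration with example numbers — a 1 GiB PUD, a 2 MiB PMD granule, a ~30 MiB image, and an arbitrary seed; none of these values come from the patch itself:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pud_size = 1ULL << 30;      /* 1 GiB: the single early-boot PUD */
	uint64_t pmd_size = 2ULL << 20;      /* 2 MiB: required alignment granule */
	uint64_t kernel_size = 30ULL << 20;  /* example image size */
	uint64_t seed = 0xdeadbeefcafeULL;   /* stands in for /chosen/kaslr-seed */

	uint64_t nr_pos = (pud_size - kernel_size) / pmd_size;	/* 497 slots */
	uint64_t virt_offset = (seed % nr_pos) * pmd_size;

	printf("%llu candidate slots, virt_offset %#llx\n",
	       (unsigned long long)nr_pos, (unsigned long long)virt_offset);
	return 0;
}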
diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c
index 089df92ae876..c5fc5ec96f6d 100644
--- a/arch/riscv/mm/pmem.c
+++ b/arch/riscv/mm/pmem.c
@@ -7,15 +7,28 @@
 #include <linux/libnvdimm.h>
 
 #include <asm/cacheflush.h>
+#include <asm/dma-noncoherent.h>
 
 void arch_wb_cache_pmem(void *addr, size_t size)
 {
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+	if (unlikely(noncoherent_cache_ops.wback)) {
+		noncoherent_cache_ops.wback(virt_to_phys(addr), size);
+		return;
+	}
+#endif
 	ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
+#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
+	if (unlikely(noncoherent_cache_ops.inv)) {
+		noncoherent_cache_ops.inv(virt_to_phys(addr), size);
+		return;
+	}
+#endif
 	ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index d21c6c92a683..a5ce1ab76ece 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -68,6 +68,7 @@ static inline bool is_creg(u8 reg)
 struct rv_jit_context {
 	struct bpf_prog *prog;
 	u16 *insns;		/* RV insns */
+	u16 *ro_insns;
 	int ninsns;
 	int prologue_len;
 	int epilogue_offset;
@@ -85,7 +86,9 @@ static inline int ninsns_rvoff(int ninsns)
 
 struct rv_jit_data {
 	struct bpf_binary_header *header;
+	struct bpf_binary_header *ro_header;
 	u8 *image;
+	u8 *ro_image;
 	struct rv_jit_context ctx;
 };
 
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 8423f4ddf8f5..ecd3ae6f4116 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -144,7 +144,11 @@ static bool in_auipc_jalr_range(s64 val)
 /* Emit fixed-length instructions for address */
 static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
 {
-	u64 ip = (u64)(ctx->insns + ctx->ninsns);
+	/*
+	 * Use the ro_insns(RX) to calculate the offset as the BPF program will
+	 * finally run from this memory region.
+	 */
+	u64 ip = (u64)(ctx->ro_insns + ctx->ninsns);
 	s64 off = addr - ip;
 	s64 upper = (off + (1 << 11)) >> 12;
 	s64 lower = off & 0xfff;
@@ -464,8 +468,12 @@ static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
 	s64 off = 0;
 	u64 ip;
 
-	if (addr && ctx->insns) {
-		ip = (u64)(long)(ctx->insns + ctx->ninsns);
+	if (addr && ctx->insns && ctx->ro_insns) {
+		/*
+		 * Use the ro_insns(RX) to calculate the offset as the BPF
+		 * program will finally run from this memory region.
+		 */
+		ip = (u64)(long)(ctx->ro_insns + ctx->ninsns);
 		off = addr - ip;
 	}
 
@@ -578,9 +586,10 @@ static int add_exception_handler(const struct bpf_insn *insn,
 {
 	struct exception_table_entry *ex;
 	unsigned long pc;
-	off_t offset;
+	off_t ins_offset;
+	off_t fixup_offset;
 
-	if (!ctx->insns || !ctx->prog->aux->extable ||
+	if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
 	    (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX))
 		return 0;
 
@@ -594,12 +603,17 @@ static int add_exception_handler(const struct bpf_insn *insn,
 		return -EINVAL;
 
 	ex = &ctx->prog->aux->extable[ctx->nexentries];
-	pc = (unsigned long)&ctx->insns[ctx->ninsns - insn_len];
+	pc = (unsigned long)&ctx->ro_insns[ctx->ninsns - insn_len];
 
-	offset = pc - (long)&ex->insn;
-	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+	/*
+	 * This is the relative offset of the instruction that may fault from
+	 * the exception table itself. This will be written to the exception
+	 * table and if this instruction faults, the destination register will
+	 * be set to '0' and the execution will jump to the next instruction.
+	 */
+	ins_offset = pc - (long)&ex->insn;
+	if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
 		return -ERANGE;
-	ex->insn = offset;
 
 	/*
 	 * Since the extable follows the program, the fixup offset is always
@@ -608,12 +622,25 @@ static int add_exception_handler(const struct bpf_insn *insn,
 	 * bits. We don't need to worry about buildtime or runtime sort
 	 * modifying the upper bits because the table is already sorted, and
 	 * isn't part of the main exception table.
+	 *
+	 * The fixup_offset is set to the next instruction from the instruction
+	 * that may fault. The execution will jump to this after handling the
+	 * fault.
 	 */
-	offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
-	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
+	fixup_offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
+	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
 		return -ERANGE;
 
-	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
+	/*
+	 * The offsets above have been calculated using the RO buffer but we
+	 * need to use the R/W buffer for writes.
+	 * switch ex to rw buffer for writing.
+	 */
+	ex = (void *)ctx->insns + ((void *)ex - (void *)ctx->ro_insns);
+
+	ex->insn = ins_offset;
+
+	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
 		FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
 	ex->type = EX_TYPE_BPF;
 
@@ -1007,6 +1034,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 
 	ctx.ninsns = 0;
 	ctx.insns = NULL;
+	ctx.ro_insns = NULL;
 	ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
 	if (ret < 0)
 		return ret;
@@ -1015,7 +1043,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
 		return -EFBIG;
 
 	ctx.ninsns = 0;
+	/*
+	 * The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the
+	 * JITed instructions and later copies it to a RX region (ctx.ro_insns).
+	 * It also uses ctx.ro_insns to calculate offsets for jumps etc. As the
+	 * trampoline image uses the same memory area for writing and execution,
+	 * both ctx.insns and ctx.ro_insns can be set to image.
+	 */
 	ctx.insns = image;
+	ctx.ro_insns = image;
 	ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
 	if (ret < 0)
 		return ret;
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 7a26a3e1c73c..7b70ccb7fec3 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -8,6 +8,8 @@
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/memory.h>
+#include <asm/patch.h>
 #include "bpf_jit.h"
 
 /* Number of iterations to try until offsets converge. */
@@ -117,16 +119,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 			sizeof(struct exception_table_entry);
 		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
 
-		jit_data->header =
-			bpf_jit_binary_alloc(prog_size + extable_size,
-					     &jit_data->image,
-					     sizeof(u32),
-					     bpf_fill_ill_insns);
-		if (!jit_data->header) {
+		jit_data->ro_header =
+			bpf_jit_binary_pack_alloc(prog_size + extable_size,
+						  &jit_data->ro_image, sizeof(u32),
+						  &jit_data->header, &jit_data->image,
+						  bpf_fill_ill_insns);
+		if (!jit_data->ro_header) {
 			prog = orig_prog;
 			goto out_offset;
 		}
 
+		/*
+		 * Use the image(RW) for writing the JITed instructions. But also save
+		 * the ro_image(RX) for calculating the offsets in the image. The RW
+		 * image will be later copied to the RX image from where the program
+		 * will run. The bpf_jit_binary_pack_finalize() will do this copy in the
+		 * final step.
+		 */
+		ctx->ro_insns = (u16 *)jit_data->ro_image;
 		ctx->insns = (u16 *)jit_data->image;
 		/*
 		 * Now, when the image is allocated, the image can
@@ -138,14 +148,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (i == NR_JIT_ITERATIONS) {
 		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
-		if (jit_data->header)
-			bpf_jit_binary_free(jit_data->header);
 		prog = orig_prog;
-		goto out_offset;
+		goto out_free_hdr;
 	}
 
 	if (extable_size)
-		prog->aux->extable = (void *)ctx->insns + prog_size;
+		prog->aux->extable = (void *)ctx->ro_insns + prog_size;
 
 skip_init_ctx:
 	pass++;
@@ -154,23 +162,33 @@ skip_init_ctx:
 
 	bpf_jit_build_prologue(ctx);
 	if (build_body(ctx, extra_pass, NULL)) {
-		bpf_jit_binary_free(jit_data->header);
 		prog = orig_prog;
-		goto out_offset;
+		goto out_free_hdr;
 	}
 	bpf_jit_build_epilogue(ctx);
 
 	if (bpf_jit_enable > 1)
 		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
 
-	prog->bpf_func = (void *)ctx->insns;
+	prog->bpf_func = (void *)ctx->ro_insns;
 	prog->jited = 1;
 	prog->jited_len = prog_size;
 
-	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
-
 	if (!prog->is_func || extra_pass) {
-		bpf_jit_binary_lock_ro(jit_data->header);
+		if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header,
+							 jit_data->header))) {
+			/* ro_header has been freed */
+			jit_data->ro_header = NULL;
+			prog = orig_prog;
+			goto out_offset;
+		}
+		/*
+		 * The instructions have now been copied to the ROX region from
+		 * where they will execute.
+		 * Write any modified data cache blocks out to memory and
+		 * invalidate the corresponding blocks in the instruction cache.
+		 */
+		bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
 		for (i = 0; i < prog->len; i++)
 			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
 		bpf_prog_fill_jited_linfo(prog, ctx->offset);
@@ -185,6 +203,14 @@ out:
 	bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
 	return prog;
+
+out_free_hdr:
+	if (jit_data->header) {
+		bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
+				   sizeof(jit_data->header->size));
+		bpf_jit_binary_pack_free(jit_data->ro_header, jit_data->header);
+	}
+	goto out_offset;
 }
 
 u64 bpf_jit_alloc_exec_limit(void)
@@ -204,3 +230,51 @@ void bpf_jit_free_exec(void *addr)
 {
 	return vfree(addr);
 }
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+	int ret;
+
+	mutex_lock(&text_mutex);
+	ret = patch_text_nosync(dst, src, len);
+	mutex_unlock(&text_mutex);
+
+	if (ret)
+		return ERR_PTR(-EINVAL);
+
+	return dst;
+}
+
+int bpf_arch_text_invalidate(void *dst, size_t len)
+{
+	int ret;
+
+	mutex_lock(&text_mutex);
+	ret = patch_text_set_nosync(dst, 0, len);
+	mutex_unlock(&text_mutex);
+
+	return ret;
+}
+
+void bpf_jit_free(struct bpf_prog *prog)
+{
+	if (prog->jited) {
+		struct rv_jit_data *jit_data = prog->aux->jit_data;
+		struct bpf_binary_header *hdr;
+
+		/*
+		 * If we fail the final pass of JIT (from jit_subprogs),
+		 * the program may not be finalized yet. Call finalize here
+		 * before freeing it.
+		 */
+		if (jit_data) {
+			bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header);
+			kfree(jit_data);
+		}
+		hdr = bpf_jit_binary_pack_hdr(prog);
+		bpf_jit_binary_pack_free(hdr, NULL);
+		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
+	}
+
+	bpf_prog_unlock_free(prog);
+}
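Taken together, the RW/RX handshake in bpf_int_jit_compile() reduces to the sequence below — a condensed, hypothetical rearrangement of the code above for reading purposes (jit_into_pack() is an invented name and error handling is elided), not a separate kernel API:

#include <linux/bpf.h>
#include <linux/filter.h>
#include "bpf_jit.h"

static void *jit_into_pack(struct rv_jit_data *jit_data, struct rv_jit_context *ctx,
			   struct bpf_prog *prog, unsigned int size)
{
	/* One call hands back both buffers: RW scratch and its RX twin. */
	jit_data->ro_header = bpf_jit_binary_pack_alloc(size, &jit_data->ro_image,
							sizeof(u32), &jit_data->header,
							&jit_data->image, bpf_fill_ill_insns);
	if (!jit_data->ro_header)
		return NULL;

	ctx->insns = (u16 *)jit_data->image;	   /* emit JITed code here (RW) */
	ctx->ro_insns = (u16 *)jit_data->ro_image; /* compute offsets against this (RX) */

	/* ... JIT passes emit into ctx->insns ... */

	/* Copy RW -> RX and seal the executable copy, then sync caches. */
	if (bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header))
		return NULL;

	bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
	return ctx->ro_insns;	/* prog->bpf_func executes from the RX copy */
}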