author     Will Deacon <will@kernel.org>           2023-04-20 11:22:33 +0100
committer  Will Deacon <will@kernel.org>           2023-04-20 11:22:33 +0100
commit     1bb31cc7afe6b1524d1cd74a1ed53c1add16fc73 (patch)
tree       9f9870383e4754ab0a6129d167bc445b98788ff8 /arch
parent     81444b77a4443e985b567310f19d601368ed4762 (diff)
parent     414c109bdf496195269bc03d40841fe67fc2f839 (diff)
Merge branch 'for-next/mm' into for-next/core
* for-next/mm:
arm64: mm: always map fixmap at page granularity
arm64: mm: move fixmap code to its own file
arm64: add FIXADDR_TOT_{START,SIZE}
Revert "Revert "arm64: dma: Drop cache invalidation from arch_dma_prep_coherent()""
arm: uaccess: Remove memcpy_page_flushcache()
mm,kfence: decouple kfence from page granularity mapping judgement
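
For context on the fixmap rework listed above: the FDT window in the fixmap used to be padded by a whole 2 MB section, and is now sized at one page more than the largest supported FDT. The snippet below is an illustrative, standalone sketch of that arithmetic only; it is not part of the merge. It assumes a 4 KiB page size and the mainline value of MAX_FDT_SIZE (2 MiB), and redefines DIV_ROUND_UP locally.

#include <stdio.h>

#define SZ_4K		4096UL
#define SZ_2M		(2UL * 1024 * 1024)

/* Assumed values for illustration; arm64 also supports 16K and 64K pages. */
#define PAGE_SIZE	SZ_4K
#define MAX_FDT_SIZE	SZ_2M

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/*
	 * Old scheme: pad the window by a full 2 MB so an FDT that crosses a
	 * 2 MB alignment boundary still fits when mapped with section blocks.
	 */
	unsigned long old_pages = (MAX_FDT_SIZE + SZ_2M) / PAGE_SIZE;

	/*
	 * New scheme: the fixmap is always mapped at page granularity, so the
	 * worst case is an FDT that starts just before a page boundary and
	 * therefore touches at most one extra page.
	 */
	unsigned long new_pages = DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1;

	printf("old FDT fixmap window: %lu pages\n", old_pages);	/* 1024 */
	printf("new FDT fixmap window: %lu pages\n", new_pages);	/* 513 */
	return 0;
}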
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/fixmap.h          |  22
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h  |   5
-rw-r--r--  arch/arm64/include/asm/kfence.h          |  10
-rw-r--r--  arch/arm64/include/asm/mmu.h             |   2
-rw-r--r--  arch/arm64/include/asm/uaccess.h         |   2
-rw-r--r--  arch/arm64/lib/uaccess_flushcache.c      |   6
-rw-r--r--  arch/arm64/mm/Makefile                   |   2
-rw-r--r--  arch/arm64/mm/dma-mapping.c              |  17
-rw-r--r--  arch/arm64/mm/fixmap.c                   | 203
-rw-r--r--  arch/arm64/mm/mmu.c                      | 257
-rw-r--r--  arch/arm64/mm/pageattr.c                 |   7
-rw-r--r--  arch/arm64/mm/ptdump.c                   |   2
12 files changed, 301 insertions, 234 deletions
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 71ed5fdf718b..58c294a96676 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -17,6 +17,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#include <linux/math.h>
 #include <linux/sizes.h>
 #include <asm/boot.h>
 #include <asm/page.h>
@@ -36,17 +37,13 @@ enum fixed_addresses {
 	FIX_HOLE,

 	/*
-	 * Reserve a virtual window for the FDT that is 2 MB larger than the
-	 * maximum supported size, and put it at the top of the fixmap region.
-	 * The additional space ensures that any FDT that does not exceed
-	 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
-	 * 2 MB alignment boundaries.
-	 *
-	 * Keep this at the top so it remains 2 MB aligned.
+	 * Reserve a virtual window for the FDT that is a page bigger than the
+	 * maximum supported size. The additional space ensures that any FDT
+	 * that does not exceed MAX_FDT_SIZE can be mapped regardless of
+	 * whether it crosses any page boundary.
 	 */
-#define FIX_FDT_SIZE		(MAX_FDT_SIZE + SZ_2M)
 	FIX_FDT_END,
-	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+	FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,

 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
@@ -95,12 +92,15 @@ enum fixed_addresses {
 	__end_of_fixed_addresses
 };

-#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_SIZE		(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_TOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_TOT_START	(FIXADDR_TOP - FIXADDR_TOT_SIZE)

 #define FIXMAP_PAGE_IO	__pgprot(PROT_DEVICE_nGnRE)

 void __init early_fixmap_init(void);
+void __init fixmap_copy(pgd_t *pgdir);

 #define __early_set_fixmap __set_fixmap

diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index fcd14197756f..186dd7f85b14 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -59,8 +59,11 @@
 #define EARLY_KASLR	(0)
 #endif

+#define SPAN_NR_ENTRIES(vstart, vend, shift) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
+
 #define EARLY_ENTRIES(vstart, vend, shift, add) \
-	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
+	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))

 #define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))

diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index aa855c6a0ae6..a81937fae9f6 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	return true;
 }

+#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
+static inline bool arm64_kfence_can_set_direct_map(void)
+{
+	return !kfence_early_init;
+}
+#else /* CONFIG_KFENCE */
+static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+#endif /* CONFIG_KFENCE */
+
 #endif /* __ASM_KFENCE_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 48f8466a4be9..4384eaa0aeb7 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -65,6 +65,8 @@ extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
+extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index deaf4f8f0672..8209e6a86989 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -449,8 +449,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);

 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-struct page;
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
 extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index baee22961bdb..7510d1a23124 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -19,12 +19,6 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
 }
 EXPORT_SYMBOL_GPL(memcpy_flushcache);

-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
-			    size_t len)
-{
-	memcpy_flushcache(to, page_address(page) + offset, len);
-}
-
 unsigned long __copy_user_flushcache(void *to, const void __user *from,
				     unsigned long n)
 {
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index ff1e800ba7a1..dbd1bc95967d 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -2,7 +2,7 @@
 obj-y				:= dma-mapping.o extable.o fault.o init.o \
				   cache.o copypage.o flush.o \
				   ioremap.o mmap.o pgd.o mmu.o \
-				   context.o proc.o pageattr.o
+				   context.o proc.o pageattr.o fixmap.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)	+= ptdump.o
 obj-$(CONFIG_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 5240f6acad64..3cb101e8cb29 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -36,22 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 {
 	unsigned long start = (unsigned long)page_address(page);

-	/*
-	 * The architecture only requires a clean to the PoC here in order to
-	 * meet the requirements of the DMA API. However, some vendors (i.e.
-	 * Qualcomm) abuse the DMA API for transferring buffers from the
-	 * non-secure to the secure world, resetting the system if a non-secure
-	 * access shows up after the buffer has been transferred:
-	 *
-	 * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
-	 *
-	 * Using clean+invalidate appears to make this issue less likely, but
-	 * the drivers themselves still need fixing as the CPU could issue a
-	 * speculative read from the buffer via the linear mapping irrespective
-	 * of the cache maintenance we use. Once the drivers are fixed, we can
-	 * relax this to a clean operation.
-	 */
-	dcache_clean_inval_poc(start, start + size);
+	dcache_clean_poc(start, start + size);
 }

 #ifdef CONFIG_IOMMU_DMA
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
new file mode 100644
index 000000000000..c0a3301203bd
--- /dev/null
+++ b/arch/arm64/mm/fixmap.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Fixmap manipulation code
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define NR_BM_PTE_TABLES \
+	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
+#define NR_BM_PMD_TABLES \
+	SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)
+
+static_assert(NR_BM_PMD_TABLES == 1);
+
+#define __BM_TABLE_IDX(addr, shift) \
+	(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))
+
+#define BM_PTE_TABLE_IDX(addr)	__BM_TABLE_IDX(addr, PMD_SHIFT)
+
+static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
+static inline pte_t *fixmap_pte(unsigned long addr)
+{
+	return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
+}
+
+static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
+{
+	pmd_t pmd = READ_ONCE(*pmdp);
+	pte_t *ptep;
+
+	if (pmd_none(pmd)) {
+		ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
+		__pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
+	}
+}
+
+static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
+					 unsigned long end)
+{
+	unsigned long next;
+	pud_t pud = READ_ONCE(*pudp);
+	pmd_t *pmdp;
+
+	if (pud_none(pud))
+		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
+
+	pmdp = pmd_offset_kimg(pudp, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		early_fixmap_init_pte(pmdp, addr);
+	} while (pmdp++, addr = next, addr != end);
+}
+
+
+static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
+					 unsigned long end)
+{
+	p4d_t p4d = READ_ONCE(*p4dp);
+	pud_t *pudp;
+
+	if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
+	    p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
+		/*
+		 * We only end up here if the kernel mapping and the fixmap
+		 * share the top level pgd entry, which should only happen on
+		 * 16k/4 levels configurations.
+		 */
+		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+	}
+
+	if (p4d_none(p4d))
+		__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
+
+	pudp = pud_offset_kimg(p4dp, addr);
+	early_fixmap_init_pmd(pudp, addr, end);
+}
+
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
+void __init early_fixmap_init(void)
+{
+	unsigned long addr = FIXADDR_TOT_START;
+	unsigned long end = FIXADDR_TOP;
+
+	pgd_t *pgdp = pgd_offset_k(addr);
+	p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+	early_fixmap_init_pud(p4dp, addr, end);
+}
+
+/*
+ * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
+ * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+		  phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *ptep;
+
+	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+	ptep = fixmap_pte(addr);
+
+	if (pgprot_val(flags)) {
+		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+	} else {
+		pte_clear(&init_mm, addr, ptep);
+		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+	}
+}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+{
+	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+	phys_addr_t dt_phys_base;
+	int offset;
+	void *dt_virt;
+
+	/*
+	 * Check whether the physical FDT address is set and meets the minimum
+	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+	 * at least 8 bytes so that we can always access the magic and size
+	 * fields of the FDT header after mapping the first chunk, double check
+	 * here if that is indeed the case.
+	 */
+	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+		return NULL;
+
+	dt_phys_base = round_down(dt_phys, PAGE_SIZE);
+	offset = dt_phys % PAGE_SIZE;
+	dt_virt = (void *)dt_virt_base + offset;
+
+	/* map the first chunk so we can read the size from the header */
+	create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);
+
+	if (fdt_magic(dt_virt) != FDT_MAGIC)
+		return NULL;
+
+	*size = fdt_totalsize(dt_virt);
+	if (*size > MAX_FDT_SIZE)
+		return NULL;
+
+	if (offset + *size > PAGE_SIZE) {
+		create_mapping_noalloc(dt_phys_base, dt_virt_base,
+				       offset + *size, prot);
+	}
+
+	return dt_virt;
+}
+
+/*
+ * Copy the fixmap region into a new pgdir.
+ */
+void __init fixmap_copy(pgd_t *pgdir)
+{
+	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
+		/*
+		 * The fixmap falls in a separate pgd to the kernel, and doesn't
+		 * live in the carveout for the swapper_pg_dir. We can simply
+		 * re-use the existing dir for the fixmap.
+		 */
+		set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
+			READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
+	} else if (CONFIG_PGTABLE_LEVELS > 3) {
+		pgd_t *bm_pgdp;
+		p4d_t *bm_p4dp;
+		pud_t *bm_pudp;
+		/*
+		 * The fixmap shares its top level pgd entry with the kernel
+		 * mapping. This can really only occur when we are running
+		 * with 16k/4 levels, so we can simply reuse the pud level
+		 * entry instead.
+		 */
+		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+		bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
+		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
+		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
+		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
+		pud_clear_fixmap();
+	} else {
+		BUG();
+	}
+}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 7556020a27b7..af6bc8403ee4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/set_memory.h>
+#include <linux/kfence.h>

 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -38,6 +39,7 @@
 #include <asm/ptdump.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>

 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -71,10 +73,6 @@ long __section(".mmuoff.data.write") __early_cpu_boot_status;
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);

-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
-
 static DEFINE_SPINLOCK(swapper_pgdir_lock);
 static DEFINE_MUTEX(fixmap_lock);

@@ -450,8 +448,8 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
  * without allocating new levels of table. Note that this permits the
  * creation of new section or page entries.
  */
-static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
-				  phys_addr_t size, pgprot_t prot)
+void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+				   phys_addr_t size, pgprot_t prot)
 {
 	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
@@ -510,12 +508,67 @@ void __init mark_linear_text_alias_ro(void)
			    PAGE_KERNEL_RO);
 }

+#ifdef CONFIG_KFENCE
+
+bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
+
+/* early_param() will be parsed before map_mem() below. */
+static int __init parse_kfence_early_init(char *arg)
+{
+	int val;
+
+	if (get_option(&arg, &val))
+		kfence_early_init = !!val;
+	return 0;
+}
+early_param("kfence.sample_interval", parse_kfence_early_init);
+
+static phys_addr_t __init arm64_kfence_alloc_pool(void)
+{
+	phys_addr_t kfence_pool;
+
+	if (!kfence_early_init)
+		return 0;
+
+	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	if (!kfence_pool) {
+		pr_err("failed to allocate kfence pool\n");
+		kfence_early_init = false;
+		return 0;
+	}
+
+	/* Temporarily mark as NOMAP. */
+	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+
+	return kfence_pool;
+}
+
+static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
+{
+	if (!kfence_pool)
+		return;
+
+	/* KFENCE pool needs page-level mapping. */
+	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+		       pgprot_tagged(PAGE_KERNEL),
+		       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	__kfence_pool = phys_to_virt(kfence_pool);
+}
+#else /* CONFIG_KFENCE */
+
+static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
+
+#endif /* CONFIG_KFENCE */
+
 static void __init map_mem(pgd_t *pgdp)
 {
 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
 	phys_addr_t kernel_start = __pa_symbol(_stext);
 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
 	phys_addr_t start, end;
+	phys_addr_t early_kfence_pool;
 	int flags = NO_EXEC_MAPPINGS;
 	u64 i;

@@ -528,6 +581,8 @@ static void __init map_mem(pgd_t *pgdp)
	 */
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));

+	early_kfence_pool = arm64_kfence_alloc_pool();
+
 	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

@@ -565,6 +620,7 @@ static void __init map_mem(pgd_t *pgdp)
 	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
+	arm64_kfence_map_pool(early_kfence_pool, pgdp);
 }

 void mark_rodata_ro(void)
@@ -691,34 +747,7 @@ static void __init map_kernel(pgd_t *pgdp)
			   &vmlinux_initdata, 0, VM_NO_GUARD);
 	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL,
			   &vmlinux_data, 0, 0);

-	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
-		/*
-		 * The fixmap falls in a separate pgd to the kernel, and doesn't
-		 * live in the carveout for the swapper_pg_dir. We can simply
-		 * re-use the existing dir for the fixmap.
-		 */
-		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
-			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
-	} else if (CONFIG_PGTABLE_LEVELS > 3) {
-		pgd_t *bm_pgdp;
-		p4d_t *bm_p4dp;
-		pud_t *bm_pudp;
-		/*
-		 * The fixmap shares its top level pgd entry with the kernel
-		 * mapping. This can really only occur when we are running
-		 * with 16k/4 levels, so we can simply reuse the pud level
-		 * entry instead.
-		 */
-		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
-		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
-		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
-		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
-		pud_clear_fixmap();
-	} else {
-		BUG();
-	}
-
+	fixmap_copy(pgdp);
 	kasan_copy_shadow(pgdp);
 }

@@ -1133,166 +1162,6 @@ void vmemmap_free(unsigned long start, unsigned long end,
 }
 #endif	/* CONFIG_MEMORY_HOTPLUG */

-static inline pud_t *fixmap_pud(unsigned long addr)
-{
-	pgd_t *pgdp = pgd_offset_k(addr);
-	p4d_t *p4dp = p4d_offset(pgdp, addr);
-	p4d_t p4d = READ_ONCE(*p4dp);
-
-	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));
-
-	return pud_offset_kimg(p4dp, addr);
-}
-
-static inline pmd_t *fixmap_pmd(unsigned long addr)
-{
-	pud_t *pudp = fixmap_pud(addr);
-	pud_t pud = READ_ONCE(*pudp);
-
-	BUG_ON(pud_none(pud) || pud_bad(pud));
-
-	return pmd_offset_kimg(pudp, addr);
-}
-
-static inline pte_t *fixmap_pte(unsigned long addr)
-{
-	return &bm_pte[pte_index(addr)];
-}
-
-/*
- * The p*d_populate functions call virt_to_phys implicitly so they can't be used
- * directly on kernel symbols (bm_p*d). This function is called too early to use
- * lm_alias so __p*d_populate functions must be used to populate with the
- * physical address from __pa_symbol.
- */
-void __init early_fixmap_init(void)
-{
-	pgd_t *pgdp;
-	p4d_t *p4dp, p4d;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	unsigned long addr = FIXADDR_START;
-
-	pgdp = pgd_offset_k(addr);
-	p4dp = p4d_offset(pgdp, addr);
-	p4d = READ_ONCE(*p4dp);
-	if (CONFIG_PGTABLE_LEVELS > 3 &&
-	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
-		/*
-		 * We only end up here if the kernel mapping and the fixmap
-		 * share the top level pgd entry, which should only happen on
-		 * 16k/4 levels configurations.
-		 */
-		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		pudp = pud_offset_kimg(p4dp, addr);
-	} else {
-		if (p4d_none(p4d))
-			__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
-		pudp = fixmap_pud(addr);
-	}
-	if (pud_none(READ_ONCE(*pudp)))
-		__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
-	pmdp = fixmap_pmd(addr);
-	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
-
-	/*
-	 * The boot-ioremap range spans multiple pmds, for which
-	 * we are not prepared:
-	 */
-	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
-		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
-
-	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
-	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
-		WARN_ON(1);
-		pr_warn("pmdp %p != %p, %p\n",
-			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
-			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
-		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
-			fix_to_virt(FIX_BTMAP_BEGIN));
-		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
-			fix_to_virt(FIX_BTMAP_END));
-
-		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
-		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
-	}
-}
-
-/*
- * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
- * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
- */
-void __set_fixmap(enum fixed_addresses idx,
-			       phys_addr_t phys, pgprot_t flags)
-{
-	unsigned long addr = __fix_to_virt(idx);
-	pte_t *ptep;
-
-	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
-
-	ptep = fixmap_pte(addr);
-
-	if (pgprot_val(flags)) {
-		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
-	} else {
-		pte_clear(&init_mm, addr, ptep);
-		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
-	}
-}
-
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
-{
-	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
-	int offset;
-	void *dt_virt;
-
-	/*
-	 * Check whether the physical FDT address is set and meets the minimum
-	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
-	 * at least 8 bytes so that we can always access the magic and size
-	 * fields of the FDT header after mapping the first chunk, double check
-	 * here if that is indeed the case.
-	 */
-	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
-	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
-		return NULL;
-
-	/*
-	 * Make sure that the FDT region can be mapped without the need to
-	 * allocate additional translation table pages, so that it is safe
-	 * to call create_mapping_noalloc() this early.
-	 *
-	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
-	 * be in the same PMD as the rest of the fixmap.
-	 * On 4k pages, we'll use section mappings for the FDT so we only
-	 * have to be in the same PUD.
-	 */
-	BUILD_BUG_ON(dt_virt_base % SZ_2M);
-
-	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
-		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
-
-	offset = dt_phys % SWAPPER_BLOCK_SIZE;
-	dt_virt = (void *)dt_virt_base + offset;
-
-	/* map the first chunk so we can read the size from the header */
-	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
-			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
-
-	if (fdt_magic(dt_virt) != FDT_MAGIC)
-		return NULL;
-
-	*size = fdt_totalsize(dt_virt);
-	if (*size > MAX_FDT_SIZE)
-		return NULL;
-
-	if (offset + *size > SWAPPER_BLOCK_SIZE)
-		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
-
-	return dt_virt;
-}
-
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
 	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 79dd201c59d8..8e2017ba5f1b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -11,6 +11,7 @@
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
+#include <asm/kfence.h>

 struct page_change_data {
 	pgprot_t set_mask;
@@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED

 bool can_set_direct_map(void)
 {
 	/*
-	 * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
	 * mapped at page granularity, so that it is possible to
	 * protect/unprotect single pages.
+	 *
+	 * KFENCE pool requires page-granular mapping if initialized late.
	 */
 	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-		IS_ENABLED(CONFIG_KFENCE);
+		arm64_kfence_can_set_direct_map();
 }

 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 9bc4066c5bf3..e305b6593c4e 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -45,7 +45,7 @@ static struct addr_marker address_markers[] = {
 	{ MODULES_END,		"Modules end" },
 	{ VMALLOC_START,	"vmalloc() area" },
 	{ VMALLOC_END,		"vmalloc() end" },
-	{ FIXADDR_START,	"Fixmap start" },
+	{ FIXADDR_TOT_START,	"Fixmap start" },
 	{ FIXADDR_TOP,		"Fixmap end" },
 	{ PCI_IO_START,		"PCI I/O start" },
 	{ PCI_IO_END,		"PCI I/O end" },