Diffstat (limited to 'arch/arm64/mm')
 arch/arm64/mm/Makefile      |  2 ++
 arch/arm64/mm/context.c     | 11 +++++++++++
 arch/arm64/mm/dma-mapping.c | 38 ++++++++++++++++++++++++++++++++++----------
 arch/arm64/mm/fault.c       | 14 --------------
 arch/arm64/mm/init.c        | 12 +++++++-----
 arch/arm64/mm/ioremap.c     |  2 +-
 arch/arm64/mm/kasan_init.c  | 22 +++++++++++++++-------
 arch/arm64/mm/mmu.c         | 35 ++++++++++++++++++++++-------------
 arch/arm64/mm/physaddr.c    | 30 ++++++++++++++++++++++++++++++
 arch/arm64/mm/proc.S        |  1 +
 10 files changed, 117 insertions(+), 50 deletions(-)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index e703fb9defad..9b0ba191e48e 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_ARM64_PTDUMP_CORE)	+= dump.o
 obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
 obj-$(CONFIG_NUMA)		+= numa.o
+obj-$(CONFIG_DEBUG_VIRTUAL)	+= physaddr.o
+KASAN_SANITIZE_physaddr.o	+= n
 
 obj-$(CONFIG_KASAN)		+= kasan_init.o
 KASAN_SANITIZE_kasan_init.o	:= n
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 4c63cb154859..68634c630cdd 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -79,6 +79,13 @@ void verify_cpu_asid_bits(void)
 	}
 }
 
+static void set_reserved_asid_bits(void)
+{
+	if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
+	    cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
+		__set_bit(FALKOR_RESERVED_ASID, asid_map);
+}
+
 static void flush_context(unsigned int cpu)
 {
 	int i;
@@ -87,6 +94,8 @@ static void flush_context(unsigned int cpu)
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
 	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 
+	set_reserved_asid_bits();
+
 	/*
 	 * Ensure the generation bump is observed before we xchg the
 	 * active_asids.
@@ -244,6 +253,8 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
+	set_reserved_asid_bits();
+
 	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
 	return 0;
 }
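The context.c hunks back the Falkor erratum 1003 workaround: one ASID is kept permanently out of circulation by re-marking it in asid_map at init time and again after every generation rollover, since bitmap_clear() would otherwise make it available again. A rough sketch of why a set bit is enough — the allocator hands out ASIDs by scanning for clear bits — with cur_idx standing in for the allocator's cursor (illustration, not code from the patch):

	/* new_context()-style scan: a bit pre-set by set_reserved_asid_bits()
	 * is simply never returned, so no user mm can own the reserved ASID. */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		__set_bit(asid, asid_map);	/* claim it for this mm */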
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4a14b25163fb..351f7595cb3e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -211,7 +211,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 	dma_addr_t dev_addr;
 
 	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
 	return dev_addr;
@@ -222,7 +223,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
 				 size_t size, enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
@@ -235,7 +237,8 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 	int i, ret;
 
 	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		for_each_sg(sgl, sg, ret, i)
 			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 				       sg->length, dir);
@@ -251,7 +254,8 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (!is_device_dma_coherent(dev))
+	if (!is_device_dma_coherent(dev) &&
+	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		for_each_sg(sgl, sg, nelems, i)
 			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 					 sg->length, dir);
@@ -352,6 +356,13 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 	return 1;
 }
 
+static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
+{
+	if (swiotlb)
+		return swiotlb_dma_mapping_error(hwdev, addr);
+	return 0;
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
@@ -366,7 +377,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
 	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
 	.dma_supported = __swiotlb_dma_supported,
-	.mapping_error = swiotlb_dma_mapping_error,
+	.mapping_error = __swiotlb_dma_mapping_error,
 };
 
 static int __init atomic_pool_init(void)
@@ -830,14 +841,21 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
 	 * then the IOMMU core will have already configured a group for this
 	 * device, and allocated the default domain for that group.
 	 */
-	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
-		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
-			dev_name(dev));
-		return false;
+	if (!domain)
+		goto out_err;
+
+	if (domain->type == IOMMU_DOMAIN_DMA) {
+		if (iommu_dma_init_domain(domain, dma_base, size, dev))
+			goto out_err;
+
+		dev->archdata.dma_ops = &iommu_dma_ops;
 	}
 
-	dev->archdata.dma_ops = &iommu_dma_ops;
 	return true;
+out_err:
+	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+		dev_name(dev));
+	return false;
 }
 
 static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
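The dma-mapping.c changes teach the arm64 swiotlb ops to honour DMA_ATTR_SKIP_CPU_SYNC, suppressing the automatic cache maintenance on map/unmap, and route .mapping_error through a wrapper that is safe when swiotlb was never initialised. A hypothetical driver fragment showing the attribute's intended use (dev, buf, size and used_len are placeholders, not from the patch):

	#include <linux/dma-mapping.h>

	dma_addr_t addr;

	/* Skip the implicit sync; we know only part of the buffer is dirty. */
	addr = dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* Make just the bytes we actually wrote visible to the device. */
	dma_sync_single_for_device(dev, addr, used_len, DMA_TO_DEVICE);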
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 156169c6981b..81283851c9af 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -691,17 +691,3 @@ int cpu_enable_pan(void *__unused)
 	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
-
-#ifdef CONFIG_ARM64_UAO
-/*
- * Kernel threads have fs=KERNEL_DS by default, and don't need to call
- * set_fs(), devtmpfs in particular relies on this behaviour.
- * We need to enable the feature at runtime (instead of adding it to
- * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
- */
-int cpu_enable_uao(void *__unused)
-{
-	asm(SET_PSTATE_UAO(1));
-	return 0;
-}
-#endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 380ebe705093..e19e06593e37 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -36,6 +36,7 @@
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 
 #include <asm/boot.h>
 #include <asm/fixmap.h>
@@ -209,8 +210,8 @@ void __init arm64_memblock_init(void)
 	 * linear mapping. Take care not to clip the kernel which may be
 	 * high in memory.
 	 */
-	memblock_remove(max_t(u64, memstart_addr + linear_region_size, __pa(_end)),
-			ULLONG_MAX);
+	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
+			__pa_symbol(_end)), ULLONG_MAX);
 	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
 		/* ensure that memstart_addr remains sufficiently aligned */
 		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
@@ -225,7 +226,7 @@ void __init arm64_memblock_init(void)
 	 */
 	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
 		memblock_mem_limit_remove_map(memory_limit);
-		memblock_add(__pa(_text), (u64)(_end - _text));
+		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
 	}
 
 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
@@ -278,7 +279,7 @@ void __init arm64_memblock_init(void)
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
 	 */
-	memblock_reserve(__pa(_text), _end - _text);
+	memblock_reserve(__pa_symbol(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start) {
 		memblock_reserve(initrd_start, initrd_end - initrd_start);
@@ -486,7 +487,8 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
+	free_reserved_area(lm_alias(__init_begin),
+			   lm_alias(__init_end),
 			   0, "unused kernel");
 	/*
 	 * Unmap the __init region but leave the VM area in place. This
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 01e88c8bcab0..c4c8cd4c31d4 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -88,7 +88,7 @@ void __iounmap(volatile void __iomem *io_addr)
 	 * We could get an address outside vmalloc range in case
 	 * of ioremap_cache() reusing a RAM mapping.
 	 */
-	if (VMALLOC_START <= addr && addr < VMALLOC_END)
+	if (is_vmalloc_addr((void *)addr))
 		vunmap((void *)addr);
 }
 EXPORT_SYMBOL(__iounmap);
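In init.c, addresses of kernel-image symbols (_text, _end, __init_begin) now go through __pa_symbol() instead of __pa(), and free_initmem() uses lm_alias() to obtain a linear-map virtual address for the __init region. lm_alias() is defined in linux/mm.h as, roughly:

	#define lm_alias(x)	__va(__pa_symbol(x))

so free_reserved_area(lm_alias(__init_begin), lm_alias(__init_end), ...) performs the same round trip as the old __va(__pa(...)) form, but via the translation that is actually valid for image symbols.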
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 757009daa9ed..201d918e7575 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/memblock.h>
 #include <linux/start_kernel.h>
+#include <linux/mm.h>
 
 #include <asm/mmu_context.h>
 #include <asm/kernel-pgtable.h>
@@ -26,6 +27,13 @@
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). All the early functions are called too
+ * early to use lm_alias so __p*d_populate functions must be used to populate
+ * with the physical address from __pa_symbol.
+ */
+
 static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
 					unsigned long end)
 {
@@ -33,12 +41,12 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
 	unsigned long next;
 
 	if (pmd_none(*pmd))
-		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+		__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
 
 	pte = pte_offset_kimg(pmd, addr);
 	do {
 		next = addr + PAGE_SIZE;
-		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
+		set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
 			PAGE_KERNEL));
 	} while (pte++, addr = next, addr != end && pte_none(*pte));
 }
@@ -51,7 +59,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
 	unsigned long next;
 
 	if (pud_none(*pud))
-		pud_populate(&init_mm, pud, kasan_zero_pmd);
+		__pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
 
 	pmd = pmd_offset_kimg(pud, addr);
 	do {
@@ -68,7 +76,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
 	unsigned long next;
 
 	if (pgd_none(*pgd))
-		pgd_populate(&init_mm, pgd, kasan_zero_pud);
+		__pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
 
 	pud = pud_offset_kimg(pgd, addr);
 	do {
@@ -148,7 +156,7 @@ void __init kasan_init(void)
 	 */
 	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
 	dsb(ishst);
-	cpu_replace_ttbr1(tmp_pg_dir);
+	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -199,10 +207,10 @@ void __init kasan_init(void)
 	 */
 	for (i = 0; i < PTRS_PER_PTE; i++)
 		set_pte(&kasan_zero_pte[i],
-			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
 
 	memset(kasan_zero_page, 0, PAGE_SIZE);
-	cpu_replace_ttbr1(swapper_pg_dir);
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
 	/* At this point kasan is fully initialized. Enable error messages */
 	init_task.kasan_depth = 0;
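The comment added to kasan_init.c is the crux of the series: the p*d_populate() helpers translate their table argument with virt_to_phys() internally, which is exactly what DEBUG_VIRTUAL now flags for image symbols such as kasan_zero_pte. On arm64 the wrapper expands approximately as follows (see arch/arm64/include/asm/pgalloc.h), hence the switch to the raw __p*d_populate() calls fed with __pa_symbol():

	/* Approximate expansion of the wrapper: */
	pmd_populate_kernel(mm, pmd, ptep)
		=> __pmd_populate(pmd, __pa(ptep), PMD_TYPE_TABLE)

	/* For an image symbol, the patch open-codes the safe form: */
	__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);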
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 17243e43184e..b805c017f789 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -28,6 +28,7 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/io.h>
+#include <linux/mm.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -359,8 +360,8 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
-	unsigned long kernel_start = __pa(_text);
-	unsigned long kernel_end = __pa(__init_begin);
+	phys_addr_t kernel_start = __pa_symbol(_text);
+	phys_addr_t kernel_end = __pa_symbol(__init_begin);
 
 	/*
 	 * Take care not to create a writable alias for the
@@ -427,14 +428,14 @@ void mark_rodata_ro(void)
 	unsigned long section_size;
 
 	section_size = (unsigned long)_etext - (unsigned long)_text;
-	create_mapping_late(__pa(_text), (unsigned long)_text,
+	create_mapping_late(__pa_symbol(_text), (unsigned long)_text,
 			    section_size, PAGE_KERNEL_ROX);
 	/*
 	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
 	 * to cover NOTES and EXCEPTION_TABLE.
 	 */
 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
-	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
+	create_mapping_late(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
 			    section_size, PAGE_KERNEL_RO);
 
 	/* flush the TLBs after updating live kernel mappings */
@@ -446,7 +447,7 @@ void mark_rodata_ro(void)
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 				      pgprot_t prot, struct vm_struct *vma)
 {
-	phys_addr_t pa_start = __pa(va_start);
+	phys_addr_t pa_start = __pa_symbol(va_start);
 	unsigned long size = va_end - va_start;
 
 	BUG_ON(!PAGE_ALIGNED(pa_start));
@@ -494,7 +495,7 @@ static void __init map_kernel(pgd_t *pgd)
 		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
 		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
-			__pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
 		pud_clear_fixmap();
 	} else {
 		BUG();
@@ -524,8 +525,8 @@ void __init paging_init(void)
 	 * To do this we need to go via a temporary pgd.
 	 */
 	cpu_replace_ttbr1(__va(pgd_phys));
-	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
-	cpu_replace_ttbr1(swapper_pg_dir);
+	memcpy(swapper_pg_dir, pgd, PGD_SIZE);
+	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
 	pgd_clear_fixmap();
 	memblock_free(pgd_phys, PAGE_SIZE);
@@ -534,7 +535,7 @@ void __init paging_init(void)
 	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
 	 * allocated with it.
 	 */
-	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
 		      SWAPPER_DIR_SIZE - PAGE_SIZE);
 }
 
@@ -645,6 +646,12 @@ static inline pte_t * fixmap_pte(unsigned long addr)
 	return &bm_pte[pte_index(addr)];
 }
 
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
 void __init early_fixmap_init(void)
 {
 	pgd_t *pgd;
@@ -654,7 +661,7 @@ void __init early_fixmap_init(void)
 
 	pgd = pgd_offset_k(addr);
 	if (CONFIG_PGTABLE_LEVELS > 3 &&
-	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
+	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
 		/*
 		 * We only end up here if the kernel mapping and the fixmap
 		 * share the top level pgd entry, which should only happen on
@@ -663,12 +670,14 @@ void __init early_fixmap_init(void)
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
 		pud = pud_offset_kimg(pgd, addr);
 	} else {
-		pgd_populate(&init_mm, pgd, bm_pud);
+		if (pgd_none(*pgd))
+			__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
 		pud = fixmap_pud(addr);
 	}
-	pud_populate(&init_mm, pud, bm_pmd);
+	if (pud_none(*pud))
+		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
 	pmd = fixmap_pmd(addr);
-	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
 
 	/*
 	 * The boot-ioremap range spans multiple pmds, for which
diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
new file mode 100644
index 000000000000..91371daf397c
--- /dev/null
+++ b/arch/arm64/mm/physaddr.c
@@ -0,0 +1,30 @@
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/mmdebug.h>
+#include <linux/mm.h>
+
+#include <asm/memory.h>
+
+phys_addr_t __virt_to_phys(unsigned long x)
+{
+	WARN(!__is_lm_address(x),
+	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
+	     (void *)x,
+	     (void *)x);
+
+	return __virt_to_phys_nodebug(x);
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+phys_addr_t __phys_addr_symbol(unsigned long x)
+{
+	/*
+	 * This is bounds checking against the kernel image only.
+	 * __pa_symbol should only be used on kernel symbol addresses.
+	 */
+	VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START ||
+		       x > (unsigned long) KERNEL_END);
+	return __pa_symbol_nodebug(x);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 32682be978e0..cd4d53d7e458 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -138,6 +138,7 @@ ENDPROC(cpu_do_resume)
 *  - pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
+	pre_ttbr0_update_workaround x0, x1, x2
	mmid	x1, x1				// get mm->context.id
	bfi	x0, x1, #48, #16		// set the ASID
	msr	ttbr0_el1, x0			// set TTBR0
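With physaddr.c in place, CONFIG_DEBUG_VIRTUAL turns the linear-map assumption behind __pa()/virt_to_phys() into a runtime check. A contrived sketch of what the new __virt_to_phys() accepts and rejects (illustration only; page stands for any linear-map page):

	phys_addr_t ok   = __pa_symbol(_end);		/* image symbol: use __pa_symbol() */
	phys_addr_t also = __pa(page_address(page));	/* linear-map address: fine */
	phys_addr_t bad  = __pa(_end);			/* now WARNs: non-linear address */

__phys_addr_symbol() applies the inverse check, VIRTUAL_BUG_ON()-ing when __pa_symbol() is used on anything outside [KERNEL_START, KERNEL_END].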