author	Alexandre Ghiti <alex@ghiti.fr>	2021-07-23 15:01:25 +0200
committer	Palmer Dabbelt <palmerdabbelt@google.com>	2021-08-11 22:41:20 -0700
commit	526f83df1d83b9c95e571ea5d4dff12be1b215ec (patch)
tree	2b5468acd8aafc501a506b0a5f3d1e732b535ddd /arch/riscv
parent	0aba691a7443a7541c1dc56423e0c92cc6ea7164 (diff)
riscv: Get rid of map_size parameter to create_kernel_page_table
The kernel must always be mapped using PMD_SIZE, and this is already the case; this change just simplifies create_kernel_page_table() by dropping its now-redundant map_size parameter.

Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
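To make the fixed-granularity mapping concrete, here is a small standalone sketch (not part of the patch and not kernel code): it walks an image in PMD_SIZE steps the same way the simplified create_kernel_page_table() below does. The values for the virtual base, load address and image size are made-up examples, and PMD_SIZE is hard-coded to the usual RV64 Sv39 value of 2 MiB.

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE	(2UL * 1024 * 1024)	/* 2 MiB leaf; Sv39 value, assumed for the example */

int main(void)
{
	/* Example values only; in the kernel these live in kernel_map. */
	uint64_t virt_addr = 0xffffffff80000000ULL;	/* kernel virtual base */
	uint64_t phys_addr = 0x80200000ULL;		/* kernel load address, PMD_SIZE aligned */
	uint64_t size = 8UL * 1024 * 1024;		/* image size */
	uint64_t va, end_va = virt_addr + size;

	/* Mirrors the BUG_ON() in setup_vm(): mapping in PMD_SIZE steps only
	 * works if the load address is PMD_SIZE aligned. */
	if (phys_addr % PMD_SIZE)
		return 1;

	/* One PMD-sized mapping per step, like the create_pgd_mapping()
	 * calls in the diff below. */
	for (va = virt_addr; va < end_va; va += PMD_SIZE)
		printf("map va 0x%llx -> pa 0x%llx (PMD_SIZE)\n",
		       (unsigned long long)va,
		       (unsigned long long)(phys_addr + (va - virt_addr)));

	return 0;
}

(On RV32 the PMD level is folded, so PMD_SIZE there equals PGDIR_SIZE; the 2 MiB value above is just the RV64 Sv39 case. This is why the comment removed by the patch spoke of RV32 and RV64 boot alignment.)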
Diffstat (limited to 'arch/riscv')
-rw-r--r--	arch/riscv/mm/init.c	30
1 file changed, 11 insertions(+), 19 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 263ae055e81a..83d0ed915914 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -495,36 +495,35 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 #endif
 
 #ifdef CONFIG_XIP_KERNEL
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+static void __init create_kernel_page_table(pgd_t *pgdir,
 					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
 	/* Map the flash resident part */
 	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
-	for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
 		create_pgd_mapping(pgdir, va,
 				   kernel_map.xiprom + (va - kernel_map.virt_addr),
-				   map_size, PAGE_KERNEL_EXEC);
+				   PMD_SIZE, PAGE_KERNEL_EXEC);
 
 	/* Map the data in RAM */
 	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
-	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
 		create_pgd_mapping(pgdir, va,
 				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
-				   map_size, PAGE_KERNEL);
+				   PMD_SIZE, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
-					    bool early)
+static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
 {
 	uintptr_t va, end_va;
 
 	end_va = kernel_map.virt_addr + kernel_map.size;
-	for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
 		create_pgd_mapping(pgdir, va,
 				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
-				   map_size,
+				   PMD_SIZE,
 				   early ?
 					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
@@ -533,7 +532,6 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 {
 	uintptr_t __maybe_unused pa;
-	uintptr_t map_size;
 #ifndef __PAGETABLE_PMD_FOLDED
 	pmd_t fix_bmap_spmd, fix_bmap_epmd;
 #endif
@@ -557,15 +555,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 
 	pfn_base = PFN_DOWN(kernel_map.phys_addr);
 
-	/*
-	 * Enforce boot alignment requirements of RV32 and
-	 * RV64 by only allowing PMD or PGD mappings.
-	 */
-	map_size = PMD_SIZE;
-
 	/* Sanity check alignment and size */
 	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
-	BUG_ON((kernel_map.phys_addr % map_size) != 0);
+	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
 
 	pt_ops.alloc_pte = alloc_pte_early;
 	pt_ops.get_pte_virt = get_pte_virt_early;
@@ -602,7 +594,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * us to reach paging_init(). We map all memory banks later
 	 * in setup_vm_final() below.
 	 */
-	create_kernel_page_table(early_pg_dir, map_size, true);
+	create_kernel_page_table(early_pg_dir, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	/* Setup early PMD for DTB */
@@ -718,7 +710,7 @@ static void __init setup_vm_final(void)
 
 #ifdef CONFIG_64BIT
 	/* Map the kernel */
-	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
+	create_kernel_page_table(swapper_pg_dir, false);
 #endif
 
 	/* Clear fixmap PTE and PMD mappings */