Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/Kconfig      | 24 +++++++++++++++++++---
 arch/sh/mm/cache-sh4.c  |  3 +--
 arch/sh/mm/fault_32.c   |  3 +++
 arch/sh/mm/init.c       |  6 ++++++
 arch/sh/mm/ioremap_32.c | 10 +++++-----
 arch/sh/mm/ioremap_64.c |  6 +++---
 arch/sh/mm/mmap.c       |  3 ++-
 arch/sh/mm/numa.c       | 15 ++++++++++---
 8 files changed, 53 insertions(+), 17 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 0e7ba8e891cf..b3f6c1a30b22 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -190,19 +190,37 @@ config ARCH_MEMORY_PROBE
 	depends on MEMORY_HOTPLUG
 
 choice
+	prompt "Page table layout"
+	default PGTABLE_LEVELS_3 if X2TLB
+	default PGTABLE_LEVELS_2
+
+config PGTABLE_LEVELS_2
+	bool "2 Levels"
+	help
+	  This is the default page table layout for all SuperH CPUs.
+
+config PGTABLE_LEVELS_3
+	bool "3 Levels"
+	depends on X2TLB
+	help
+	  This enables a 3 level page table structure.
+
+endchoice
+
+choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_8KB if X2TLB
 	default PAGE_SIZE_4KB
 
 config PAGE_SIZE_4KB
 	bool "4kB"
-	depends on !MMU || !X2TLB
+	depends on !MMU || !X2TLB || PGTABLE_LEVELS_3
 	help
 	  This is the default page size used by all SuperH CPUs.
 
 config PAGE_SIZE_8KB
 	bool "8kB"
-	depends on !MMU || X2TLB
+	depends on !MMU || X2TLB && !PGTABLE_LEVELS_3
 	help
 	  This enables 8kB pages as supported by SH-X2 and later MMUs.
 
@@ -214,7 +232,7 @@ config PAGE_SIZE_16KB
 
 config PAGE_SIZE_64KB
 	bool "64kB"
-	depends on !MMU || CPU_SH4 || CPU_SH5
+	depends on !MMU || CPU_SH4 && !PGTABLE_LEVELS_3 || CPU_SH5
 	help
 	  This enables support for 64kB pages, possible on all SH-4
 	  CPUs and later.
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index f36a08bf3d5c..560ddb6bc8a7 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -256,8 +256,7 @@ static void sh4_flush_cache_page(void *args)
 		address = (unsigned long)vaddr;
 	}
 
-	if (pages_do_alias(address, phys))
-		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+	flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
 			(address & shm_align_mask), phys);
 
 	if (vma->vm_flags & VM_EXEC)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 47530104e0ad..28e22839c665 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -53,6 +53,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 	if (!pud_present(*pud_k))
 		return NULL;
 
+	if (!pud_present(*pud))
+		set_pud(pud, *pud_k);
+
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
 	if (!pmd_present(*pmd_k))
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 432acd07e76a..761910d142f8 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -120,7 +120,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 		pud = (pud_t *)pgd;
 		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
 			pmd = (pmd_t *)pud;
+#else
+			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+			pud_populate(&init_mm, pud, pmd);
+			pmd += k;
+#endif
 			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index a86eaa9d75a5..2141befb4f91 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -33,10 +33,10 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			unsigned long flags)
+void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
+			       unsigned long flags, void *caller)
 {
-	struct vm_struct * area;
+	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
 	pgprot_t pgprot;
 
@@ -67,7 +67,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	/*
 	 * Ok, go for it..
 	 */
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
@@ -103,7 +103,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 
 	return (void __iomem *)(offset + (char *)orig_addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_caller);
 
 void __iounmap(void __iomem *addr)
 {
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
index b16843d02b76..ef434657d428 100644
--- a/arch/sh/mm/ioremap_64.c
+++ b/arch/sh/mm/ioremap_64.c
@@ -258,15 +258,15 @@ static void shmedia_unmapioaddr(unsigned long vaddr)
 	pte_clear(&init_mm, vaddr, ptep);
 }
 
-void __iomem *__ioremap(unsigned long offset, unsigned long size,
-			unsigned long flags)
+void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+			       unsigned long flags, void *caller)
 {
 	char name[14];
 
 	sprintf(name, "phys_%08x", (u32)offset);
 	return shmedia_alloc_io(offset, size, name, flags);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_caller);
 
 void __iounmap(void __iomem *virtual)
 {
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index d2984fa42d3d..afeb710ec5c3 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -54,7 +54,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		/* We do not accept a shared mapping if it would violate
 		 * cache aliasing constraints.
 		 */
-		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
 			return -EINVAL;
 		return addr;
 	}
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 6c524446c0f6..422e92721878 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -28,7 +28,7 @@ void __init setup_memory(void)
 {
 	unsigned long free_pfn = PFN_UP(__pa(_end));
 	u64 base = min_low_pfn << PAGE_SHIFT;
-	u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn;
+	u64 size = (max_low_pfn << PAGE_SHIFT) - base;
 
 	lmb_add(base, size);
 
@@ -38,6 +38,15 @@ void __init setup_memory(void)
 			(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
 
 	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+	lmb_analyze();
+	lmb_dump_all();
+
+	/*
 	 * Node 0 sets up its pgdat at the first available pfn,
 	 * and bumps it up before setting up the bootmem allocator.
 	 */
@@ -71,7 +80,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 
 	/* Node-local pgdat */
 	NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
-					     SMP_CACHE_BYTES, end_pfn));
+					     SMP_CACHE_BYTES, end));
 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
 	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
@@ -81,7 +90,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 
 	/* Node-local bootmap */
 	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 	bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
-				       PAGE_SIZE, end_pfn);
+				       PAGE_SIZE, end);
 	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
 			  start_pfn, end_pfn);