Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 21
-rw-r--r--  arch/mips/mm/init.c            | 17
2 files changed, 28 insertions, 10 deletions
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index df243a64f430..007ccbe1e264 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,6 +1,13 @@
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
#include <asm-generic/vmlinux.lds.h>
#undef mips
@@ -119,11 +126,21 @@ SECTIONS
}
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
- . = ALIGN(PAGE_SIZE);
+ /*
+ * Align to 64K in an attempt to eliminate holes before the
+ * .bss..swapper_pg_dir section at the start of .bss. This
+ * also satisfies PAGE_SIZE alignment as the largest page size
+ * allowed is 64K.
+ */
+ . = ALIGN(0x10000);
__init_end = .;
/* freed after init ends here */
- BSS_SECTION(0, 0, 0)
+ /*
+ * Force .bss to 64K alignment so that .bss..swapper_pg_dir
+ * gets that alignment. .sbss should be empty, so there will be
+ * no holes after __init_end. */
+ BSS_SECTION(0, 0x10000, 0)
_end = . ;
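
The comments in the hunk above lean on one arithmetic fact: 0x10000 is a multiple of every page size a MIPS kernel can be built with, so the single 64K alignment also covers PAGE_SIZE. A minimal userspace sketch of that check (illustrative only; the page-size list mirrors the CONFIG_PAGE_SIZE_* options and is not part of this patch):

/*
 * Illustrative check, not kernel code: 0x10000 is a multiple of every
 * configurable MIPS page size (4K..64K), so ALIGN(0x10000) also
 * satisfies PAGE_SIZE alignment for any configuration.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* Page sizes selectable via CONFIG_PAGE_SIZE_{4,8,16,32,64}KB. */
	static const unsigned long page_sizes[] = {
		0x1000, 0x2000, 0x4000, 0x8000, 0x10000
	};
	const unsigned long bss_align = 0x10000;	/* ALIGN(0x10000) */
	size_t i;

	for (i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++)
		assert(bss_align % page_sizes[i] == 0);

	printf("0x%lx is a multiple of every supported PAGE_SIZE\n", bss_align);
	return 0;
}
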
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1a85ba92eb5c..be9acb2b959d 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -469,19 +469,20 @@ void __init_refok free_initmem(void)
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
-/*
- * On 64-bit we've got three-level pagetables with a slightly
- * different layout ...
- */
-#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
/*
* gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
* are constants. So we use the variants from asm-offset.h until that gcc
* will officially be retired.
+ *
+ * Align swapper_pg_dir to 64K; this allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers. If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size, and waste space. So we place it in its own section and align
+ * it in the linker script.
*/
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
-pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
-pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
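
The single-LUI argument in the init.c comment comes down to address arithmetic: once swapper_pg_dir is 64K-aligned, the low 16 bits of its address are zero, so the value produced by lui alone already equals the full pointer and the usual addiu/%lo() fix-up can be dropped from the TLB refill path. Doing the alignment in the linker script, rather than with __aligned(0x10000) on the array, also keeps the compiler from padding the object itself out to 64K, as the comment notes. A standalone sketch of the arithmetic (the sample address is invented; only the bit pattern matters):

/*
 * Sketch, not kernel code: with swapper_pg_dir 64K-aligned, the "lui"
 * result alone is the complete pointer, so no addiu/%lo() step is
 * needed in the TLB refill handler.
 */
#include <stdint.h>
#include <stdio.h>

/* Model of MIPS "lui rt, imm": imm fills the upper 16 bits, the lower
 * 16 bits are cleared. */
static uint32_t lui(uint16_t imm)
{
	return (uint32_t)imm << 16;
}

int main(void)
{
	uint32_t pgd_addr = 0x80a40000;		/* hypothetical, 64K-aligned */
	uint32_t built = lui(pgd_addr >> 16);	/* lui only, no %lo() fix-up */

	printf("lui-only %#x vs target %#x: %s\n", built, pgd_addr,
	       built == pgd_addr ? "match" : "mismatch");
	return 0;
}
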