Diffstat (limited to 'arch/powerpc/include/asm/book3s')
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h        |  7
-rw-r--r--  arch/powerpc/include/asm/book3s/32/tlbflush.h       | 25
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h       |  5
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h           |  2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hugetlb.h        | 20
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu-hash.h       | 15
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h        | 30
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h        | 23
-rw-r--r--  arch/powerpc/include/asm/book3s/64/radix.h          | 40
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush-radix.h |  2
-rw-r--r--  arch/powerpc/include/asm/book3s/tlbflush.h          | 11
11 files changed, 111 insertions(+), 69 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 02f5acd7ccc4..751cf931bb3f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -84,17 +84,12 @@
* of RAM. -- Cort
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
-#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
#define VMALLOC_END ioremap_bot
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
extern unsigned long ioremap_bot;
@@ -164,7 +159,6 @@ static inline unsigned long pte_update(pte_t *p,
1: lwarx %0,0,%3\n\
andc %1,%0,%4\n\
or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
" stwcx. %1,0,%3\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
@@ -186,7 +180,6 @@ static inline unsigned long long pte_update(pte_t *p,
lwzx %0,0,%3\n\
andc %1,%L0,%5\n\
or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
" stwcx. %1,0,%4\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
new file mode 100644
index 000000000000..068085b709fb
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H
+
+#define MMU_NO_CONTEXT (0)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */
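
A hypothetical call site, for orientation: after write-protecting a user PTE, the stale translation is flushed with flush_tlb_page(); on these hash-MMU CPUs the local_* variants simply delegate to the global flushes, as the inline wrappers above show.

	/* Sketch: vma, addr and ptep are assumed to come from the
	 * surrounding fault/unmap path. */
	ptep_set_wrprotect(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);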
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index c81793d47af9..f82ee8a3b561 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -137,10 +137,9 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
shift = mmu_psize_defs[psize].shift; \
for (index = 0; vpn < __end; index++, \
vpn += (1L << (shift - VPN_SHIFT))) { \
- if (!__split || __rpte_sub_valid(rpte, index)) \
- do {
+ if (!__split || __rpte_sub_valid(rpte, index))
-#define pte_iterate_hashed_end() } while(0); } } while(0)
+#define pte_iterate_hashed_end() } } while(0)
#define pte_pagesize_index(mm, addr, pte) \
(((pte) & H_PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0387b155f13d..d52a51b2ce7b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -3,6 +3,8 @@
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__
+#include <asm/asm-const.h>
+
/*
* Common bits between 4K and 64K pages in a linux-style PTE.
* Additional bits may be defined in pgtable-hash64-*.h
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index c459f937d484..50888388a359 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -32,26 +32,6 @@ static inline int hstate_get_psize(struct hstate *hstate)
}
}
-#define arch_make_huge_pte arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
-{
- unsigned long page_shift;
-
- if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
- return entry;
-
- page_shift = huge_page_shift(hstate_vma(vma));
- /*
- * We don't support 1G hugetlb pages yet.
- */
- VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
- if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
- return __pte(pte_val(entry) | R_PAGE_LARGE);
- else
- return entry;
-}
-
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 50ed64fba4ae..b3520b549cba 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -12,9 +12,9 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>
+#include <asm/asm-const.h>
/*
* This is necessary to get the definition of PGTABLE_RANGE which we
@@ -364,6 +364,16 @@ static inline unsigned long hpte_new_to_old_r(unsigned long r)
return r & ~HPTE_R_3_0_SSIZE_MASK;
}
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+ unsigned long hpte_v;
+
+ hpte_v = be64_to_cpu(hptep->v);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+ return hpte_v;
+}
+
/*
* This function sets the AVPN and L fields of the HPTE appropriately
* using the base page size and actual page size.
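
hpte_get_old_v() lets HPTE-walking code always compare entries in the pre-POWER9 layout, whatever the CPU. A typical consumer scans an HPTE group for a usable slot, roughly as follows (a sketch modelled on the native hash backend; HPTES_PER_GROUP, HPTE_V_VALID and HPTE_V_BOLTED are defined elsewhere in this header):

	/* Sketch: find a valid, non-bolted entry in one HPTE group. */
	for (i = 0; i < HPTES_PER_GROUP; i++, hptep++) {
		unsigned long hpte_v = hpte_get_old_v(hptep);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED))
			break;	/* candidate found */
	}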
@@ -487,6 +497,9 @@ extern void hpte_init_native(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
+void slb_flush_all_realmode(void);
+void __slb_restore_bolted_realmode(void);
+void slb_restore_bolted_realmode(void);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 01ee40f11f3a..391ed2c3b697 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/cpumask.h>
+#include <linux/kmemleak.h>
#include <linux/percpu.h>
struct vmemmap_backing {
@@ -83,6 +84,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
/*
+ * Don't scan the PGD for pointers; it contains references to PUDs but
+ * those references are not full pointers and so can't be recognised by
+ * kmemleak.
+ */
+ kmemleak_no_scan(pgd);
+
+ /*
* With hugetlb, we don't clear the second half of the page table.
* If we share the same slab cache with the pmd or pud level table,
* we need to make sure we zero out the full table on alloc.
@@ -110,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
- pgtable_gfp_flags(mm, GFP_KERNEL));
+ pud_t *pud;
+
+ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+ /*
+ * Tell kmemleak to ignore the PUD; that means don't scan it for
+ * pointers and don't consider it a leak. PUDs are typically only
+ * referred to by their PGD, but kmemleak is not able to recognise those
+ * as pointers, leading to false leak reports.
+ */
+ kmemleak_ignore(pud);
+
+ return pud;
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -208,4 +227,11 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
#define check_pgt_cache() do { } while (0)
+extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
+static inline void update_page_count(int psize, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ atomic_long_add(count, &direct_pages_count[psize]);
+}
+
#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
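
update_page_count() keeps a per-page-size tally of linear-mapping pages; with CONFIG_PROC_FS disabled, the IS_ENABLED() test lets the compiler drop the atomic entirely. A hypothetical caller that has just created one 2M mapping in the linear map would account for it like so:

	update_page_count(MMU_PAGE_2M, 1);	/* sketch: one new 2M direct mapping */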
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 42aafba7a308..13a688fc8cd0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -44,6 +44,16 @@
#define _PAGE_PTE 0x4000000000000000UL /* distinguishes PTEs from pointers */
#define _PAGE_PRESENT 0x8000000000000000UL /* pte contains a translation */
+/*
+ * We need to mark a pmd pte invalid while splitting. We can do that by clearing
+ * the _PAGE_PRESENT bit. But then that will be taken as a swap pte. In order to
+ * differentiate between the two, we use a SW field when invalidating.
+ *
+ * We do the same temporary invalidation for a regular pte entry in ptep_set_access_flags.
+ *
+ * This is used only when _PAGE_PRESENT is cleared.
+ */
+#define _PAGE_INVALID _RPAGE_SW0
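
The pattern the comment describes, as a sketch (assuming the pte_update(mm, addr, ptep, clr, set, huge) helper from this header; new_bits is a placeholder for whatever the caller is changing):

	/* Transiently make the pte invalid without it looking like swap. */
	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);
	/* ... flush/modify while the entry is marked invalid ... */
	pte_update(mm, addr, ptep, _PAGE_INVALID, _PAGE_PRESENT | new_bits, 0);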
/*
* Top and bottom bits of RPN which can be used by hash
@@ -479,9 +489,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
{
if (full && radix_enabled()) {
/*
- * Let's skip the DD1 style pte update here. We know that
- * this is a full mm pte clear and hence can be sure there is
- * no parallel set_pte.
+ * We know that this is a full mm pte clear and
+ * hence can be sure there is no parallel set_pte.
*/
return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
}
@@ -569,7 +578,13 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
static inline int pte_present(pte_t pte)
{
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
+ /*
+ * A pte is considered present if _PAGE_PRESENT is set.
+ * We also need to treat as present a pte that is marked
+ * invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID
+ * if we find _PAGE_PRESENT cleared.
+ */
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
}
#ifdef CONFIG_PPC_MEM_KEYS
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index ef9f96742ce1..7d1a3d1543fc 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H
+#include <asm/asm-const.h>
+
#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif
@@ -12,12 +14,6 @@
#include <asm/book3s/64/radix-4k.h>
#endif
-/*
- * For P9 DD1 only, we need to track whether the pte's huge.
- */
-#define R_PAGE_LARGE _RPAGE_RSV1
-
-
#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
@@ -36,6 +32,9 @@
#define RADIX_PUD_BAD_BITS 0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS 0x60000000000000e0UL
+#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
+#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
+#define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
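
These shifts cascade from the per-level index sizes. For example, with the 64K radix geometry (PAGE_SHIFT = 16, RADIX_PTE_INDEX_SIZE = 5, RADIX_PMD_INDEX_SIZE = 9, RADIX_PUD_INDEX_SIZE = 9, taken from radix-64k.h), RADIX_PMD_SHIFT = 21 (2M pmd mappings), RADIX_PUD_SHIFT = 30 (1G pud mappings) and RADIX_PGD_SHIFT = 39.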
/*
* Size of EA range mapped by our pagetables.
*/
@@ -154,20 +153,7 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
{
unsigned long old_pte;
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-
- unsigned long new_pte;
-
- old_pte = __radix_pte_update(ptep, ~0ul, 0);
- /*
- * new value of pte
- */
- new_pte = (old_pte | set) & ~clr;
- radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
- if (new_pte)
- __radix_pte_update(ptep, 0, new_pte);
- } else
- old_pte = __radix_pte_update(ptep, clr, set);
+ old_pte = __radix_pte_update(ptep, clr, set);
if (!huge)
assert_pte_locked(mm, addr);
@@ -253,8 +239,6 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- return __pmd(pmd_val(pmd) | _PAGE_PTE | R_PAGE_LARGE);
return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
@@ -285,18 +269,14 @@ static inline unsigned long radix__get_tree_size(void)
unsigned long rts_field;
/*
* We support 52 bits, hence:
- * DD1 52-28 = 24, 0b11000
- * Others 52-31 = 21, 0b10101
+ * bits 52 - 31 = 21, 0b10101
* RTS encoding details
* bits 0 - 3 of rts -> bits 6 - 8 unsigned long
* bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
*/
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- rts_field = (0x3UL << 61);
- else {
- rts_field = (0x5UL << 5); /* 6 - 8 bits */
- rts_field |= (0x2UL << 61);
- }
+ rts_field = (0x5UL << 5); /* 6 - 8 bits */
+ rts_field |= (0x2UL << 61);
+
return rts_field;
}
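
Concretely: 52 - 31 = 21 = 0b10101; the low three bits (0b101 = 0x5) form the first term and the high two bits (0b10 = 0x2) the second, which is exactly what the two shifts above encode.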
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index ef5c3f2994c9..1154a6dc6d26 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -48,8 +48,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_all(void);
-extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
- unsigned long address);
extern void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/tlbflush.h b/arch/powerpc/include/asm/book3s/tlbflush.h
new file mode 100644
index 000000000000..dec11de41055
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/tlbflush.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+#define _ASM_POWERPC_BOOK3S_TLBFLUSH_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/tlbflush.h>
+#else
+#include <asm/book3s/32/tlbflush.h>
+#endif
+
+#endif /* _ASM_POWERPC_BOOK3S_TLBFLUSH_H */