author     Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>    2018-06-14 16:01:52 +0530
committer  Michael Ellerman <mpe@ellerman.id.au>            2018-06-20 09:13:25 +1000
commit     fadd03c615922d8521a2e76d4ba2335891cb2790 (patch)
tree       69ffe2b951674633146183b0178d7a262adcddc9 /arch
parent     758380b8155f69b4e2f77f27562f8a7a466749d6 (diff)
powerpc/mm/hash/4k: Free hugetlb page table caches correctly.
With 4k page size for hugetlb we allocate hugepage directories from their own
slab cache. With commit 0c4d268029bf ("powerpc/book3s64/mm: Simplify the rcu
callback for page table free") we missed freeing these allocated hugepd tables.

Update pgtable_free to handle hugetlb hugepd directory tables.
Fixes: 0c4d268029bf ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
[mpe: Add CONFIG_HUGETLB_PAGE guard to fix build break]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
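
The essence of the fix is an index translation. On hash/4k, the 16M and 16G
hugepd directories come from their own slab caches, identified by the shift
value (pdshift - shift), which does not match any of the PTE/PMD/PUD cache
indexes that the common pgtable_free() switch handles, so such tables were no
longer freed correctly. The patch encodes the hugepd cache index as a dedicated
pgtable_index enum value when the table is handed to pgtable_free_tlb(), and
decodes it again on the deferred free side. Below is a condensed, illustrative
sketch of the resulting flow (not the full code; the RCU-deferral machinery
inside pgtable_free_tlb() and the unchanged PTE/PMD/PUD cases are elided):

	/* encode side: arch/powerpc/mm/hugetlbpage.c, free_hugepd_range() */
	pgtable_free_tlb(tlb, hugepte,
			 get_hugepd_cache_index(pdshift - shift));

	/* decode side: reached from the deferred free path in
	 * arch/powerpc/mm/pgtable-book3s64.c
	 */
	static inline void pgtable_free(void *table, int index)
	{
		switch (index) {
		case HTLB_16M_INDEX:	/* 16M hugepd directory */
			kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
			break;
		case HTLB_16G_INDEX:	/* 16G hugepd directory */
			kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
			break;
		/* ... PTE/PMD/PUD cases unchanged, default: BUG() ... */
		}
	}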
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/include/asm/book3s/32/pgalloc.h      |  1
-rw-r--r--   arch/powerpc/include/asm/book3s/64/pgtable-4k.h   | 21
-rw-r--r--   arch/powerpc/include/asm/book3s/64/pgtable-64k.h  |  9
-rw-r--r--   arch/powerpc/include/asm/book3s/64/pgtable.h      |  5
-rw-r--r--   arch/powerpc/include/asm/nohash/32/pgalloc.h      |  1
-rw-r--r--   arch/powerpc/include/asm/nohash/64/pgalloc.h      |  1
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c                     |  3
-rw-r--r--   arch/powerpc/mm/pgtable-book3s64.c                | 12
8 files changed, 52 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 6a6673907e45..e4633803fe43 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index af5f2baac80f..a069dfcac9a9 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
 }
 
 #define is_hugepd(hpd)		(hugepd_ok(hpd))
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX	\
+	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+	switch (index) {
+	case H_16M_CACHE_INDEX:
+		return HTLB_16M_INDEX;
+	case H_16G_CACHE_INDEX:
+		return HTLB_16G_INDEX;
+	default:
+		BUG();
+	}
+	/* should not reach */
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index fb4b3ba52339..d7ee249d6890 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
 {
 	return 0;
 }
+
 #define is_hugepd(pdep)			0
 
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+	BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 63cee159022b..42aafba7a308 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -287,6 +287,11 @@ enum pgtable_index {
 	PMD_INDEX,
 	PUD_INDEX,
 	PGD_INDEX,
+	/*
+	 * Below are used with 4k page size and hugetlb
+	 */
+	HTLB_16M_INDEX,
+	HTLB_16G_INDEX,
 };
 
 extern unsigned long __vmalloc_start;
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 1707781d2f20..9de40eb614da 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0e693f322cb2..e2d62d033708 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
 	}
 }
 
+#define get_hugepd_cache_index(x)	(x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7c5f479c5c00..8a9a49c13865 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
 	else
-		pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+		pgtable_free_tlb(tlb, hugepte,
+				 get_hugepd_cache_index(pdshift - shift));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index c1f4ca45c93a..4afbfbb64bfd 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
 	case PUD_INDEX:
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
 		break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+		/* 16M hugepd directory at pud level */
+	case HTLB_16M_INDEX:
+		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+		break;
+		/* 16G hugepd directory at the pgd level */
+	case HTLB_16G_INDEX:
+		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+		break;
+#endif
 		/* We don't free pgd table via RCU callback */
 	default:
 		BUG();