author		Peter Zijlstra <peterz@infradead.org>	2020-02-03 17:37:05 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-02-04 03:05:26 +0000
commit		3af4bd033759c4dab4f0ff594f0aa1e8d182b9d7
tree		a6978c1569fbfe5a1d78d753fbc853b3d87a7407
parent		ff2e6d7259f82ccc9a5aaa7f41194161d9262392
asm-generic/tlb: rename HAVE_MMU_GATHER_PAGE_SIZE
Towards a more consistent naming scheme.
Link: http://lkml.kernel.org/r/20200116064531.483522-8-aneesh.kumar@linux.ibm.com
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 arch/Kconfig              | 2 +-
 arch/powerpc/Kconfig      | 2 +-
 include/asm-generic/tlb.h | 9 ++++++---
 mm/mmu_gather.c           | 4 ++--
 4 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 501d565690b5..e8548211b6a9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -396,7 +396,7 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
 config MMU_GATHER_RCU_TABLE_FREE
 	bool
 
-config HAVE_MMU_GATHER_PAGE_SIZE
+config MMU_GATHER_PAGE_SIZE
 	bool
 
 config MMU_GATHER_NO_RANGE
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 490e0e7a7f4f..bf2b538aba12 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -223,7 +223,7 @@ config PPC
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select MMU_GATHER_RCU_TABLE_FREE
-	select HAVE_MMU_GATHER_PAGE_SIZE
+	select MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 04a1b8f08eea..53befa5acb27 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -121,11 +121,14 @@
  *
  * Additionally there are a few opt-in features:
  *
- *  HAVE_MMU_GATHER_PAGE_SIZE
+ *  MMU_GATHER_PAGE_SIZE
  *
  *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
  *  changes the size and provides mmu_gather::page_size to tlb_flush().
  *
+ *  This might be useful if your architecture has size specific TLB
+ *  invalidation instructions.
+ *
  *  MMU_GATHER_RCU_TABLE_FREE
  *
  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
@@ -279,7 +282,7 @@ struct mmu_gather {
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
 
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	unsigned int		page_size;
 #endif
 #endif
@@ -435,7 +438,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void tlb_change_page_size(struct mmu_gather *tlb,
 					unsigned int page_size)
 {
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	if (tlb->page_size && tlb->page_size != page_size) {
 		if (!tlb->fullmm && !tlb->need_flush_all)
 			tlb_flush_mmu(tlb);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 86bb2176e173..297c70307367 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -69,7 +69,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 
 	VM_BUG_ON(!tlb->end);
 
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	VM_WARN_ON(tlb->page_size != page_size);
 #endif
 
@@ -223,7 +223,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
 	tlb->page_size = 0;
 #endif
 
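The comment added to include/asm-generic/tlb.h above captures why the option exists: because the core flushes whenever tlb_change_page_size() actually changes the size, every page pending in the gather shares mmu_gather::page_size, so an architecture with size-specific TLB invalidation instructions can consume it directly in its tlb_flush() hook. The following is a minimal sketch of such a hook, not part of this patch: the mmu_gather fields and flush_tlb_mm() are real, but flush_tlb_range_psize() is a hypothetical arch-private helper standing in for a size-aware invalidation primitive.

/*
 * Illustrative sketch only -- not from this patch. An architecture
 * that selects MMU_GATHER_PAGE_SIZE knows all pending pages in the
 * gather share tlb->page_size, so one size-specific range flush
 * covers the whole batch.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		/* Tearing down the whole address space: flush it all. */
		flush_tlb_mm(tlb->mm);
	} else {
		/*
		 * flush_tlb_range_psize() is hypothetical: one
		 * invalidation per page of the given size over
		 * [start, end).
		 */
		flush_tlb_range_psize(tlb->mm, tlb->start, tlb->end,
				      tlb->page_size);
	}
}

Without the option, a gather may batch mixed page sizes and tlb_flush() receives no page_size, so such an architecture would have to invalidate conservatively instead.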