author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-22 19:24:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-22 19:24:13 -0700
commit     21f9debf74c3ae8fdd26fd2bc3c0169a08eba6b4 (patch)
tree       a7e9cdcaf0177bb9606d6e5982937cce55df9898 /arch
parent     bd28b14591b98f696bc9f94c5ba2e598ca487dfd (diff)
parent     24e49ee3d76b70853a96520e46b8837e5eae65b2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc updates from David Miller:
"Some 32-bit kgdb cleanups from Sam Ravnborg, and a hugepage TLB flush
overhead fix on 64-bit from Nitin Gupta"
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
sparc64: Reduce TLB flushes during hugepte changes
aeroflex/greth: fix warning about unused variable
openprom: fix warning
sparc32: drop superfluous cast in calls to __nocache_pa()
sparc32: fix build with STRICT_MM_TYPECHECKS
sparc32: use proper prototype for trapbase
sparc32: drop local prototype in kgdb_32
sparc32: drop hardcoding trap_level in kgdb_trap
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/include/asm/head_32.h      |  8
-rw-r--r--  arch/sparc/include/asm/kgdb.h         |  2
-rw-r--r--  arch/sparc/include/asm/page_32.h      |  2
-rw-r--r--  arch/sparc/include/asm/pgalloc_32.h   |  4
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h   |  2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h   | 43
-rw-r--r--  arch/sparc/include/asm/tlbflush_64.h  |  3
-rw-r--r--  arch/sparc/kernel/entry.S             | 10
-rw-r--r--  arch/sparc/kernel/kernel.h            |  1
-rw-r--r--  arch/sparc/kernel/kgdb_32.c           | 11
-rw-r--r--  arch/sparc/kernel/setup_32.c          |  4
-rw-r--r--  arch/sparc/mm/hugetlbpage.c           | 33
-rw-r--r--  arch/sparc/mm/init_64.c               | 12
-rw-r--r--  arch/sparc/mm/io-unit.c               |  4
-rw-r--r--  arch/sparc/mm/srmmu.c                 | 19
-rw-r--r--  arch/sparc/mm/tlb.c                   | 25
-rw-r--r--  arch/sparc/mm/tsb.c                   | 32
17 files changed, 130 insertions, 85 deletions
diff --git a/arch/sparc/include/asm/head_32.h b/arch/sparc/include/asm/head_32.h
index 5f1dbe315bc8..6fc60fd182c4 100644
--- a/arch/sparc/include/asm/head_32.h
+++ b/arch/sparc/include/asm/head_32.h
@@ -43,10 +43,10 @@
 	nop;
 
 #ifdef CONFIG_KGDB
-#define KGDB_TRAP(num) \
-	b kgdb_trap_low; \
-	rd %psr,%l0; \
-	nop; \
+#define KGDB_TRAP(num) \
+	mov num, %l7; \
+	b kgdb_trap_low; \
+	rd %psr,%l0; \
 	nop;
 #else
 #define KGDB_TRAP(num) \
diff --git a/arch/sparc/include/asm/kgdb.h b/arch/sparc/include/asm/kgdb.h
index 47366af7a589..a6ad7bf84bac 100644
--- a/arch/sparc/include/asm/kgdb.h
+++ b/arch/sparc/include/asm/kgdb.h
@@ -28,10 +28,10 @@ enum regnames {
 #define NUMREGBYTES		((GDB_CSR + 1) * 4)
 #else
 #define NUMREGBYTES		((GDB_Y + 1) * 8)
+#endif
 
 struct pt_regs;
 asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
-#endif
 
 void arch_kgdb_breakpoint(void);
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index f82a1f36b655..0efd0583a8c9 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -69,7 +69,6 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define __pte(x)	((pte_t) { (x) } )
 #define __iopte(x)	((iopte_t) { (x) } )
-/* #define __pmd(x)	((pmd_t) { (x) } ) */ /* XXX procedure with loop */
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __ctxd(x)	((ctxd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
@@ -97,7 +96,6 @@ typedef unsigned long iopgprot_t;
 #define __pte(x)	(x)
 #define __iopte(x)	(x)
-/* #define __pmd(x)	(x) */ /* XXX later */
 #define __pgd(x)	(x)
 #define __ctxd(x)	(x)
 #define __pgprot(x)	(x)
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index a3890da94428..0346c7e62452 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -29,9 +29,9 @@ static inline void free_pgd_fast(pgd_t *pgd)
 
 static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 {
-	unsigned long pa = __nocache_pa((unsigned long)pmdp);
+	unsigned long pa = __nocache_pa(pmdp);
 
-	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
+	set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4))));
 }
 
 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 91b963a887b7..ce6f56980aef 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -298,7 +298,7 @@ static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {
-	prot &= ~__pgprot(SRMMU_CACHE);
+	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
 	return prot;
 }
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 93ce0ada3c63..e7d82803a48f 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
 #define pgprot_noncached pgprot_noncached
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline pte_t pte_mkhuge(pte_t pte)
+static inline unsigned long __pte_huge_mask(void)
 {
 	unsigned long mask;
 
@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
 	: "=r" (mask)
 	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
 
-	return __pte(pte_val(pte) | mask);
+	return mask;
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | __pte_huge_mask());
+}
+
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return !!(pte_val(pte) & __pte_huge_mask());
 }
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 	return __pmd(pte_val(pte));
 }
 #endif
+#else
+static inline bool is_hugetlb_pte(pte_t pte)
+{
+	return false;
+}
 #endif
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -856,6 +872,19 @@ static inline unsigned long pud_pfn(pud_t pud)
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm);
 
+static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+				pte_t *ptep, pte_t orig, int fullmm)
+{
+	/* It is more efficient to let flush_tlb_kernel_range()
+	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 * and SUN4V pte layout, so this inline test is fine.
+	 */
+	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
+}
+
 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr,
@@ -872,15 +901,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	pte_t orig = *ptep;
 
 	*ptep = pte;
-
-	/* It is more efficient to let flush_tlb_kernel_range()
-	 * handle init_mm tlb flushes.
-	 *
-	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
-	 * and SUN4V pte layout, so this inline test is fine.
-	 */
-	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
-		tlb_batch_add(mm, addr, ptep, orig, fullmm);
+	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
 }
 
 #define set_pte_at(mm,addr,ptep,pte)	\
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index dea1cfa2122b..a8e192e90700 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -8,6 +8,7 @@
 #define TLB_BATCH_NR	192
 
 struct tlb_batch {
+	bool huge;
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
 	unsigned long active;
@@ -16,7 +17,7 @@ struct tlb_batch {
 
 void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 void flush_tsb_user(struct tlb_batch *tb);
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
 
 /* TLB flush operations. */
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 51aa6e86a5f8..07918ab3062e 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1225,20 +1225,18 @@ breakpoint_trap:
 	RESTORE_ALL
 
 #ifdef CONFIG_KGDB
-	.align	4
-	.globl	kgdb_trap_low
-	.type	kgdb_trap_low,#function
-kgdb_trap_low:
+	ENTRY(kgdb_trap_low)
 	rd	%wim,%l3
 	SAVE_ALL
 	wr 	%l0, PSR_ET, %psr
 	WRITE_PAUSE
 
+	mov	%l7, %o0		! trap_level
 	call	kgdb_trap
-	 add	%sp, STACKFRAME_SZ, %o0
+	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
 
 	RESTORE_ALL
-	.size	kgdb_trap_low,.-kgdb_trap_low
+	ENDPROC(kgdb_trap_low)
 #endif
 
 	.align	4
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 5057ec2e4af6..c9804551262c 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -127,6 +127,7 @@ extern unsigned int t_nmi[];
 extern unsigned int linux_trap_ipi15_sun4d[];
 extern unsigned int linux_trap_ipi15_sun4m[];
 
+extern struct tt_entry trapbase;
 extern struct tt_entry trapbase_cpu1;
 extern struct tt_entry trapbase_cpu2;
 extern struct tt_entry trapbase_cpu3;
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index dcf210811af4..6e8e318c57be 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -12,7 +12,8 @@
 #include <asm/irq.h>
 #include <asm/cacheflush.h>
 
-extern unsigned long trapbase;
+#include "kernel.h"
+#include "entry.h"
 
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
@@ -133,21 +134,19 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 	return -1;
 }
 
-extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
-
-asmlinkage void kgdb_trap(struct pt_regs *regs)
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
 {
 	unsigned long flags;
 
 	if (user_mode(regs)) {
-		do_hw_interrupt(regs, 0xfd);
+		do_hw_interrupt(regs, trap_level);
 		return;
 	}
 
 	flushw_all();
 
 	local_irq_save(flags);
-	kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
+	kgdb_handle_exception(trap_level, SIGTRAP, 0, regs);
 	local_irq_restore(flags);
 }
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 69d75ff1c25c..c4e65cb3280f 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -68,8 +68,6 @@ struct screen_info screen_info = {
  * prints out pretty messages and returns.
  */
 
-extern unsigned long trapbase;
-
 /* Pretty sick eh? */
 static void prom_sync_me(void)
 {
@@ -300,7 +298,7 @@ void __init setup_arch(char **cmdline_p)
 	int i;
 	unsigned long highest_paddr;
 
-	sparc_ttable = (struct tt_entry *) &trapbase;
+	sparc_ttable = &trapbase;
 
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 4977800e9770..ba52e6466a82 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
+	pte_t orig[2];
+	unsigned long nptes;
 
 	if (!pte_present(*ptep) && pte_present(entry))
 		mm->context.huge_pte_count++;
 
 	addr &= HPAGE_MASK;
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		set_pte_at(mm, addr, ptep, entry);
+
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	orig[0] = *ptep;
+	orig[1] = *(ptep + nptes / 2);
+	for (i = 0; i < nptes; i++) {
+		*ptep = entry;
 		ptep++;
 		addr += PAGE_SIZE;
 		pte_val(entry) += PAGE_SIZE;
 	}
+
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 {
 	pte_t entry;
 	int i;
+	unsigned long nptes;
 
 	entry = *ptep;
 	if (pte_present(entry))
 		mm->context.huge_pte_count--;
 
 	addr &= HPAGE_MASK;
-
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		pte_clear(mm, addr, ptep);
+	nptes = 1 << HUGETLB_PAGE_ORDER;
+	for (i = 0; i < nptes; i++) {
+		*ptep = __pte(0UL);
 		addr += PAGE_SIZE;
 		ptep++;
 	}
 
+	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+	addr -= REAL_HPAGE_SIZE;
+	ptep -= nptes / 2;
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+
 	return entry;
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 09e838801e39..652683cb4b4b 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 	tsb_insert(tsb, tag, tte);
 }
 
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline bool is_hugetlb_pte(pte_t pte)
-{
-	if ((tlb_type == hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-	    (tlb_type != hypervisor &&
-	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
-		return true;
-	return false;
-}
-#endif
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	struct mm_struct *mm;
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index f311bf219016..338fb71535de 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -133,7 +133,7 @@ nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
 	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
 	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
 		set_bit(scan, iounit->bmap);
-		sbus_writel(iopte, &iounit->page_table[scan]);
+		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
 	}
 	IOD(("%08lx\n", vaddr));
 	return vaddr;
@@ -228,7 +228,7 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
 			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
 
 			iopte = iounit->page_table + i;
-			sbus_writel(MKIOPTE(__pa(page)), iopte);
+			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
 		}
 		addr += PAGE_SIZE;
 		va += PAGE_SIZE;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 5cbc96d801ff..c7f2a5295b3a 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -107,17 +107,22 @@ static inline int srmmu_pmd_none(pmd_t pmd)
 
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+{
+	pte_t pte;
+
+	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
+	set_pte((pte_t *)ctxp, pte);
+}
 
 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
 	unsigned long ptp;	/* Physical address, shifted right by 4 */
 	int i;
 
-	ptp = __nocache_pa((unsigned long) ptep) >> 4;
+	ptp = __nocache_pa(ptep) >> 4;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
@@ -128,8 +133,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 
 	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
@@ -911,7 +916,7 @@ void __init srmmu_paging_init(void)
 	/* ctx table has to be physically aligned to its size */
 	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
-	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
+	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
 
 	for (i = 0; i < num_contexts; i++)
 		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 9df2190c097e..f81cd9736700 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec)
+			      bool exec, bool huge)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr, huge);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
-	if (nr == 0)
+	if (nr == 0) {
 		tb->mm = mm;
+		tb->huge = huge;
+	}
+
+	if (tb->huge != huge) {
+		flush_tlb_pending();
+		tb->huge = huge;
+		nr = 0;
+	}
 
 	tb->vaddrs[nr] = vaddr;
 	tb->tlb_nr = ++nr;
@@ -104,6 +112,8 @@ out:
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		   pte_t *ptep, pte_t orig, int fullmm)
 {
+	bool huge = is_hugetlb_pte(orig);
+
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 		if (pte_val(*pte) & _PAGE_VALID) {
 			bool exec = pte_exec(*pte);
 
-			tlb_batch_add_one(mm, vaddr, exec);
+			tlb_batch_add_one(mm, vaddr, exec, false);
 		}
 		pte++;
 		vaddr += PAGE_SIZE;
@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			pte_t orig_pte = __pte(pmd_val(orig));
 			bool exec = pte_exec(orig_pte);
 
-			tlb_batch_add_one(mm, addr, exec);
-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
+			tlb_batch_add_one(mm, addr, exec, true);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  true);
 		} else {
 			tlb_batch_pmd_scan(mm, addr, orig);
 		}
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index a06576683c38..a0604a493a36 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
-
+	if (!tb->huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
 {
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(base);
-	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
-
+	if (!huge) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+	}
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)