author		Vineet Gupta <vgupta@synopsys.com>	2015-02-20 10:36:28 +0530
committer	Vineet Gupta <vgupta@synopsys.com>	2015-10-17 17:48:20 +0530
commit		12ebc1581ad114543ae822aa3a12f76072e2f902 (patch)
tree		2e80b72284497b1a45f29f0970ce13332f6896e2
parent		bd5e88ad72b26ebf7ecb231bc22ceecd6cbdb951 (diff)
mm,thp: introduce flush_pmd_tlb_range
ARCHes with special requirements for evicting THP backing TLB entries can implement this. Even otherwise, it can help optimize the TLB flush in the THP regime: the stock flush_tlb_range() typically has an optimization to nuke the entire TLB if the flush span is greater than a certain threshold, which will likely be true for a single huge page. Thus a single THP flush would invalidate the entire TLB, which is not desirable.

e.g. see arch/arc: flush_pmd_tlb_range

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: http://lkml.kernel.org/r/20151009100816.GC7873@node
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
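A minimal sketch of the flush_tlb_range() behaviour the message describes (hypothetical arch code; the threshold value and the example_* helpers are illustrative only, not taken from this patch or any particular arch). Once the span crosses the threshold, the whole TLB is flushed, so even a single HPAGE_PMD_SIZE span triggers a full flush:

/*
 * Illustrative sketch only -- not part of this patch or any specific arch.
 * Many flush_tlb_range() implementations give up on per-page invalidation
 * once the span exceeds a small threshold and flush the whole TLB instead,
 * so a single huge-page span (HPAGE_PMD_SIZE) already hits the full flush.
 */
#define EXAMPLE_FULL_FLUSH_THRESHOLD	(32 * PAGE_SIZE)	/* hypothetical */

static void example_flush_tlb_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	if (end - start > EXAMPLE_FULL_FLUSH_THRESHOLD) {
		example_flush_tlb_all();		/* hypothetical: nukes every TLB entry */
		return;
	}

	for (; start < end; start += PAGE_SIZE)
		example_flush_tlb_page(vma, start);	/* hypothetical: per-page invalidate */
}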
-rw-r--r--	mm/huge_memory.c	2
-rw-r--r--	mm/pgtable-generic.c	26
2 files changed, 21 insertions, 7 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4b06b8db9df2..e25eb3d2081a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1880,7 +1880,7 @@ static int __split_huge_page_map(struct page *page,
* here). But it is generally safer to never allow
* small and huge TLB entries for the same virtual
* address to be loaded simultaneously. So instead of
- * doing "pmd_populate(); flush_tlb_range();" we first
+ * doing "pmd_populate(); flush_pmd_tlb_range();" we first
* mark the current pmd notpresent (atomically because
* here the pmd_trans_huge and pmd_trans_splitting
* must remain set at all times on the pmd until the
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index c9c59bb75a17..7d3db0247983 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -84,6 +84,20 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise also, it can help optimize normal TLB flush in
+ * THP regime. Stock flush_tlb_range() typically has an optimization to nuke
+ * the entire TLB if the flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single thp flush will
+ * invalidate the entire TLB which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
@@ -93,7 +107,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
if (changed) {
set_pmd_at(vma->vm_mm, address, pmdp, entry);
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
}
@@ -107,7 +121,7 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
young = pmdp_test_and_clear_young(vma, address, pmdp);
if (young)
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return young;
}
#endif
@@ -120,7 +134,7 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(!pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#endif
@@ -133,7 +147,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
/* tlb flush only to serialize against gup-fast */
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif
@@ -179,7 +193,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
pmd_t entry = *pmdp;
set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif
@@ -196,7 +210,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#endif
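For completeness, a hedged sketch of how an architecture could opt out of the generic fallback above: define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE and provide its own flush_pmd_tlb_range() that evicts only the huge-page entries (the idea behind the arch/arc change the commit message points to; the file paths and the example_* helper below are illustrative, not the actual ARC code):

/* arch/<foo>/include/asm/pgtable.h -- illustrative placement only */
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end);

/* arch/<foo>/mm/tlb.c -- sketch: walk the range in HPAGE_PMD_SIZE steps */
void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	for (; start < end; start += HPAGE_PMD_SIZE)
		example_flush_tlb_huge_page(vma, start);	/* hypothetical per-huge-page invalidate */
}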