author		David Hildenbrand <david@redhat.com>	2020-07-22 11:45:55 +0200
committer	Heiko Carstens <hca@linux.ibm.com>	2020-07-27 10:33:59 +0200
commit		b9ff81003cf1a0b12b8d60b6ef33a34e84dfe7ac (patch)
tree		523f19c8854820a441891769c062af88cba1c8ca /arch/s390/mm
parent		aa18e0e65800bf3250b23914a28e0e3fd9cadec2 (diff)
s390/vmem: cleanup empty page tables
Let's clean up empty page tables. Consider only page tables that fully
fall into the identity mapping and the vmemmap range. As there are no
valid accesses to vmem/vmemmap within non-populated ranges, the single
TLB flush at the end should be sufficient.

Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20200722094558.9828-7-david@redhat.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
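In outline: after a remove operation has invalidated entries, each
try_free_*_table() helper added below rescans the table one level down
and, when every entry is empty, frees that table and clears the
upper-level entry. The following user-space sketch models that "scan,
then free if empty" step; the fixed-size table type and all names are
hypothetical stand-ins for the kernel's pte/pmd/pud/p4d tables, purely
illustrative and not kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_TABLE 512		/* stand-in for PTRS_PER_PTE etc. */

struct table {
	void *entry[ENTRIES_PER_TABLE];
};

/* Mirrors the pte_none()/pmd_none() scans in the helpers below. */
static bool table_is_empty(const struct table *t)
{
	size_t i;

	for (i = 0; i < ENTRIES_PER_TABLE; i++)
		if (t->entry[i])
			return false;
	return true;
}

/* Free the lower-level table and clear the slot pointing to it,
 * loosely the analogue of vmem_pte_free() plus pmd_clear(). */
static void try_free_table(struct table **slot)
{
	if (*slot && table_is_empty(*slot)) {
		free(*slot);
		*slot = NULL;
	}
}

int main(void)
{
	struct table *pte_table = calloc(1, sizeof(*pte_table));

	try_free_table(&pte_table);	/* all entries empty, so it is freed */
	printf("table freed: %s\n", pte_table ? "no" : "yes");
	return 0;
}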
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/vmem.c | 102
1 file changed, 101 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index a2b79681df69..b831f9f9130a 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -63,6 +63,15 @@ pte_t __ref *vmem_pte_alloc(void)
 	return pte;
 }
 
+static void vmem_pte_free(unsigned long *table)
+{
+	/* We don't expect boot memory to be removed ever. */
+	if (!slab_is_available() ||
+	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
+		return;
+	page_table_free(&init_mm, table);
+}
+
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
 static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, bool add, bool direct)
@@ -105,6 +114,21 @@ out:
 	return ret;
 }
 
+static void try_free_pte_table(pmd_t *pmd, unsigned long start)
+{
+	pte_t *pte;
+	int i;
+
+	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
+	pte = pte_offset_kernel(pmd, start);
+	for (i = 0; i < PTRS_PER_PTE; i++, pte++)
+		if (!pte_none(*pte))
+			return;
+
+	vmem_pte_free(__va(pmd_deref(*pmd)));
+	pmd_clear(pmd);
+}
+
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
 static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 				  unsigned long end, bool add, bool direct)
@@ -171,6 +195,8 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 		ret = modify_pte_table(pmd, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_pte_table(pmd, addr & PMD_MASK);
 	}
 	ret = 0;
 out:
@@ -179,6 +205,29 @@ out:
 	return ret;
 }
 
+static void try_free_pmd_table(pud_t *pud, unsigned long start)
+{
+	const unsigned long end = start + PUD_SIZE;
+	pmd_t *pmd;
+	int i;
+
+	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	if (end > VMALLOC_START)
+		return;
+#ifdef CONFIG_KASAN
+	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+		return;
+#endif
+
+	pmd = pmd_offset(pud, start);
+	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
+		if (!pmd_none(*pmd))
+			return;
+
+	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
+	pud_clear(pud);
+}
+
 static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 			    bool add, bool direct)
 {
@@ -225,6 +274,8 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 		ret = modify_pmd_table(pud, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_pmd_table(pud, addr & PUD_MASK);
 	}
 	ret = 0;
 out:
@@ -233,6 +284,29 @@ out:
 	return ret;
 }
 
+static void try_free_pud_table(p4d_t *p4d, unsigned long start)
+{
+	const unsigned long end = start + P4D_SIZE;
+	pud_t *pud;
+	int i;
+
+	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	if (end > VMALLOC_START)
+		return;
+#ifdef CONFIG_KASAN
+	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+		return;
+#endif
+
+	pud = pud_offset(p4d, start);
+	for (i = 0; i < PTRS_PER_PUD; i++, pud++)
+		if (!pud_none(*pud))
+			return;
+
+	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
+	p4d_clear(p4d);
+}
+
 static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
 			    bool add, bool direct)
 {
@@ -257,12 +331,37 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
 		ret = modify_pud_table(p4d, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_pud_table(p4d, addr & P4D_MASK);
 	}
 	ret = 0;
 out:
 	return ret;
 }
 
+static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
+{
+	const unsigned long end = start + PGDIR_SIZE;
+	p4d_t *p4d;
+	int i;
+
+	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	if (end > VMALLOC_START)
+		return;
+#ifdef CONFIG_KASAN
+	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+		return;
+#endif
+
+	p4d = p4d_offset(pgd, start);
+	for (i = 0; i < PTRS_PER_P4D; i++, p4d++)
+		if (!p4d_none(*p4d))
+			return;
+
+	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
+	pgd_clear(pgd);
+}
+
 static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 			    bool direct)
 {
@@ -291,6 +390,8 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 		ret = modify_p4d_table(pgd, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_p4d_table(pgd, addr & PGDIR_MASK);
 	}
 	ret = 0;
 out:
@@ -319,7 +420,6 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 
 /*
  * Remove a physical memory range from the 1:1 mapping.
- * Currently only invalidates page table entries.
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
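For reference, the guard at the top of each try_free_*_table() helper
above can be modeled in isolation: a table may only be freed when the
range it covers lies entirely below the vmalloc area and does not
intersect the KASAN shadow. The constants below are made-up
placeholders, not real s390 layout values:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for VMALLOC_START and the KASAN shadow window. */
#define MODEL_VMALLOC_START	0x8000UL
#define MODEL_SHADOW_START	0x4000UL
#define MODEL_SHADOW_END	0x6000UL

/* A table covering [start, end) is only a freeing candidate when the
 * range stays below the vmalloc area and clear of the shadow region. */
static bool may_free_table(unsigned long start, unsigned long end)
{
	if (end > MODEL_VMALLOC_START)
		return false;
	if (start < MODEL_SHADOW_END && end > MODEL_SHADOW_START)
		return false;	/* intersects the shadow region */
	return true;
}

int main(void)
{
	printf("%d\n", may_free_table(0x0000UL, 0x2000UL)); /* 1: fully below shadow */
	printf("%d\n", may_free_table(0x5000UL, 0x7000UL)); /* 0: overlaps shadow */
	printf("%d\n", may_free_table(0x7000UL, 0x9000UL)); /* 0: reaches vmalloc area */
	return 0;
}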