summaryrefslogtreecommitdiff
path: root/mm/mm_init.c
diff options
context:
space:
mode:
author: Baoquan He <bhe@redhat.com> 2024-03-25 22:56:45 +0800
committer: Andrew Morton <akpm@linux-foundation.org> 2024-04-25 20:56:11 -0700
commit: 90e796e22e35af0d19874c36fa4a22709aec1659 (patch)
tree: 303ac261e6b2b13e9ae9cbd4e4d47424e9cbfe41 /mm/mm_init.c
parent: 0ac5e785dcb797a3f0af1b205ce48134e186e88f (diff)
mm/mm_init.c: remove unneeded calc_memmap_size()
Nobody calls calc_memmap_size() now. Link: https://lkml.kernel.org/r/20240325145646.1044760-6-bhe@redhat.com Signed-off-by: Baoquan He <bhe@redhat.com> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/mm_init.c')
-rw-r--r--  mm/mm_init.c  20
1 file changed, 0 insertions(+), 20 deletions(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index b211b30231cb..8c261572ca6e 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1332,26 +1332,6 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}
-static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
- unsigned long present_pages)
-{
- unsigned long pages = spanned_pages;
-
- /*
- * Provide a more accurate estimation if there are holes within
- * the zone and SPARSEMEM is in use. If there are holes within the
- * zone, each populated memory region may cost us one or two extra
- * memmap pages due to alignment because memmap pages for each
- * populated regions may not be naturally aligned on page boundary.
- * So the (present_pages >> 4) heuristic is a tradeoff for that.
- */
- if (spanned_pages > present_pages + (present_pages >> 4) &&
- IS_ENABLED(CONFIG_SPARSEMEM))
- pages = present_pages;
-
- return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
-}
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{