author	Matthew Wilcox (Oracle) <willy@infradead.org>	2024-04-05 16:32:25 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-25 20:56:44 -0700
commit	2542b1ac9a46ac58f9565de0048457956898d481 (patch)
tree	3704ea4b8529db5a7ce998c622605786f306ae29 /mm
parent	5b8d75913a0ed9deb16140c0aa880c4d6db2dc62 (diff)
mm: inline destroy_large_folio() into __folio_put_large()
destroy_large_folio() has only one caller, move its contents there.

Link: https://lkml.kernel.org/r/20240405153228.2563754-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	14
-rw-r--r--	mm/swap.c	13
2 files changed, 10 insertions, 17 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c01757251de2..22e8b9f1d710 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -565,20 +565,6 @@ void prep_compound_page(struct page *page, unsigned int order)
 	prep_compound_head(page, order);
 }
 
-void destroy_large_folio(struct folio *folio)
-{
-	if (folio_test_hugetlb(folio)) {
-		free_huge_folio(folio);
-		return;
-	}
-
-	if (folio_test_large_rmappable(folio))
-		folio_undo_large_rmappable(folio);
-
-	mem_cgroup_uncharge(folio);
-	free_unref_page(&folio->page, folio_order(folio));
-}
-
 static inline void set_buddy_order(struct page *page, unsigned int order)
 {
 	set_page_private(page, order);
diff --git a/mm/swap.c b/mm/swap.c
index d7db3cd4e80a..6e3bd03673ea 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -127,9 +127,16 @@ static void __folio_put_large(struct folio *folio)
 	 * (it's never listed to any LRU lists) and no memcg routines should
 	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
 	 */
-	if (!folio_test_hugetlb(folio))
-		page_cache_release(folio);
-	destroy_large_folio(folio);
+	if (folio_test_hugetlb(folio)) {
+		free_huge_folio(folio);
+		return;
+	}
+
+	page_cache_release(folio);
+	if (folio_test_large_rmappable(folio))
+		folio_undo_large_rmappable(folio);
+	mem_cgroup_uncharge(folio);
+	free_unref_page(&folio->page, folio_order(folio));
 }
 
 void __folio_put(struct folio *folio)
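
For readability, a minimal sketch of __folio_put_large() as it reads with this patch applied, reconstructed from the hunks above; the surrounding mm/swap.c includes, helper declarations, and the full leading comment are assumed from context and not shown:

/*
 * Sketch reconstructed from the diff above: hugetlb folios go straight to
 * free_huge_folio() and skip the page cache and memcg teardown, while all
 * other large folios are uncharged and returned to the page allocator.
 */
static void __folio_put_large(struct folio *folio)
{
	if (folio_test_hugetlb(folio)) {
		free_huge_folio(folio);
		return;
	}

	page_cache_release(folio);
	if (folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);
	mem_cgroup_uncharge(folio);
	free_unref_page(&folio->page, folio_order(folio));
}

This is the body of the removed destroy_large_folio() folded into its only caller, with the hugetlb early return taken before page_cache_release() as the new hunk shows.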