author		Matthew Wilcox (Oracle) <willy@infradead.org>	2023-08-16 16:11:53 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-21 14:28:43 -0700
commit		da6e7bf3a0315025e4199d599bd31763f0df3b4a
tree		24c02cc8fd9f24bb05f5f2590cc193d3e9f6bc42
parent		8dc4a8f1e038189cb575f89bcd23364698b88cc1
mm: convert prep_transhuge_page() to folio_prep_large_rmappable()
Match folio_undo_large_rmappable(), and move the casting from page to
folio into the callers (which they were largely doing anyway).
Link: https://lkml.kernel.org/r/20230816151201.3655946-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
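
[Editor's note] A minimal sketch of the calling convention before and after this change, mirroring the folio_alloc() hunk in the diff below; the surrounding allocation code is illustrative, not part of the patch:

/* Before: the helper took a struct page and did the cast itself. */
struct page *page = alloc_pages(gfp | __GFP_COMP, order);
if (page && order > 1)
	prep_transhuge_page(page);	/* cast to folio happened inside */
return (struct folio *)page;

/* After: the caller casts once and works with the folio throughout. */
struct folio *folio = (struct folio *)alloc_pages(gfp | __GFP_COMP, order);
if (folio && order > 1)
	folio_prep_large_rmappable(folio);
return folio;

The order > 1 guard matches the VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio) assertion in the helper: only folios of order 2 and above carry the _deferred_list that it initializes.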
-rw-r--r--	include/linux/huge_mm.h	|  4
-rw-r--r--	mm/huge_memory.c	|  4
-rw-r--r--	mm/khugepaged.c		|  2
-rw-r--r--	mm/mempolicy.c		| 15
-rw-r--r--	mm/page_alloc.c		|  7
5 files changed, 16 insertions, 16 deletions
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ceda26a20830..fa0350b0812a 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -140,7 +140,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
-void prep_transhuge_page(struct page *page);
+void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
@@ -280,7 +280,7 @@ static inline bool hugepage_vma_check(struct vm_area_struct *vma,
 	return false;
 }
 
-static inline void prep_transhuge_page(struct page *page) {}
+static inline void folio_prep_large_rmappable(struct folio *folio) {}
 
 #define transparent_hugepage_flags 0UL
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b33456683b93..5817bf77f1f0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -577,10 +577,8 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-void prep_transhuge_page(struct page *page)
+void folio_prep_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-
 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
 	INIT_LIST_HEAD(&folio->_deferred_list);
 	folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9a6e0d507759..40d43eccdee8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -896,7 +896,7 @@ static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
 		return false;
 	}
 
-	prep_transhuge_page(*hpage);
+	folio_prep_large_rmappable((struct folio *)*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	return true;
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ec2eaceffd74..42b5567e3773 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2195,9 +2195,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
-		if (page && order > 1)
-			prep_transhuge_page(page);
 		folio = (struct folio *)page;
+		if (folio && order > 1)
+			folio_prep_large_rmappable(folio);
 		goto out;
 	}
 
@@ -2208,9 +2208,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
-		if (page && order > 1)
-			prep_transhuge_page(page);
 		folio = (struct folio *)page;
+		if (folio && order > 1)
+			folio_prep_large_rmappable(folio);
 		goto out;
 	}
 
@@ -2306,10 +2306,11 @@ EXPORT_SYMBOL(alloc_pages);
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
 {
 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+	struct folio *folio = (struct folio *)page;
 
-	if (page && order > 1)
-		prep_transhuge_page(page);
-	return (struct folio *)page;
+	if (folio && order > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
 }
 EXPORT_SYMBOL(folio_alloc);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4047b5897443..a97d6fa9cea0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4489,10 +4489,11 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
 {
 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
 			preferred_nid, nodemask);
+	struct folio *folio = (struct folio *)page;
 
-	if (page && order > 1)
-		prep_transhuge_page(page);
-	return (struct folio *)page;
+	if (folio && order > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
 }
 EXPORT_SYMBOL(__folio_alloc);