author		Cheng Li <lic121@chinatelecom.cn>	2022-09-09 07:31:09 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2022-10-03 14:03:08 -0700
commit		14455eabd8404a503dc8e80cd8ce185e96a94b22 (patch)
tree		ebe035ca739c4efeecb5c1679aeb839db43ab101 /mm/hugetlb.c
parent		0d83b2d89dbfad17b62d4e7fb8f0b0525ba1a204 (diff)
mm: use nth_page instead of mem_map_offset/mem_map_next
To handle the discontiguous case, mem_map_next() takes a parameter named "offset". As a caller, one may well wonder why "get the next entry" needs an "offset" parameter at all. The other drawback of mem_map_next() is that callers must keep the "iter" and "offset" arguments consistent with each other by hand; if they drift apart, the walk can leave a hole or visit a page twice. So use nth_page() instead of mem_map_next(), and also replace mem_map_offset() with nth_page(), per Matthew's comments.

Link: https://lkml.kernel.org/r/1662708669-9395-1-git-send-email-lic121@chinatelecom.cn
Signed-off-by: Cheng Li <lic121@chinatelecom.cn>
Fixes: 69d177c2fc70 ("hugetlbfs: handle pages higher order than MAX_ORDER")
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
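Background on the helpers being swapped: with CONFIG_SPARSEMEM and without CONFIG_SPARSEMEM_VMEMMAP, the memmap is not virtually contiguous, so plain `page + n` pointer arithmetic is only valid inside one MAX_ORDER-aligned chunk of struct pages; on such configs nth_page(page, n) expands to pfn_to_page(page_to_pfn(page) + n), which round-trips through the page frame number and therefore crosses chunk boundaries safely. The program below is a minimal userspace model of that idea, illustrative only (CHUNK_PAGES stands in for MAX_ORDER_NR_PAGES, and the helpers merely mimic their kernel namesakes):

	/* Toy model: a "memmap" split into two separately allocated chunks,
	 * so pointer arithmetic across the chunk boundary is invalid. */
	#include <stdio.h>
	#include <stdlib.h>

	#define CHUNK_PAGES 4	/* stand-in for MAX_ORDER_NR_PAGES */
	#define NR_CHUNKS   2

	struct page { unsigned long pfn; };

	static struct page *chunks[NR_CHUNKS];	/* discontiguous "memmap" */

	static struct page *pfn_to_page(unsigned long pfn)
	{
		return &chunks[pfn / CHUNK_PAGES][pfn % CHUNK_PAGES];
	}

	static unsigned long page_to_pfn(const struct page *p)
	{
		return p->pfn;
	}

	/* The pattern this commit adopts: index from the head page by pfn. */
	static struct page *nth_page(struct page *base, unsigned long n)
	{
		return pfn_to_page(page_to_pfn(base) + n);
	}

	int main(void)
	{
		for (int c = 0; c < NR_CHUNKS; c++) {
			chunks[c] = malloc(sizeof(struct page) * CHUNK_PAGES);
			for (int i = 0; i < CHUNK_PAGES; i++)
				chunks[c][i].pfn = (unsigned long)c * CHUNK_PAGES + i;
		}

		struct page *head = pfn_to_page(0);

		/* `head + 5` would read past chunk 0's allocation;
		 * nth_page() lands in chunk 1 as intended. */
		printf("nth_page(head, 5)->pfn = %lu\n", nth_page(head, 5)->pfn);
		return 0;
	}

With the pfn round-trip there is no requirement that consecutive chunks be adjacent in memory, which is exactly the discontiguity that mem_map_next() tried to paper over with its coupled iter/offset pair.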
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	29
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 008955d8f411..6af123374e98 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1306,12 +1306,13 @@ static void __destroy_compound_gigantic_page(struct page *page,
 {
 	int i;
 	int nr_pages = 1 << order;
-	struct page *p = page + 1;
+	struct page *p;
 
 	atomic_set(compound_mapcount_ptr(page), 0);
 	atomic_set(compound_pincount_ptr(page), 0);
-	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+	for (i = 1; i < nr_pages; i++) {
+		p = nth_page(page, i);
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
@@ -1532,7 +1533,7 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
 static void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
-	struct page *subpage = page;
+	struct page *subpage;
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
@@ -1563,8 +1564,8 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	if (unlikely(PageHWPoison(page)))
 		hugetlb_clear_page_hwpoison(page);
 
-	for (i = 0; i < pages_per_huge_page(h);
-	     i++, subpage = mem_map_next(subpage, page, i)) {
+	for (i = 0; i < pages_per_huge_page(h); i++) {
+		subpage = nth_page(page, i);
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
 				1 << PG_active | 1 << PG_private |
@@ -1771,13 +1772,15 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 {
 	int i, j;
 	int nr_pages = 1 << order;
-	struct page *p = page + 1;
+	struct page *p;
 
 	/* we rely on prep_new_huge_page to set the destructor */
 	set_compound_order(page, order);
 	__ClearPageReserved(page);
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+	for (i = 1; i < nr_pages; i++) {
+		p = nth_page(page, i);
+
 		/*
 		 * For gigantic hugepages allocated through bootmem at
 		 * boot, it's safer to be consistent with the not-gigantic
@@ -1824,14 +1827,16 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 
 out_error:
 	/* undo tail page modifications made above */
-	p = page + 1;
-	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
+	for (j = 1; j < i; j++) {
+		p = nth_page(page, j);
 		clear_compound_head(p);
 		set_page_refcounted(p);
 	}
 	/* need to clear PG_reserved on remaining tail pages */
-	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
+	for (; j < nr_pages; j++) {
+		p = nth_page(page, j);
 		__ClearPageReserved(p);
+	}
 	set_compound_order(page, 0);
 #ifdef CONFIG_64BIT
 	page[1].compound_nr = 0;
@@ -6128,7 +6133,7 @@ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
 
 	for (nr = 0; nr < refs; nr++) {
 		if (likely(pages))
-			pages[nr] = mem_map_offset(page, nr);
+			pages[nr] = nth_page(page, nr);
 		if (vmas)
 			vmas[nr] = vma;
 	}
@@ -6292,7 +6297,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			 (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
 		if (pages || vmas)
-			record_subpages_vmas(mem_map_offset(page, pfn_offset),
+			record_subpages_vmas(nth_page(page, pfn_offset),
 					     vma, refs,
 					     likely(pages) ? pages + i : NULL,
 					     vmas ? vmas + i : NULL);
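Taken together, every hunk makes the same mechanical transformation; distilled from the first hunk above:

	/* before: the iterator and the index must advance in lockstep */
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		p->mapping = NULL;
		clear_compound_head(p);
	}

	/* after: the index alone drives the walk, and p is derived from it */
	for (i = 1; i < nr_pages; i++) {
		p = nth_page(page, i);
		p->mapping = NULL;
		clear_compound_head(p);
	}

Recomputing p from i on each iteration costs a pfn conversion on SPARSEMEM-without-VMEMMAP configs, but it leaves a single source of truth, so a skipped or doubled update of p can no longer produce the hole or duplication the commit message describes.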