author		Matthew Wilcox (Oracle) <willy@infradead.org>	2023-01-16 19:18:12 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:19 -0800
commit		edf5047058395c89a912783ea29ec8f9e53be414 (patch)
tree		bea40bd9093f29122ef996c5e8cca1ba23320e22
parent		28d41a4863316321bb5aa616bd82d65c84fc0f8b (diff)
mm: use a folio in copy_pte_range()
Allocate an order-0 folio instead of a page and pass it all the way down
the call chain. Removes dozens of calls to compound_head().
Link: https://lkml.kernel.org/r/20230116191813.2145215-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	mm/memory.c	51
1 file changed, 25 insertions(+), 26 deletions(-)
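For context, the compound_head() calls this removes are hidden inside the page-based helpers: each of them first has to resolve the page to its folio before it can act. A simplified paraphrase of put_page() from include/linux/mm.h (an illustrative sketch; devmap handling and other details elided) shows the pattern:

/* Simplified paraphrase of the page-based helper -- not the literal source. */
static inline void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);	/* compound_head() lookup */

	folio_put(folio);			/* drop the reference */
}

Once copy_pte_range() holds a struct folio, it can call folio_put(), __folio_mark_uptodate(), folio_add_new_anon_rmap() and folio_add_lru_vma() directly, and those per-call lookups disappear.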
diff --git a/mm/memory.c b/mm/memory.c
index 720fbb4771de..20a5c8087a5a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -863,13 +863,13 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 static inline int
 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-		  struct page **prealloc, struct page *page)
+		  struct folio **prealloc, struct page *page)
 {
-	struct page *new_page;
+	struct folio *new_folio;
 	pte_t pte;
 
-	new_page = *prealloc;
-	if (!new_page)
+	new_folio = *prealloc;
+	if (!new_folio)
 		return -EAGAIN;
 
 	/*
@@ -877,14 +877,14 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 	 * over and copy the page & arm it.
 	 */
 	*prealloc = NULL;
-	copy_user_highpage(new_page, page, addr, src_vma);
-	__SetPageUptodate(new_page);
-	page_add_new_anon_rmap(new_page, dst_vma, addr);
-	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
-	rss[mm_counter(new_page)]++;
+	copy_user_highpage(&new_folio->page, page, addr, src_vma);
+	__folio_mark_uptodate(new_folio);
+	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
+	folio_add_lru_vma(new_folio, dst_vma);
+	rss[MM_ANONPAGES]++;
 
 	/* All done, just insert the new page copy in the child */
-	pte = mk_pte(new_page, dst_vma->vm_page_prot);
+	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
 		/* Uffd-wp needs to be delivered to dest pte as well */
@@ -900,7 +900,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
 static inline int
 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-		 struct page **prealloc)
+		 struct folio **prealloc)
 {
 	struct mm_struct *src_mm = src_vma->vm_mm;
 	unsigned long vm_flags = src_vma->vm_flags;
@@ -922,11 +922,11 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
 						 addr, rss, prealloc, page);
 		}
-		rss[mm_counter(page)]++;
+		rss[MM_ANONPAGES]++;
 	} else if (page) {
 		get_page(page);
 		page_dup_file_rmap(page, false);
-		rss[mm_counter(page)]++;
+		rss[mm_counter_file(page)]++;
 	}
 
 	/*
@@ -954,23 +954,22 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	return 0;
 }
 
-static inline struct page *
-page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
-		   unsigned long addr)
+static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
+		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct page *new_page;
+	struct folio *new_folio;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
-	if (!new_page)
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (!new_folio)
 		return NULL;
 
-	if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
-		put_page(new_page);
+	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
+		folio_put(new_folio);
 		return NULL;
 	}
-	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+	cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-	return new_page;
+	return new_folio;
 }
 
 static int
@@ -986,7 +985,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	int progress, ret = 0;
 	int rss[NR_MM_COUNTERS];
 	swp_entry_t entry = (swp_entry_t){0};
-	struct page *prealloc = NULL;
+	struct folio *prealloc = NULL;
 
 again:
 	progress = 0;
@@ -1056,7 +1055,7 @@ again:
 			 * will allocate page according to address). This
 			 * could only happen if one pinned pte changed.
 			 */
-			put_page(prealloc);
+			folio_put(prealloc);
 			prealloc = NULL;
 		}
 		progress += 8;
@@ -1093,7 +1092,7 @@ again:
 		goto again;
 out:
 	if (unlikely(prealloc))
-		put_page(prealloc);
+		folio_put(prealloc);
 	return ret;
 }
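A note on the rss accounting change in the @@ -922 hunk: mm_counter(page) picks the counter index by testing page flags, and those flag tests resolve compound_head() on each call. A simplified paraphrase of the helpers from include/linux/mm.h (an illustrative sketch; details may differ by kernel version):

static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))	/* shmem pages are swap-backed */
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))		/* flag test resolves compound_head() */
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

Because the first branch only runs for pages already known to be anonymous, the counter is MM_ANONPAGES up front, and the file-backed branch can call mm_counter_file() directly, so the redundant flag tests (and their compound_head() lookups) drop out.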