author		ZhangPeng <zhangpeng362@huawei.com>	2023-04-10 21:39:27 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2023-04-18 16:29:54 -0700
commit		07e6d4095c75bcf0bf511b36eecaceb3fbb91ad9 (patch)
tree		742b07f392864ed3bde4b4e8766604cda4181a0a /mm/userfaultfd.c
parent		b4aca54792e76556bb9f8661bab07b4bf2eb4e5f (diff)
userfaultfd: convert mfill_atomic_pte_copy() to use a folio
Patch series "userfaultfd: convert userfaultfd functions to use folios",
v6.
This patch series converts several userfaultfd functions to use folios.
This patch (of 6):
Call vma_alloc_folio() directly instead of alloc_page_vma(), and rename
page_kaddr to kaddr in mfill_atomic_pte_copy(). This removes several calls
to compound_head().
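
In condensed form, the change swaps each page-based helper for its folio
counterpart. The following is a minimal before/after sketch of the pattern
taken from the diff below, not the complete function; error handling and the
retry path are omitted:

	/* Before: page-based helpers, several of which call compound_head() */
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
	void *page_kaddr = kmap_local_page(page);
	/* ... copy_from_user() into page_kaddr ... */
	kunmap_local(page_kaddr);
	flush_dcache_page(page);
	__SetPageUptodate(page);

	/* After: an order-0 folio is allocated and operated on directly */
	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					      dst_addr, false);
	void *kaddr = kmap_local_folio(folio, 0);
	/* ... copy_from_user() into kaddr ... */
	kunmap_local(kaddr);
	flush_dcache_folio(folio);
	__folio_mark_uptodate(folio);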
Link: https://lkml.kernel.org/r/20230410133932.32288-1-zhangpeng362@huawei.com
Link: https://lkml.kernel.org/r/20230410133932.32288-2-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/userfaultfd.c')
-rw-r--r--	mm/userfaultfd.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 7f1b5f8b712c..313bc683c2b6 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -135,17 +135,18 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 				 uffd_flags_t flags,
 				 struct page **pagep)
 {
-	void *page_kaddr;
+	void *kaddr;
 	int ret;
-	struct page *page;
+	struct folio *folio;
 
 	if (!*pagep) {
 		ret = -ENOMEM;
-		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
-		if (!page)
+		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
+					dst_addr, false);
+		if (!folio)
 			goto out;
 
-		page_kaddr = kmap_local_page(page);
+		kaddr = kmap_local_folio(folio, 0);
 		/*
 		 * The read mmap_lock is held here.  Despite the
 		 * mmap_lock being read recursive a deadlock is still
@@ -162,45 +163,44 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 		 * and retry the copy outside the mmap_lock.
 		 */
 		pagefault_disable();
-		ret = copy_from_user(page_kaddr,
-				     (const void __user *) src_addr,
+		ret = copy_from_user(kaddr, (const void __user *) src_addr,
 				     PAGE_SIZE);
 		pagefault_enable();
-		kunmap_local(page_kaddr);
+		kunmap_local(kaddr);
 
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
-			*pagep = page;
+			*pagep = &folio->page;
 			/* don't free the page */
 			goto out;
 		}
 
-		flush_dcache_page(page);
+		flush_dcache_folio(folio);
 	} else {
-		page = *pagep;
+		folio = page_folio(*pagep);
 		*pagep = NULL;
 	}
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * preceding stores to the page contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
 	ret = -ENOMEM;
-	if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
 		goto out_release;
 
 	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
-				       page, true, flags);
+				       &folio->page, true, flags);
 	if (ret)
 		goto out_release;
 out:
 	return ret;
 out_release:
-	put_page(page);
+	folio_put(folio);
 	goto out;
 }
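
Note that the conversion stops at the function boundary: *pagep and
mfill_atomic_install_pte() still traffic in struct page, so the folio is
handed out as &folio->page on the -ENOENT retry path and recovered with
page_folio() on re-entry. For the order-0 folio allocated here the two are
exact inverses; a sketch of that invariant, assuming the same order-0
allocation as in the patch:

	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					      dst_addr, false);
	struct page *page = &folio->page;	/* stored in *pagep for the retry */
	/* on re-entry, page_folio() yields the original folio again */
	VM_BUG_ON_FOLIO(page_folio(page) != folio, folio);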