| author | ZhangPeng <zhangpeng362@huawei.com> | 2023-04-10 21:39:32 +0800 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-04-18 16:29:55 -0700 |
| commit | d7be6d7eee1bbf98671d7a2c95654322241e2ae4 (patch) | |
| tree | 1fccbe5e88ebe37a9aac3ad2928750e565340f46 /mm/userfaultfd.c | |
| parent | c0e8150e144b62ae467520d0b51c4707c09e897b (diff) | |
userfaultfd: convert mfill_atomic() to use a folio
Convert mfill_atomic_pte_copy(), shmem_mfill_atomic_pte() and
mfill_atomic_pte() to take a folio pointer.
Convert mfill_atomic() to use a folio, and rename its local page_kaddr
variable to kaddr.
Link: https://lkml.kernel.org/r/20230410133932.32288-7-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/userfaultfd.c')
| -rw-r--r-- | mm/userfaultfd.c | 40 |
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 2f263afb823d..11cfd82c6726 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -133,13 +133,13 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 				 unsigned long dst_addr,
 				 unsigned long src_addr,
 				 uffd_flags_t flags,
-				 struct page **pagep)
+				 struct folio **foliop)
 {
 	void *kaddr;
 	int ret;
 	struct folio *folio;
 
-	if (!*pagep) {
+	if (!*foliop) {
 		ret = -ENOMEM;
 		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
 					dst_addr, false);
@@ -171,15 +171,15 @@ static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			ret = -ENOENT;
-			*pagep = &folio->page;
+			*foliop = folio;
 			/* don't free the page */
 			goto out;
 		}
 
 		flush_dcache_folio(folio);
 	} else {
-		folio = page_folio(*pagep);
-		*pagep = NULL;
+		folio = *foliop;
+		*foliop = NULL;
 	}
 
 	/*
@@ -470,7 +470,7 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 						unsigned long dst_addr,
 						unsigned long src_addr,
 						uffd_flags_t flags,
-						struct page **pagep)
+						struct folio **foliop)
 {
 	ssize_t err;
 
@@ -493,14 +493,14 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
 			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
 						    dst_addr, src_addr,
-						    flags, pagep);
+						    flags, foliop);
 		else
 			err = mfill_atomic_pte_zeropage(dst_pmd,
 						 dst_vma, dst_addr);
 	} else {
 		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
 					     dst_addr, src_addr,
-					     flags, pagep);
+					     flags, foliop);
 	}
 
 	return err;
@@ -518,7 +518,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	pmd_t *dst_pmd;
 	unsigned long src_addr, dst_addr;
 	long copied;
-	struct page *page;
+	struct folio *folio;
 
 	/*
 	 * Sanitize the command parameters:
@@ -533,7 +533,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	src_addr = src_start;
 	dst_addr = dst_start;
 	copied = 0;
-	page = NULL;
+	folio = NULL;
 retry:
 	mmap_read_lock(dst_mm);
 
@@ -629,28 +629,28 @@ retry:
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
 		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
-				       src_addr, flags, &page);
+				       src_addr, flags, &folio);
 		cond_resched();
 
 		if (unlikely(err == -ENOENT)) {
-			void *page_kaddr;
+			void *kaddr;
 
 			mmap_read_unlock(dst_mm);
-			BUG_ON(!page);
+			BUG_ON(!folio);
 
-			page_kaddr = kmap_local_page(page);
-			err = copy_from_user(page_kaddr,
+			kaddr = kmap_local_folio(folio, 0);
+			err = copy_from_user(kaddr,
 					     (const void __user *) src_addr,
 					     PAGE_SIZE);
-			kunmap_local(page_kaddr);
+			kunmap_local(kaddr);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;
 			}
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 			goto retry;
 		} else
-			BUG_ON(page);
+			BUG_ON(folio);
 
 		if (!err) {
 			dst_addr += PAGE_SIZE;
@@ -667,8 +667,8 @@ retry:
 out_unlock:
 	mmap_read_unlock(dst_mm);
 out:
-	if (page)
-		put_page(page);
+	if (folio)
+		folio_put(folio);
 	BUG_ON(copied < 0);
 	BUG_ON(err > 0);
 	BUG_ON(!copied && !err);
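The hunks above swap the page-based helpers for their folio counterparts (kmap_local_folio(), flush_dcache_folio(), folio_put()). As a rough illustration of that pattern only — a simplified, hypothetical fragment, not the patched mfill_atomic() code — the copy step of the retry path now looks roughly like this:

```c
/*
 * Simplified sketch of the post-patch copy step (illustrative only; the
 * function name is hypothetical and error handling is reduced).
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

static int fill_folio_from_user(struct folio *folio, unsigned long src_addr)
{
	void *kaddr = kmap_local_folio(folio, 0);	/* was kmap_local_page(page) */
	unsigned long missing;

	/* copy the source page from userspace into the temporary folio */
	missing = copy_from_user(kaddr, (const void __user *)src_addr,
				 PAGE_SIZE);
	kunmap_local(kaddr);				/* helper is unchanged */

	if (missing)
		return -EFAULT;

	flush_dcache_folio(folio);			/* was flush_dcache_page(page) */
	return 0;
}
```

Working with the folio directly also drops the page_folio()/&folio->page conversions that the old struct page interface needed when handing the temporary allocation back and forth.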