author | ZhangPeng <zhangpeng362@huawei.com> | 2023-06-06 14:20:13 +0800
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-06-19 16:19:04 -0700
commit | 061e62e8180d3fab378a52d868e29ceebe2fe1d2 (patch)
tree | e1c65eb4d0c44f22cd925f28678ae78c544f5bb8 /mm/hugetlb.c
parent | 959a78b6dd4526fb11d3cacf2de909479b06a4f4 (diff)
mm/hugetlb: use a folio in hugetlb_fault()
We can replace seven implicit calls to compound_head() with one by using a
folio.
[akpm@linux-foundation.org: update comment, per Sidhartha]
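
For context, a minimal sketch of the conversion pattern, mirroring the diff
below; these are kernel-style C fragments, not a standalone program, and the
surrounding hugetlb_fault() locals (entry, pagecache_folio, need_wait_lock,
the out_ptl label) are assumed from that diff.  Each page-based helper used
here (trylock_page(), get_page(), unlock_page(), put_page(),
wait_on_page_locked()) resolves the head page internally via compound_head(),
as do the two explicit page_folio() lookups; holding a struct folio performs
that resolution once, up front.

	/* Before: every helper re-derives the head page via compound_head(). */
	struct page *page = pte_page(entry);
	if (page_folio(page) != pagecache_folio)	/* implicit compound_head() */
		if (!trylock_page(page)) {		/* implicit compound_head() */
			need_wait_lock = 1;
			goto out_ptl;
		}
	get_page(page);					/* implicit compound_head() */

	/* After: derive the folio once, then use the folio_* helpers directly. */
	struct folio *folio = page_folio(pte_page(entry));
	if (folio != pagecache_folio)
		if (!folio_trylock(folio)) {
			need_wait_lock = 1;
			goto out_ptl;
		}
	folio_get(folio);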
Link: https://lkml.kernel.org/r/20230606062013.2947002-4-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 20
1 file changed, 10 insertions, 10 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e58f8001fd92..dfa412d8cb30 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6062,7 +6062,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	vm_fault_t ret;
 	u32 hash;
 	pgoff_t idx;
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct folio *pagecache_folio = NULL;
 	struct hstate *h = hstate_vma(vma);
 	struct address_space *mapping;
@@ -6179,16 +6179,16 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	/*
 	 * hugetlb_wp() requires page locks of pte_page(entry) and
 	 * pagecache_folio, so here we need take the former one
-	 * when page != pagecache_folio or !pagecache_folio.
+	 * when folio != pagecache_folio or !pagecache_folio.
 	 */
-	page = pte_page(entry);
-	if (page_folio(page) != pagecache_folio)
-		if (!trylock_page(page)) {
+	folio = page_folio(pte_page(entry));
+	if (folio != pagecache_folio)
+		if (!folio_trylock(folio)) {
 			need_wait_lock = 1;
 			goto out_ptl;
 		}

-	get_page(page);
+	folio_get(folio);

 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(entry)) {
@@ -6204,9 +6204,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 						flags & FAULT_FLAG_WRITE))
 		update_mmu_cache(vma, haddr, ptep);
 out_put_page:
-	if (page_folio(page) != pagecache_folio)
-		unlock_page(page);
-	put_page(page);
+	if (folio != pagecache_folio)
+		folio_unlock(folio);
+	folio_put(folio);
 out_ptl:
 	spin_unlock(ptl);

@@ -6225,7 +6225,7 @@ out_mutex:
	 * here without taking refcount.
	 */
 	if (need_wait_lock)
-		wait_on_page_locked(page);
+		folio_wait_locked(folio);
 	return ret;
 }