author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-26 20:28:27 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-25 20:56:20 -0700
commit		c93012d849c9e3fab9540458623b98794c4bf0a1
tree		662649c2d6403fe1f1d45a3fc10a915fbc68a7c6 /fs/dax.c
parent		e28833bc4ace001e77201d7b69ac389233482864
dax: use huge_zero_folio
Convert dax_pmd_load_hole() from huge_zero_page to huge_zero_folio (the general pattern is sketched after the diff below).
Link: https://lkml.kernel.org/r/20240326202833.523759-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
@@ -1207,17 +1207,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	struct vm_area_struct *vma = vmf->vma;
 	struct inode *inode = mapping->host;
 	pgtable_t pgtable = NULL;
-	struct page *zero_page;
+	struct folio *zero_folio;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
 	pfn_t pfn;
 
-	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+	zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
 
-	if (unlikely(!zero_page))
+	if (unlikely(!zero_folio))
 		goto fallback;
 
-	pfn = page_to_pfn_t(zero_page);
+	pfn = page_to_pfn_t(&zero_folio->page);
 	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
 				  DAX_PMD | DAX_ZERO_PAGE);
 
@@ -1237,17 +1237,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
 		mm_inc_nr_ptes(vma->vm_mm);
 	}
-	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+	pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
 	pmd_entry = pmd_mkhuge(pmd_entry);
 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
 	spin_unlock(ptl);
-	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+	trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
 	return VM_FAULT_NOPAGE;
 
 fallback:
 	if (pgtable)
 		pte_free(vma->vm_mm, pgtable);
-	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
 	return VM_FAULT_FALLBACK;
 }
 #else
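The hunks above are one instance of the generic conversion pattern this series applies. Below is a minimal sketch of that pattern, not code from the patch: the helper name example_map_huge_zero() is hypothetical, and the sketch assumes only the post-conversion APIs visible in the diff (mm_get_huge_zero_folio(), page_to_pfn_t()).

	/*
	 * Illustrative sketch of the huge_zero_page -> huge_zero_folio
	 * conversion pattern; not code from this patch.  The helper name
	 * example_map_huge_zero() is hypothetical.
	 */
	#include <linux/huge_mm.h>
	#include <linux/pfn_t.h>

	static bool example_map_huge_zero(struct mm_struct *mm, pfn_t *pfn)
	{
		struct folio *zero_folio;

		/* Register this mm as a user of the shared huge zero folio. */
		zero_folio = mm_get_huge_zero_folio(mm);
		if (unlikely(!zero_folio))
			return false;	/* allocation failed: caller falls back */

		/*
		 * Helpers such as page_to_pfn_t() (and mk_pmd() in the diff
		 * above) still operate on struct page, so bridge to them with
		 * the folio's head page via &folio->page.
		 */
		*pfn = page_to_pfn_t(&zero_folio->page);
		return true;
	}

The net effect in the diff is purely a type change: every use of zero_page becomes zero_folio, with &zero_folio->page covering the helpers that have not yet been converted to folios.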