author:    Kefeng Wang <wangkefeng.wang@huawei.com>  2024-06-04 19:48:19 +0800
committer: Andrew Morton <akpm@linux-foundation.org>  2024-07-03 19:30:17 -0700
commit:    aca08acce76f1f7de12dd22a2ab36411ce02074a
tree:      556e799e680a60c79c03d686125f99f9996d49a1 /fs/proc
parent:    11d5401b011e3557894d824dac210f0b18cc3911
fs/proc/task_mmu: use folio API in pte_is_pinned()
Patch series "mm: remove page_maybe_dma_pinned() and page_mkclean()".

Most page_maybe_dma_pinned() and page_mkclean() callers have been
converted to the folio equivalents; after two more conversions, remove
them and update the comments and documentation.

This patch (of 4):

Convert to the vm_normal_folio() and folio_maybe_dma_pinned() APIs,
which helps to remove page_maybe_dma_pinned() in a subsequent change.

Link: https://lkml.kernel.org/r/20240604114822.2089819-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240604114822.2089819-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Helge Deller <deller@gmx.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
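For context on why the conversion lets the old helper be deleted:
page_maybe_dma_pinned() is, at this point in the series, only a thin
wrapper that looks up the folio and forwards to the folio variant. A
simplified sketch based on include/linux/mm.h of this era (details
elided, not a verbatim copy):

static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
	/* Large folios track pins explicitly in _pincount. */
	if (folio_test_large(folio))
		return atomic_read(&folio->_pincount) > 0;

	/*
	 * Small folios encode pins as GUP_PIN_COUNTING_BIAS-sized jumps
	 * in the refcount, so this check can report false positives.
	 */
	return (unsigned int)folio_ref_count(folio) >=
		GUP_PIN_COUNTING_BIAS;
}

static inline bool page_maybe_dma_pinned(struct page *page)
{
	/* The page variant just forwards to the folio variant. */
	return folio_maybe_dma_pinned(page_folio(page));
}

Once every caller operates on folios directly, the page_maybe_dma_pinned()
wrapper has no users and can be removed, which is the point of the series.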
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/task_mmu.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f2e83284aaf8..93fb2c61b154 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1100,7 +1100,7 @@ struct clear_refs_private {
 
 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-	struct page *page;
+	struct folio *folio;
 
 	if (!pte_write(pte))
 		return false;
@@ -1108,10 +1108,10 @@ static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr,
 		return false;
 	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
 		return false;
-	page = vm_normal_page(vma, addr, pte);
-	if (!page)
+	folio = vm_normal_folio(vma, addr, pte);
+	if (!folio)
 		return false;
-	return page_maybe_dma_pinned(page);
+	return folio_maybe_dma_pinned(folio);
 }
 
 static inline void clear_soft_dirty(struct vm_area_struct *vma,
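The vm_normal_folio() call used above is likewise a folio-returning
wrapper around vm_normal_page(). A simplified sketch matching the shape
of the helper in mm/memory.c (not a verbatim copy):

struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	/* Map the returned page (if any) to its containing folio. */
	if (page)
		return page_folio(page);
	return NULL;
}

So the patch does not change behavior for pte_is_pinned(): it resolves
the same page, but moves the page-to-folio step out of the caller and
into the lookup, letting the pin check operate on the folio directly.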