author      Matthew Wilcox (Oracle) <willy@infradead.org>    2023-01-16 19:28:25 +0000
committer   Andrew Morton <akpm@linux-foundation.org>        2023-02-02 22:33:20 -0800
commit      7efecffb8e7968c4a6c53177b0053ca4765fe233 (patch)
tree        1b24f9d3ced3869c347ef1f83851fa1b0089376d /mm/internal.h
parent      90c9d13a47d45f2f16530c4d62af2fa4d74dfd16 (diff)
mm: remove mlock_vma_page()
All callers now have a folio and can call mlock_vma_folio(). Update the
documentation to refer to mlock_vma_folio().
Link: https://lkml.kernel.org/r/20230116192827.2146732-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/internal.h')
-rw-r--r--   mm/internal.h   10
1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 74bc1fe45711..0b74105ea363 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -518,7 +518,7 @@ extern long faultin_vma_page_range(struct vm_area_struct *vma,
 extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
 			      unsigned long len);
 /*
- * mlock_vma_page() and munlock_vma_page():
+ * mlock_vma_folio() and munlock_vma_folio():
  * should be called with vma's mmap_lock held for read or write,
  * under page table lock for the pte/pmd being added or removed.
  *
@@ -547,12 +547,6 @@ static inline void mlock_vma_folio(struct folio *folio,
 		mlock_folio(folio);
 }
 
-static inline void mlock_vma_page(struct page *page,
-		struct vm_area_struct *vma, bool compound)
-{
-	mlock_vma_folio(page_folio(page), vma, compound);
-}
-
 void munlock_folio(struct folio *folio);
 
 static inline void munlock_vma_folio(struct folio *folio,
@@ -656,8 +650,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 }
 #else /* !CONFIG_MMU */
 static inline void unmap_mapping_folio(struct folio *folio) { }
-static inline void mlock_vma_page(struct page *page,
-			struct vm_area_struct *vma, bool compound) { }
 static inline void munlock_vma_page(struct page *page,
 			struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_folio(struct folio *folio) { }
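
For illustration only (not part of the commit): a minimal sketch of the caller-side change this patch relies on, assuming a hypothetical caller that still holds a struct page for a pte-mapped (non-compound) mapping and that the mm/internal.h declarations are in scope. mlock_vma_page(), mlock_vma_folio() and page_folio() come from the patch; the example_* functions and their arguments are invented for the example.

/*
 * Hypothetical caller-side sketch (not from this patch).
 */

/* Before this series: the now-removed wrapper converted to a folio. */
static void example_mlock_old(struct page *page, struct vm_area_struct *vma)
{
	mlock_vma_page(page, vma, false);	/* called page_folio() internally */
}

/* After: the caller does the page-to-folio conversion itself. */
static void example_mlock_new(struct page *page, struct vm_area_struct *vma)
{
	mlock_vma_folio(page_folio(page), vma, false);
}

Since every remaining caller already works on a folio, the struct page wrapper had no users left, which is why both the CONFIG_MMU definition and the !CONFIG_MMU stub can be deleted outright.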