author     Matthew Wilcox (Oracle) <willy@infradead.org>	2022-01-29 15:53:59 -0500
committer  Matthew Wilcox (Oracle) <willy@infradead.org>	2022-03-21 12:59:02 -0400
commit     4aed23a2f8aaaafad0232d3392afcf493c3c3df3 (patch)
tree       53c2fca4f8ef9d450e231cc40df64c93e942b2f6 /mm/page_idle.c
parent     2aff7a4755bed2870ee23b75bc88cdc8d76cdd03 (diff)
mm/page_idle: Convert page_idle_clear_pte_refs() to use a folio
The PG_idle and PG_young bits are ignored if they're set on tail
pages, so ensure we're passing a folio around.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
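As background for the conversion (not part of the patch itself): the PG_idle and PG_young bits are only honoured on the head page of a compound page, so testing or clearing them through a tail page is a no-op. A minimal sketch of the pattern this commit adopts is shown below; page_folio(), folio_test_idle(), folio_clear_idle() and folio_set_young() are the real kernel helpers, while the wrapper function name is hypothetical and exists only for illustration.

#include <linux/mm.h>
#include <linux/page_idle.h>

/* Hypothetical wrapper, for illustration only -- not in this patch. */
static void mark_page_accessed_for_idle_tracking(struct page *page)
{
	/*
	 * Resolve a possibly-tail page to its folio (head page), where
	 * the PG_idle/PG_young bits actually live.
	 */
	struct folio *folio = page_folio(page);

	if (folio_test_idle(folio)) {
		folio_clear_idle(folio);
		/* Keep page reclaim's referenced tracking consistent. */
		folio_set_young(folio);
	}
}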
Diffstat (limited to 'mm/page_idle.c')
-rw-r--r--  mm/page_idle.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 3e05bf1ce825..2427d832f5d6 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -13,6 +13,8 @@
 #include <linux/page_ext.h>
 #include <linux/page_idle.h>
 
+#include "internal.h"
+
 #define BITMAP_CHUNK_SIZE	sizeof(u64)
 #define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)
 
@@ -48,7 +50,8 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 					struct vm_area_struct *vma,
 					unsigned long addr, void *arg)
 {
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
+	struct folio *folio = page_folio(page);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 	bool referenced = false;
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -70,19 +73,20 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 	}
 
 	if (referenced) {
-		clear_page_idle(page);
+		folio_clear_idle(folio);
 		/*
 		 * We cleared the referenced bit in a mapping to this page. To
 		 * avoid interference with page reclaim, mark it young so that
 		 * page_referenced() will return > 0.
 		 */
-		set_page_young(page);
+		folio_set_young(folio);
 	}
 
 	return true;
 }
 
 static void page_idle_clear_pte_refs(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	/*
	 * Since rwc.arg is unused, rwc is effectively immutable, so we
	 * can make it static const to save some cycles and stack.
@@ -93,18 +97,17 @@ static void page_idle_clear_pte_refs(struct page *page)
 	};
 	bool need_lock;
 
-	if (!page_mapped(page) ||
-	    !page_rmapping(page))
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
 		return;
 
-	need_lock = !PageAnon(page) || PageKsm(page);
-	if (need_lock && !trylock_page(page))
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
 		return;
 
-	rmap_walk(page, (struct rmap_walk_control *)&rwc);
+	rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
 
 	if (need_lock)
-		unlock_page(page);
+		folio_unlock(folio);
 }
 
 static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
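A note on the hunk that adds #include "internal.h": folio_raw_mapping(), which replaces page_rmapping() above, is an mm-private helper declared in mm/internal.h rather than in a public header, hence the new include. Its definition in kernels of this era is believed to look like the sketch below (verify against your own tree):

/* Sketch of folio_raw_mapping() as found in mm/internal.h around this series. */
static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	/*
	 * Mask off the PAGE_MAPPING_* type bits (anon/movable/KSM), leaving
	 * the raw mapping pointer; NULL means there is nothing to rmap-walk.
	 */
	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}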