-rw-r--r-- | include/linux/rmap.h    | 16
-rw-r--r-- | kernel/events/uprobes.c |  6
-rw-r--r-- | mm/damon/paddr.c        | 12
-rw-r--r-- | mm/ksm.c                |  5
-rw-r--r-- | mm/migrate.c            |  7
-rw-r--r-- | mm/page_idle.c          |  6
-rw-r--r-- | mm/rmap.c               | 31
7 files changed, 27 insertions(+), 56 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index ac29b076082b..0d894a2bfaa1 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -214,6 +214,22 @@ struct page_vma_mapped_walk {
         unsigned int flags;
 };
 
+#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags)      \
+        struct page_vma_mapped_walk name = {                           \
+                .page = _page,                                         \
+                .vma = _vma,                                           \
+                .address = _address,                                   \
+                .flags = _flags,                                       \
+        }
+
+#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags)    \
+        struct page_vma_mapped_walk name = {                           \
+                .page = &_folio->page,                                 \
+                .vma = _vma,                                           \
+                .address = _address,                                   \
+                .flags = _flags,                                       \
+        }
+
 static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
 {
         /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index eed2f7437d96..6418083901d4 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -155,11 +155,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                 struct page *old_page, struct page *new_page)
 {
         struct mm_struct *mm = vma->vm_mm;
-        struct page_vma_mapped_walk pvmw = {
-                .page = compound_head(old_page),
-                .vma = vma,
-                .address = addr,
-        };
+        DEFINE_FOLIO_VMA_WALK(pvmw, page_folio(old_page), vma, addr, 0);
         int err;
         struct mmu_notifier_range range;
 
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 5e8244f65a1a..cb45d49c731d 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -19,11 +19,7 @@
 static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
                 unsigned long addr, void *arg)
 {
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = addr,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
 
         while (page_vma_mapped_walk(&pvmw)) {
                 addr = pvmw.address;
@@ -93,11 +89,7 @@ static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
                 unsigned long addr, void *arg)
 {
         struct damon_pa_access_chk_result *result = arg;
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = addr,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
 
         result->accessed = false;
         result->page_sz = PAGE_SIZE;
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1034,10 +1034,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                               pte_t *orig_pte)
 {
         struct mm_struct *mm = vma->vm_mm;
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
         int swapped;
         int err = -EFAULT;
         struct mmu_notifier_range range;
diff --git a/mm/migrate.c b/mm/migrate.c
index f4076093c855..71f92e8ed934 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -174,12 +174,7 @@ void putback_movable_pages(struct list_head *l)
 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                                  unsigned long addr, void *old)
 {
-        struct page_vma_mapped_walk pvmw = {
-                .page = old,
-                .vma = vma,
-                .address = addr,
-                .flags = PVMW_SYNC | PVMW_MIGRATION,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
         struct page *new;
         pte_t pte;
         swp_entry_t entry;
diff --git a/mm/page_idle.c b/mm/page_idle.c
index edead6a8a5f9..3e05bf1ce825 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -48,11 +48,7 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
                                         struct vm_area_struct *vma,
                                         unsigned long addr, void *arg)
 {
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = addr,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
         bool referenced = false;
 
         while (page_vma_mapped_walk(&pvmw)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 1a13d5d6cfc7..a7f06b76b503 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -802,11 +802,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
                         unsigned long address, void *arg)
 {
         struct page_referenced_arg *pra = arg;
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = address,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
         int referenced = 0;
 
         while (page_vma_mapped_walk(&pvmw)) {
@@ -934,12 +930,7 @@ int page_referenced(struct page *page,
 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                             unsigned long address, void *arg)
 {
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = address,
-                .flags = PVMW_SYNC,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, PVMW_SYNC);
         struct mmu_notifier_range range;
         int *cleaned = arg;
 
@@ -1419,11 +1410,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                      unsigned long address, void *arg)
 {
         struct mm_struct *mm = vma->vm_mm;
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = address,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
         pte_t pteval;
         struct page *subpage;
         bool ret = true;
@@ -1714,11 +1701,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
                      unsigned long address, void *arg)
 {
         struct mm_struct *mm = vma->vm_mm;
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = address,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
         pte_t pteval;
         struct page *subpage;
         bool ret = true;
@@ -2001,11 +1984,7 @@ static bool page_make_device_exclusive_one(struct page *page,
                 struct vm_area_struct *vma, unsigned long address, void *priv)
 {
         struct mm_struct *mm = vma->vm_mm;
-        struct page_vma_mapped_walk pvmw = {
-                .page = page,
-                .vma = vma,
-                .address = address,
-        };
+        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
         struct make_exclusive_args *args = priv;
         pte_t pteval;
         struct page *subpage;
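
Usage sketch (not part of the patch): the two functions below show how a converted call site reads, modeled on page_referenced_one() and __replace_page() above. The function names example_page_is_mapped and example_folio_is_mapped are hypothetical; only the macros, page_vma_mapped_walk() and page_vma_mapped_walk_done() come from the tree.

#include <linux/rmap.h>

/*
 * Hypothetical caller. DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0)
 * expands to exactly the open-coded initializer this patch removes,
 * with .flags now always spelled out explicitly.
 */
static bool example_page_is_mapped(struct page *page,
                                   struct vm_area_struct *vma,
                                   unsigned long addr)
{
        DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);

        /* Visit each pte/pmd in @vma that maps @page. */
        while (page_vma_mapped_walk(&pvmw)) {
                /* Found a mapping; drop pvmw's locks and stop early. */
                page_vma_mapped_walk_done(&pvmw);
                return true;
        }
        return false;
}

/*
 * Folio variant: a caller that already has a folio (e.g. via
 * page_folio(), as in __replace_page() above) can start a walk even
 * though the walker itself still stores a struct page internally --
 * the macro passes &folio->page.
 */
static bool example_folio_is_mapped(struct folio *folio,
                                    struct vm_area_struct *vma,
                                    unsigned long addr)
{
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

        while (page_vma_mapped_walk(&pvmw)) {
                page_vma_mapped_walk_done(&pvmw);
                return true;
        }
        return false;
}

A note on the apparent design: because the macros take all five fields, every caller now states its flags explicitly (0 where the old initializers simply omitted .flags), and DEFINE_FOLIO_VMA_WALK gives folio-based callers a single entry point that can be updated in one place if struct page_vma_mapped_walk is later converted to store a folio.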