| | | |
|---|---|---|
| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-12-11 16:22:12 +0000 |
| committer | Andrew Morton <akpm@linux-foundation.org> | 2023-12-29 11:58:26 -0800 |
| commit | d3b082736518562f4eed185e1a67f28d20635fef (patch) | |
| tree | 20ac804cfc8f37079da5522fe78807a7445811b0 /mm/migrate_device.c | |
| parent | cb9089babc91f7ffc785d51a0fa567365b0e7751 (diff) | |
mm: convert migrate_vma_insert_page() to use a folio
Replaces five calls to compound_head() with one.
Link: https://lkml.kernel.org/r/20231211162214.2146080-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/migrate_device.c')
| | | |
|---|---|---|
| -rw-r--r-- | mm/migrate_device.c | 23 |

1 file changed, 12 insertions(+), 11 deletions(-)
```diff
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 8ac1f79f754a..81193363f8cd 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -564,6 +564,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 				    struct page *page,
 				    unsigned long *src)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = migrate->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	bool flush = false;
@@ -596,17 +597,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto abort;
 	if (unlikely(anon_vma_prepare(vma)))
 		goto abort;
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto abort;
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
-	 * preceding stores to the page contents become visible before
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
+	 * preceding stores to the folio contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
-	if (is_device_private_page(page)) {
+	if (folio_is_device_private(folio)) {
 		swp_entry_t swp_entry;
 
 		if (vma->vm_flags & VM_WRITE)
@@ -617,8 +618,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 					page_to_pfn(page));
 		entry = swp_entry_to_pte(swp_entry);
 	} else {
-		if (is_zone_device_page(page) &&
-		    !is_device_coherent_page(page)) {
+		if (folio_is_zone_device(folio) &&
+		    !folio_is_device_coherent(folio)) {
 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
 			goto abort;
 		}
@@ -652,10 +653,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto unlock_abort;
 
 	inc_mm_counter(mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, addr);
-	if (!is_zone_device_page(page))
-		lru_cache_add_inactive_or_unevictable(page, vma);
-	get_page(page);
+	folio_add_new_anon_rmap(folio, vma, addr);
+	if (!folio_is_zone_device(folio))
+		folio_add_lru_vma(folio, vma);
+	folio_get(folio);
 
 	if (flush) {
 		flush_cache_page(vma, addr, pte_pfn(orig_pte));
```
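For readers less familiar with the folio API, the sketch below is a small, self-contained user-space model (not kernel code; the `model_*` names and two-field structs are invented for illustration) of what the commit message means by replacing five compound_head() lookups with one: each page-based helper re-derives the head page internally, while resolving the folio once with page_folio() at the top of the function lets every later call take the already-resolved folio.

```c
#include <stdio.h>

/* Toy stand-ins for struct folio / struct page; not the kernel layout. */
struct folio { int uptodate; int refcount; };
struct page  { struct folio *head; };	/* a tail page points at its head */

/* Models page_folio()/compound_head(): one pointer chase to the head. */
static struct folio *model_page_folio(struct page *page)
{
	return page->head;
}

/* Page-based helpers: each one resolves the head page again. */
static void model_set_page_uptodate(struct page *page)
{
	model_page_folio(page)->uptodate = 1;
}

static void model_get_page(struct page *page)
{
	model_page_folio(page)->refcount++;
}

/* Folio-based helpers: they take the already-resolved head. */
static void model_folio_mark_uptodate(struct folio *folio)
{
	folio->uptodate = 1;
}

static void model_folio_get(struct folio *folio)
{
	folio->refcount++;
}

int main(void)
{
	struct folio f = { .uptodate = 0, .refcount = 1 };
	struct page p = { .head = &f };

	/* Before the patch: every helper repeats the head lookup. */
	model_set_page_uptodate(&p);
	model_get_page(&p);

	/* After the patch: look the folio up once, pass it everywhere. */
	struct folio *folio = model_page_folio(&p);
	model_folio_mark_uptodate(folio);
	model_folio_get(folio);

	printf("uptodate=%d refcount=%d\n", f.uptodate, f.refcount);
	return 0;
}
```

The same reasoning presumably applies to the final hunk: folio_add_lru_vma() and folio_get() do directly what the page-based wrappers they replace would otherwise reach via another lookup of the folio from the page.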