diff options
author | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2024-08-09 14:48:52 +0300 |
---|---|---|
committer | Andrew Morton <akpm@linux-foundation.org> | 2024-09-01 20:26:07 -0700 |
commit | 55ad43e8ba0f5ccc9792846479839c4affb04660 (patch) | |
tree | 7f644d43a316c73055baddb7be6488f11a5ff0f0 /mm | |
parent | 5adfeaecc487e7023f1c7bbdc081707d7a93110f (diff) |
mm: add a helper to accept page
Accept a given struct page and add it to the free list.
The helper is useful for physical memory scanners that want to use free
unaccepted memory.
Link: https://lkml.kernel.org/r/20240809114854.3745464-7-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/internal.h | 8 | ||||
-rw-r--r-- | mm/page_alloc.c | 47 |
2 files changed, 43 insertions, 12 deletions
diff --git a/mm/internal.h b/mm/internal.h
index a479fe6e1895..acda347620c6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1432,4 +1432,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long new_addr, unsigned long len,
 		bool need_rmap_locks, bool for_stack);
 
+#ifdef CONFIG_UNACCEPTED_MEMORY
+void accept_page(struct page *page);
+#else /* CONFIG_UNACCEPTED_MEMORY */
+static inline void accept_page(struct page *page)
+{
+}
+#endif /* CONFIG_UNACCEPTED_MEMORY */
+
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6ba88929c1e3..927f4e111273 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6935,27 +6935,18 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
 	return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
 }
 
-static bool try_to_accept_memory_one(struct zone *zone)
+static void __accept_page(struct zone *zone, unsigned long *flags,
+			  struct page *page)
 {
-	unsigned long flags;
-	struct page *page;
 	bool last;
 
-	spin_lock_irqsave(&zone->lock, flags);
-	page = list_first_entry_or_null(&zone->unaccepted_pages,
-					struct page, lru);
-	if (!page) {
-		spin_unlock_irqrestore(&zone->lock, flags);
-		return false;
-	}
-
 	list_del(&page->lru);
 	last = list_empty(&zone->unaccepted_pages);
 
 	account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
 	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
 	__ClearPageUnaccepted(page);
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock_irqrestore(&zone->lock, *flags);
 
 	accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
 
@@ -6963,6 +6954,38 @@ static bool try_to_accept_memory_one(struct zone *zone)
 
 	if (last)
 		static_branch_dec(&zones_with_unaccepted_pages);
+}
+
+void accept_page(struct page *page)
+{
+	struct zone *zone = page_zone(page);
+	unsigned long flags;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	if (!PageUnaccepted(page)) {
+		spin_unlock_irqrestore(&zone->lock, flags);
+		return;
+	}
+
+	/* Unlocks zone->lock */
+	__accept_page(zone, &flags, page);
+}
+
+static bool try_to_accept_memory_one(struct zone *zone)
+{
+	unsigned long flags;
+	struct page *page;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	page = list_first_entry_or_null(&zone->unaccepted_pages,
+					struct page, lru);
+	if (!page) {
+		spin_unlock_irqrestore(&zone->lock, flags);
+		return false;
+	}
+
+	/* Unlocks zone->lock */
+	__accept_page(zone, &flags, page);
 	return true;
 }