author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-08-16 16:11:52 +0100
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-08-21 14:28:43 -0700
commit | 8dc4a8f1e038189cb575f89bcd23364698b88cc1 (patch)
tree | 000cc2ba54fe80c8b41ba25dc5791f55d338a137 /mm/huge_memory.c
parent | 454a00c40a21c59e99c526fe8cc57bd029cf8f0e (diff)
mm: convert free_transhuge_page() to folio_undo_large_rmappable()
Indirect calls are expensive, thanks to Spectre. Test for
TRANSHUGE_PAGE_DTOR and destroy the folio appropriately. Move the
free_compound_page() call into destroy_large_folio() to simplify later
patches.
Link: https://lkml.kernel.org/r/20230816151201.3655946-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
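
For context: the "Test for TRANSHUGE_PAGE_DTOR" mentioned above happens in destroy_large_folio() in mm/page_alloc.c, which this diffstat does not show. Below is a minimal sketch of that caller's likely shape after this patch; it assumes the _folio_dtor field, NR_COMPOUND_DTORS, and VM_BUG_ON_FOLIO() from kernels of this era, and is illustrative only, not the literal hunk:

void destroy_large_folio(struct folio *folio)
{
	/* Assumed era-specific field: each large folio records its destructor ID. */
	enum compound_dtor_id dtor = folio->_folio_dtor;

	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);

	/*
	 * A direct comparison replaces the indirect call through the
	 * compound_page_dtors[] table, which is expensive under Spectre
	 * mitigations (retpolines). Hugetlb handling omitted for brevity.
	 */
	if (dtor == TRANSHUGE_PAGE_DTOR)
		folio_undo_large_rmappable(folio);

	/* free_compound_page() now runs here rather than in the THP destructor. */
	free_compound_page(&folio->page);
}

This leaves folio_undo_large_rmappable() as pure deferred-split-list cleanup, which is what lets the later patches in this series simplify the destructor machinery further.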
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 22
1 file changed, 11 insertions, 11 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 154c210892a1..b33456683b93 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2776,10 +2776,9 @@ out:
 	return ret;
 }
 
-void free_transhuge_page(struct page *page)
+void folio_undo_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
 	/*
@@ -2787,15 +2786,16 @@ void free_transhuge_page(struct page *page)
 	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
 	 */
-	if (data_race(!list_empty(&folio->_deferred_list))) {
-		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-		if (!list_empty(&folio->_deferred_list)) {
-			ds_queue->split_queue_len--;
-			list_del(&folio->_deferred_list);
-		}
-		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	ds_queue = get_deferred_split_queue(folio);
+	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	if (!list_empty(&folio->_deferred_list)) {
+		ds_queue->split_queue_len--;
+		list_del(&folio->_deferred_list);
 	}
-	free_compound_page(page);
+	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
 
 void deferred_split_folio(struct folio *folio)