From 593a10dabe08dcf93259fce2badd8dc2528859a8 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Tue, 21 May 2024 21:03:15 +0800
Subject: mm: refactor folio_undo_large_rmappable()

Folios of order <= 1 are not kept on the deferred split list.  The order
check was added to folio_undo_large_rmappable() by commit 8897277acfef
("mm: support order-1 folios in the page cache"), but small folios
(order 0) are still re-checked on every call, so keep only the single
folio_order() check inside the function.

In addition, move all of the checks into the header file to save a
function call for folios that are not large-rmappable or whose
deferred_list is empty.

Link: https://lkml.kernel.org/r/20240521130315.46072-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Reviewed-by: David Hildenbrand
Reviewed-by: Vishal Moola (Oracle)
Cc: Johannes Weiner
Cc: Lance Yang
Cc: Matthew Wilcox (Oracle)
Cc: Michal Hocko
Cc: Muchun Song
Cc: Roman Gushchin
Cc: Shakeel Butt
Signed-off-by: Andrew Morton
---
 mm/internal.h | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/mm/internal.h b/mm/internal.h
index 0af4c9885424..2ea9a88dcb95 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -622,7 +622,22 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+void __folio_undo_large_rmappable(struct folio *folio);
+static inline void folio_undo_large_rmappable(struct folio *folio)
+{
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+		return;
+
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	__folio_undo_large_rmappable(folio);
+}
 
 static inline struct folio *page_rmappable_folio(struct page *page)
 {
--
cgit v1.2.3-58-ga151
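
Editor's note: this patch only touches mm/internal.h; the renamed out-of-line
slow path __folio_undo_large_rmappable() lives in mm/huge_memory.c and is not
shown in the hunk above.  As a rough, non-authoritative sketch of what that
counterpart is assumed to do (take the relevant split_queue_lock and unlink the
folio from the deferred split list), it would look something like:

	/*
	 * Assumed sketch of the slow path, not part of this patch: only
	 * reached once the inline fast path has seen a large rmappable
	 * folio whose _deferred_list appears non-empty.
	 */
	void __folio_undo_large_rmappable(struct folio *folio)
	{
		struct deferred_split *ds_queue = get_deferred_split_queue(folio);
		unsigned long flags;

		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
		if (!list_empty(&folio->_deferred_list)) {
			ds_queue->split_queue_len--;
			list_del_init(&folio->_deferred_list);
		}
		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
	}

The design point of the refactor is that the common cases (small folio,
non-rmappable folio, folio never added to the deferred list) are now decided
entirely inline via an unlocked data_race() read, and the lock is only taken in
the rare case where the folio may actually be queued for deferred split.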