author    | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-11-08 18:28:07 +0000
committer | Andrew Morton <akpm@linux-foundation.org>     | 2023-12-10 16:51:38 -0800
commit    | 049b26048dd287d52f6f6fbe5eafa301fdca5d37
tree      | 957f946ba50b6c384530e399c6454d449f24ac41 /mm
parent    | 19369d866a8b89788cdc9b10c7b8c9b2777f806b
mm: convert soft_offline_in_use_page() to use a folio
Replace the existing head-page logic with folio logic.
Link: https://lkml.kernel.org/r/20231108182809.602073-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
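The mechanical shape of the conversion is the same as elsewhere in the folio work: resolve the folio once with page_folio() (replacing the open-coded compound_head() lookup), then call folio_* helpers in place of the page-based ones. Below is a minimal sketch of that pattern, not code from this patch; the example_old()/example_new() wrapper names are hypothetical, but every helper they call is an existing kernel API.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Old style: track the head page by hand, use page-based helpers. */
static void example_old(struct page *page)
{
        struct page *hpage = compound_head(page);

        lock_page(page);
        wait_on_page_writeback(page);
        unlock_page(page);
        put_page(page);                 /* drop the caller's reference */
        (void)hpage;
}

/* New style: a folio can never be a tail page, so the type carries
 * the head-page invariant and the lookup happens exactly once.
 */
static void example_new(struct page *page)
{
        struct folio *folio = page_folio(page);

        folio_lock(folio);
        folio_wait_writeback(folio);
        folio_unlock(folio);
        folio_put(folio);               /* drop the caller's reference */
}

The payoff is that the compound_head() calls hidden inside lock_page(), put_page() and friends are no longer needed once the caller holds a folio.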
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory-failure.c | 24
1 file changed, 12 insertions, 12 deletions
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9f03952e6d38..075db5b5ad5e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2645,40 +2645,40 @@ static int soft_offline_in_use_page(struct page *page)
 {
        long ret = 0;
        unsigned long pfn = page_to_pfn(page);
-       struct page *hpage = compound_head(page);
+       struct folio *folio = page_folio(page);
        char const *msg_page[] = {"page", "hugepage"};
-       bool huge = PageHuge(page);
+       bool huge = folio_test_hugetlb(folio);
        LIST_HEAD(pagelist);
        struct migration_target_control mtc = {
                .nid = NUMA_NO_NODE,
                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
        };
 
-       if (!huge && PageTransHuge(hpage)) {
+       if (!huge && folio_test_large(folio)) {
                if (try_to_split_thp_page(page)) {
                        pr_info("soft offline: %#lx: thp split failed\n", pfn);
                        return -EBUSY;
                }
-               hpage = page;
+               folio = page_folio(page);
        }
 
-       lock_page(page);
+       folio_lock(folio);
        if (!huge)
-               wait_on_page_writeback(page);
+               folio_wait_writeback(folio);
        if (PageHWPoison(page)) {
-               unlock_page(page);
-               put_page(page);
+               folio_unlock(folio);
+               folio_put(folio);
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
                return 0;
        }
 
-       if (!huge && PageLRU(page) && !PageSwapCache(page))
+       if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))
                /*
                 * Try to invalidate first. This should work for
                 * non dirty unmapped page cache pages.
                 */
-               ret = invalidate_inode_page(page);
-       unlock_page(page);
+               ret = mapping_evict_folio(folio_mapping(folio), folio);
+       folio_unlock(folio);
 
        if (ret) {
                pr_info("soft_offline: %#lx: invalidated\n", pfn);
@@ -2686,7 +2686,7 @@ static int soft_offline_in_use_page(struct page *page)
                return 0;
        }
 
-       if (isolate_page(hpage, &pagelist)) {
+       if (isolate_page(&folio->page, &pagelist)) {
                ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
                if (!ret) {
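One substitution above is more than a rename: invalidate_inode_page(page) becomes an explicit call to mapping_evict_folio(). By this point in the series, invalidate_inode_page() had already been reduced to a thin wrapper, roughly as sketched below (an illustration of the contemporaneous helper, not code from this patch).

/* Roughly what invalidate_inode_page() did once mapping_evict_folio()
 * existed: look up the folio and its mapping, then evict.
 * mapping_evict_folio() tolerates a NULL mapping (the folio may have
 * been truncated before the caller took the lock), which is why the
 * patch can pass folio_mapping(folio) in directly.
 */
static long invalidate_inode_page(struct page *page)
{
        struct folio *folio = page_folio(page);

        return mapping_evict_folio(folio_mapping(folio), folio);
}

Inlining the wrapper at the call site lets soft_offline_in_use_page() stay entirely in folio terms instead of converting back to a struct page for one call.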