author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2023-11-17 16:14:44 +0000
---|---|---
committer | Andrew Morton <akpm@linux-foundation.org> | 2023-12-10 16:51:41 -0800
commit | f7092393570f24865199d1642eb097f9e1c8f01e |
tree | a9a877c92295af21b564911c32b368a08b2dde63 /mm/memory-failure.c |
parent | 6304b531cd8f568ed2b8d680837b8ceebe175b89 |
memory-failure: convert delete_from_lru_cache() to take a folio
All three callers now have a folio; pass it in instead of the page.
Saves five calls to compound_head().
Link: https://lkml.kernel.org/r/20231117161447.2461643-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r-- | mm/memory-failure.c | 22 |
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d2764fd3e448..e73f2047ffcb 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -902,26 +902,26 @@ static const char * const action_page_types[] = {
  * The page count will stop it from being freed by unpoison.
  * Stress tests should be aware of this memory leak problem.
  */
-static int delete_from_lru_cache(struct page *p)
+static int delete_from_lru_cache(struct folio *folio)
 {
-	if (isolate_lru_page(p)) {
+	if (folio_isolate_lru(folio)) {
 		/*
 		 * Clear sensible page flags, so that the buddy system won't
-		 * complain when the page is unpoison-and-freed.
+		 * complain when the folio is unpoison-and-freed.
 		 */
-		ClearPageActive(p);
-		ClearPageUnevictable(p);
+		folio_clear_active(folio);
+		folio_clear_unevictable(folio);
 
 		/*
 		 * Poisoned page might never drop its ref count to 0 so we have
 		 * to uncharge it manually from its memcg.
 		 */
-		mem_cgroup_uncharge(page_folio(p));
+		mem_cgroup_uncharge(folio);
 
 		/*
-		 * drop the page count elevated by isolate_lru_page()
+		 * drop the refcount elevated by folio_isolate_lru()
 		 */
-		put_page(p);
+		folio_put(folio);
 		return 0;
 	}
 	return -EIO;
@@ -1019,7 +1019,7 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
 	struct address_space *mapping;
 	bool extra_pins;
 
-	delete_from_lru_cache(p);
+	delete_from_lru_cache(folio);
 
 	/*
 	 * For anonymous folios the only reference left
@@ -1146,7 +1146,7 @@ static int me_swapcache_dirty(struct page_state *ps, struct page *p)
 	/* Trigger EIO in shmem: */
 	folio_clear_uptodate(folio);
 
-	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
+	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_DELAYED;
 	folio_unlock(folio);
 
 	if (ret == MF_DELAYED)
@@ -1165,7 +1165,7 @@ static int me_swapcache_clean(struct page_state *ps, struct page *p)
 
 	delete_from_swap_cache(folio);
 
-	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+	ret = delete_from_lru_cache(folio) ? MF_FAILED : MF_RECOVERED;
 	folio_unlock(folio);
 
 	if (has_extra_refcount(ps, p, false))
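Background note (not part of the commit): the "saves five calls to compound_head()" claim follows from the fact that page-based helpers such as ClearPageActive(), isolate_lru_page() and put_page() must first resolve the head page of a compound page via compound_head(), whereas the folio_* variants already operate on the head. Below is a minimal, userspace-compilable sketch of that pattern; struct page, struct folio, compound_head(), page_folio(), clear_page_active() and folio_clear_active() here are simplified stand-in models written for illustration, not the real kernel definitions.

```c
/*
 * Simplified model of the page/folio relationship, to show why passing a
 * folio avoids repeated compound_head() lookups. Illustrative only; the
 * real definitions live in <linux/mm.h> and <linux/page-flags.h>.
 */
#include <stdio.h>

struct page {
	unsigned long flags;
	struct page *head;	/* stand-in for the encoded compound_head */
};

struct folio {
	struct page page;	/* a folio is the head page of a compound page */
};

/* Every page-based helper has to find the head page first. */
static struct page *compound_head(struct page *p)
{
	return p->head ? p->head : p;
}

static struct folio *page_folio(struct page *p)
{
	return (struct folio *)compound_head(p);
}

/* Old style: each helper pays for its own compound_head() lookup. */
static void clear_page_active(struct page *p)
{
	compound_head(p)->flags &= ~1UL;
}

/* New style: the caller already holds the head, so no lookup is needed. */
static void folio_clear_active(struct folio *folio)
{
	folio->page.flags &= ~1UL;
}

int main(void)
{
	struct page head = { .flags = 1, .head = NULL };
	struct page tail = { .flags = 0, .head = &head };

	/* page API: resolves the head page on every call */
	clear_page_active(&tail);

	/* folio API: resolve the head once, then reuse the folio pointer */
	struct folio *folio = page_folio(&tail);
	folio_clear_active(folio);

	printf("head flags: %lu\n", head.flags);
	return 0;
}
```

In the same spirit, the converted delete_from_lru_cache() performs no head-page lookups of its own: its callers already did the page-to-folio conversion once, so each former page call (ClearPageActive, ClearPageUnevictable, page_folio, put_page, and the implicit one in isolate_lru_page) stops paying for compound_head().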