diff options
-rw-r--r--	mm/swap.c   | 3 +++
-rw-r--r--	mm/vmscan.c | 6 ++++++
2 files changed, 9 insertions, 0 deletions
diff --git a/mm/swap.c b/mm/swap.c
index 6b697d33fa5b..e43a5911b170 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1012,6 +1012,9 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			free_huge_folio(folio);
 			continue;
 		}
+		if (folio_test_large(folio) &&
+		    folio_test_large_rmappable(folio))
+			folio_undo_large_rmappable(folio);
 
 		__page_cache_release(folio, &lruvec, &flags);
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e3349b75f15b..61606fa83504 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,6 +1413,9 @@ free_it:
 		 */
 		nr_reclaimed += nr_pages;
 
+		if (folio_test_large(folio) &&
+		    folio_test_large_rmappable(folio))
+			folio_undo_large_rmappable(folio);
 		if (folio_batch_add(&free_folios, folio) == 0) {
 			mem_cgroup_uncharge_folios(&free_folios);
 			try_to_unmap_flush();
@@ -1819,6 +1822,9 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 		if (unlikely(folio_put_testzero(folio))) {
 			__folio_clear_lru_flags(folio);
 
+			if (folio_test_large(folio) &&
+			    folio_test_large_rmappable(folio))
+				folio_undo_large_rmappable(folio);
 			if (folio_batch_add(&free_folios, folio) == 0) {
 				spin_unlock_irq(&lruvec->lru_lock);
 				mem_cgroup_uncharge_folios(&free_folios);