From 6bace090a25455cb1dffaa9ab4aabc36dbd44d4a Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Wed, 10 Dec 2014 15:43:31 -0800
Subject: mm, compaction: always update cached scanner positions

Compaction caches the migration and free scanner positions between
compaction invocations, so that the whole zone eventually gets scanned and
there is no bias towards the initial scanner positions at the
beginning/end of the zone.

The cached positions are continuously updated as the scanners progress,
and the updating stops as soon as a page is successfully isolated.  The
reasoning behind this is that a pageblock where isolation succeeded is
likely to succeed again in the near future, so it should be worth
revisiting.

However, the downside is that potentially many pages are rescanned without
successful isolation.  At worst, there might be a page where isolation
from LRU succeeds but migration fails (potentially always).  Upon
encountering such a page, the cached position would stop being updated for
no good reason.  It might have been useful to let such a page be rescanned
with sync compaction after async compaction failed, but this is now
handled by caching scanner positions for async and sync mode separately
since commit 35979ef33931 ("mm, compaction: add per-zone migration pfn
cache for async compaction").

After this patch, cached positions are updated unconditionally.  In the
stress-highalloc benchmark, this decreased the number of scanned pages by
a few percent, without affecting allocation success rates.

To prevent the free scanner from leaving free pages behind after they are
returned due to page migration failure, the cached scanner pfn is changed
to point to the pageblock of the returned free page with the highest pfn,
before leaving compact_zone().

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Vlastimil Babka
Cc: Minchan Kim
Cc: Mel Gorman
Cc: Joonsoo Kim
Cc: Michal Nazarewicz
Cc: Naoya Horiguchi
Cc: Christoph Lameter
Acked-by: Rik van Riel
Cc: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
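For illustration only, not part of the patch: below is a minimal userspace
sketch of the cached-pfn bookkeeping that the compact_zone() hunk adds.  It
assumes order-9 pageblocks, i.e. pageblock_nr_pages == 512; cached_free_pfn
and update_cached_free_pfn() are hypothetical stand-ins for
zone->compact_cached_free_pfn and the open-coded logic, not kernel API.

#include <assert.h>
#include <stdio.h>

#define pageblock_nr_pages 512UL	/* assumed pageblock order 9 */

static unsigned long cached_free_pfn;

/*
 * Mirror of the new logic at the end of compact_zone(): round the highest
 * returned pfn down to the start of its pageblock and move the cached
 * free scanner position back (to a higher pfn), but never forward.
 */
static void update_cached_free_pfn(unsigned long highest_returned_pfn)
{
	unsigned long free_pfn = highest_returned_pfn;

	/* The cached pfn is always the first in a pageblock */
	free_pfn &= ~(pageblock_nr_pages - 1);

	/* Only go back, not forward (the free scanner moves downwards) */
	if (free_pfn > cached_free_pfn)
		cached_free_pfn = free_pfn;
}

int main(void)
{
	cached_free_pfn = 1024;		/* scanner already moved down here */

	/* A returned page at pfn 2500 is in the pageblock starting at 2048 */
	update_cached_free_pfn(2500);
	assert(cached_free_pfn == 2048);

	/* A lower returned pfn must not drag the scanner forward again */
	update_cached_free_pfn(100);
	assert(cached_free_pfn == 2048);

	printf("cached_free_pfn = %lu\n", cached_free_pfn);
	return 0;
}

The mask trick works because pageblock_nr_pages is a power of two; compiled
with "cc sketch.c && ./a.out", both asserts hold.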
 mm/compaction.c | 43 +++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index eaf0a925ff26..8f211bd2ea0d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -41,15 +41,17 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 static unsigned long release_freepages(struct list_head *freelist)
 {
 	struct page *page, *next;
-	unsigned long count = 0;
+	unsigned long high_pfn = 0;
 
 	list_for_each_entry_safe(page, next, freelist, lru) {
+		unsigned long pfn = page_to_pfn(page);
 		list_del(&page->lru);
 		__free_page(page);
-		count++;
+		if (pfn > high_pfn)
+			high_pfn = pfn;
 	}
 
-	return count;
+	return high_pfn;
 }
 
 static void map_pages(struct list_head *list)
@@ -195,16 +197,12 @@ static void update_pageblock_skip(struct compact_control *cc,
 
 	/* Update where async and sync compaction should restart */
 	if (migrate_scanner) {
-		if (cc->finished_update_migrate)
-			return;
 		if (pfn > zone->compact_cached_migrate_pfn[0])
 			zone->compact_cached_migrate_pfn[0] = pfn;
 		if (cc->mode != MIGRATE_ASYNC &&
 		    pfn > zone->compact_cached_migrate_pfn[1])
 			zone->compact_cached_migrate_pfn[1] = pfn;
 	} else {
-		if (cc->finished_update_free)
-			return;
 		if (pfn < zone->compact_cached_free_pfn)
 			zone->compact_cached_free_pfn = pfn;
 	}
@@ -715,7 +713,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 
 isolate_success:
-		cc->finished_update_migrate = true;
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;
@@ -888,15 +885,6 @@ static void isolate_freepages(struct compact_control *cc)
 					isolate_start_pfn :
 					block_start_pfn - pageblock_nr_pages;
 
-		/*
-		 * Set a flag that we successfully isolated in this pageblock.
-		 * In the next loop iteration, zone->compact_cached_free_pfn
-		 * will not be updated and thus it will effectively contain the
-		 * highest pageblock we isolated pages from.
-		 */
-		if (isolated)
-			cc->finished_update_free = true;
-
 		/*
 		 * isolate_freepages_block() might have aborted due to async
 		 * compaction being contended
@@ -1251,9 +1239,24 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	}
 
 out:
-	/* Release free pages and check accounting */
-	cc->nr_freepages -= release_freepages(&cc->freepages);
-	VM_BUG_ON(cc->nr_freepages != 0);
+	/*
+	 * Release free pages and update where the free scanner should restart,
+	 * so we don't leave any returned pages behind in the next attempt.
+	 */
+	if (cc->nr_freepages > 0) {
+		unsigned long free_pfn = release_freepages(&cc->freepages);
+
+		cc->nr_freepages = 0;
+		VM_BUG_ON(free_pfn == 0);
+		/* The cached pfn is always the first in a pageblock */
+		free_pfn &= ~(pageblock_nr_pages-1);
+		/*
+		 * Only go back, not forward. The cached pfn might have been
+		 * already reset to zone end in compact_finished()
+		 */
+		if (free_pfn > zone->compact_cached_free_pfn)
+			zone->compact_cached_free_pfn = free_pfn;
+	}
 
 	trace_mm_compaction_end(ret);
 