author     | Mel Gorman <mel@csn.ul.ie>                     | 2009-06-16 15:32:13 -0700
committer  | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-16 19:47:35 -0700
commit     | f2260e6b1f4eba0f5b5906795117791b5c660154
tree       | 0faa8ce5fb0875835142e6ff3928b2ce076b4874 /mm
parent     | 418589663d6011de9006425b6c5721e1544fb47a
page allocator: update NR_FREE_PAGES only as necessary
When pages are being freed to the buddy allocator, the zone NR_FREE_PAGES
counter must be updated. In the case of bulk per-cpu page freeing, it's
updated once per page. This retouches cache lines more than necessary.
Update the counter once per per-cpu bulk free instead.
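
The pattern is general: when a hot counter is shared across CPUs, accumulate the delta and
apply it once per batch rather than once per item inside the loop. A minimal user-space
sketch of the idea follows; the zone_stub type and its field are made-up illustrative names,
not the kernel's data structures or API:

#include <stdio.h>

/* Hypothetical stand-in for a zone and its free-page counter. */
struct zone_stub {
	long nr_free_pages;	/* hot counter shared between CPUs */
};

/* Before: the shared counter is touched once per freed page,
 * dirtying its cache line on every loop iteration. */
static void free_bulk_per_page(struct zone_stub *zone, int count, int order)
{
	while (count--) {
		/* ... merge one 2^order block back into the free lists ... */
		zone->nr_free_pages += 1L << order;
	}
}

/* After: apply the whole delta once per bulk free. */
static void free_bulk_batched(struct zone_stub *zone, int count, int order)
{
	zone->nr_free_pages += (long)count << order;	/* single update */
	while (count--) {
		/* ... merge one 2^order block back into the free lists ... */
	}
}

int main(void)
{
	struct zone_stub zone = { .nr_free_pages = 0 };

	free_bulk_per_page(&zone, 16, 0);	/* 16 order-0 pages, 16 counter writes */
	free_bulk_batched(&zone, 16, 0);	/* same delta, one counter write */
	printf("nr_free_pages = %ld\n", zone.nr_free_pages);
	return 0;
}

In the patch itself the single __mod_zone_page_state() call moves from __free_one_page()
to its callers, which already hold zone->lock for the whole batch.
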
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index abe26003124d..d56e377ad085 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -456,7 +456,6 @@ static inline void __free_one_page(struct page *page,
 		int migratetype)
 {
 	unsigned long page_idx;
-	int order_size = 1 << order;
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
@@ -466,10 +465,9 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	VM_BUG_ON(page_idx & (order_size - 1));
+	VM_BUG_ON(page_idx & ((1 << order) - 1));
 	VM_BUG_ON(bad_range(zone, page));
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
 	while (order < MAX_ORDER-1) {
 		unsigned long combined_idx;
 		struct page *buddy;
@@ -524,6 +522,8 @@ static void free_pages_bulk(struct zone *zone, int count,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
 	while (count--) {
 		struct page *page;
 
@@ -542,6 +542,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
 	spin_unlock(&zone->lock);
 }
@@ -686,7 +688,6 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
-		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
 		expand(zone, page, order, current_order, area, migratetype);
 		return page;
 	}
@@ -826,8 +827,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
 			rmv_page_order(page);
-			__mod_zone_page_state(zone, NR_FREE_PAGES,
-							-(1UL << order));
 
 			if (current_order == pageblock_order)
 				set_pageblock_migratetype(page,
@@ -900,6 +899,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
 	return i;
 }
@@ -1129,6 +1129,7 @@ again:
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
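
Taken together, the hunks move the NR_FREE_PAGES accounting out of __free_one_page() and
into the freeing paths that already hold zone->lock. Roughly, the two free paths read as
below after the patch; only the lines visible in the hunks above are verbatim, while the
completed parameter lists and the elided loop body are reconstructions for illustration:

static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;

	/* One counter update covers the whole batch of 'count' blocks. */
	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
	while (count--) {
		/* ... take a page off 'list' and merge it via __free_one_page(),
		 * which no longer touches NR_FREE_PAGES ... */
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;

	/* Single block: still exactly one update under the lock. */
	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
	__free_one_page(page, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

The allocation side mirrors this: rmqueue_bulk() subtracts i << order once after its
loop, and the locked order > 0 path in the last hunk subtracts 1 << order for a single
allocation.
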