author		Hugh Dickins <hugh@veritas.com>	2009-01-06 14:39:27 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-06 15:59:02 -0800
commit		cbf84b7add8103b92aaa84928e335df726bfc8da (patch)
tree		04e3ef2b3f70e5bf647f4f4c8c3ce50dd12e8474
parent		2afd1c928f1132b8d0099866e75ce8ad713a1180 (diff)
mm: further cleanup page_add_new_anon_rmap
Moving lru_cache_add_active_or_unevictable() into page_add_new_anon_rmap()
was good but stupid: we can and should SetPageSwapBacked() there too; and
we know for sure that this anonymous, swap-backed page is not file cache.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/memory.c | 3 ---
 mm/rmap.c   | 6 +++---
 2 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a138c50dc39a..122d965e820f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1948,7 +1948,6 @@ gotten:
* thread doing COW.
*/
ptep_clear_flush_notify(vma, address, page_table);
- SetPageSwapBacked(new_page);
page_add_new_anon_rmap(new_page, vma, address);
set_pte_at(mm, address, page_table, entry);
update_mmu_cache(vma, address, entry);
@@ -2444,7 +2443,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (!pte_none(*page_table))
goto release;
inc_mm_counter(mm, anon_rss);
- SetPageSwapBacked(page);
page_add_new_anon_rmap(page, vma, address);
set_pte_at(mm, address, page_table, entry);
@@ -2592,7 +2590,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (anon) {
inc_mm_counter(mm, anon_rss);
- SetPageSwapBacked(page);
page_add_new_anon_rmap(page, vma, address);
} else {
inc_mm_counter(mm, file_rss);
diff --git a/mm/rmap.c b/mm/rmap.c
index 892e1877366b..b1770b11a571 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -47,7 +47,6 @@
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
-#include <linux/mm_inline.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
@@ -673,10 +672,11 @@ void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
- atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+ SetPageSwapBacked(page);
+ atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
__page_set_anon_rmap(page, vma, address);
if (page_evictable(page, vma))
- lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+ lru_cache_add_lru(page, LRU_ACTIVE_ANON);
else
add_page_to_unevictable_list(page);
}
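
For reference, the consolidated helper reads roughly as follows once the
mm/rmap.c hunk above is applied. This is reconstructed from the diff itself
rather than quoted from a complete mm/rmap.c of that era, so treat it as a
sketch of the end state, not a buildable excerpt:

void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);		/* new anon page is swap backed, never file cache */
	atomic_set(&page->_mapcount, 0);	/* increment count (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

Callers such as do_anonymous_page() and __do_fault() then reduce to the
pattern visible in the mm/memory.c hunks, with no separate
SetPageSwapBacked() call of their own:

	inc_mm_counter(mm, anon_rss);
	page_add_new_anon_rmap(page, vma, address);	/* also sets PG_swapbacked */
	set_pte_at(mm, address, page_table, entry);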