author | Christoph Lameter <clameter@sgi.com> | 2006-06-23 02:03:53 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-23 07:42:53 -0700
commit | 95a402c3847cc16f4ba03013cd01404fa0f14c2e (patch)
tree | 0fd9b3379f70cc99b2325bccaa150089abf6c8b3 /mm/mempolicy.c
parent | aaa994b300a172afafab47938804836b923e5ef7 (diff)
[PATCH] page migration: use allocator function for migrate_pages()
Instead of passing a list of new pages, pass a function to allocate a new
page. This allows the correct placement of MPOL_INTERLEAVE pages during page
migration. It also further simplifies the callers of migrate_pages().
migrate_pages() becomes similar to migrate_pages_to(), so drop
migrate_pages_to(). The batching of new page allocations becomes unnecessary.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Jes Sorensen <jes@trained-monkey.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
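
A minimal sketch of the new calling convention, as the call sites in this patch use it: the caller supplies an allocator callback plus an opaque private value instead of a pre-built list of destination pages. The helper names alloc_on_node() and migrate_list_to_node() below are illustrative only, and the migrate_pages() prototype is assumed from the call sites changed in this patch rather than quoted from mm/migrate.c.

```c
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Sketch: with the new interface the caller no longer builds a list of
 * destination pages.  It passes an allocator callback plus an opaque
 * 'private' value, which migrate_pages() hands back to the callback for
 * every page still on the list.  Here 'private' carries a node id, as in
 * the migrate_to_node() change below; the helper names are illustrative.
 */
static struct page *alloc_on_node(struct page *page, unsigned long node)
{
	return alloc_pages_node(node, GFP_HIGHUSER, 0);
}

static int migrate_list_to_node(struct list_head *pagelist, int dest)
{
	if (list_empty(pagelist))
		return 0;

	/* New pages are allocated on demand, so no batching is required. */
	return migrate_pages(pagelist, alloc_on_node, dest);
}
```

Because the callback also receives the page being migrated, the second caller in this patch (new_vma_page() in do_mbind()) can derive a per-page address with page_address_in_vma() and let alloc_page_vma() apply the VMA's policy, which is what enables correct MPOL_INTERLEAVE placement during migration.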
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r-- | mm/mempolicy.c | 23
1 file changed, 21 insertions, 2 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 244f3f130e4a..f432642e9e66 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -87,6 +87,7 @@
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
 #include <linux/migrate.h>
+#include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -587,6 +588,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
 		isolate_lru_page(page, pagelist);
 }
 
+static struct page *new_node_page(struct page *page, unsigned long node)
+{
+	return alloc_pages_node(node, GFP_HIGHUSER, 0);
+}
+
 /*
  * Migrate pages from one node to a target node.
  * Returns error or the number of pages not migrated.
@@ -604,7 +610,8 @@ int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
 	if (!list_empty(&pagelist))
-		err = migrate_pages_to(&pagelist, NULL, dest);
+		err = migrate_pages(&pagelist, new_node_page, dest);
+
 	return err;
 }
 
@@ -691,6 +698,12 @@ int do_migrate_pages(struct mm_struct *mm,
 
 }
 
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+
+	return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+}
 #else
 
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
@@ -703,6 +716,11 @@ int do_migrate_pages(struct mm_struct *mm,
 {
 	return -ENOSYS;
 }
+
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+	return NULL;
+}
 #endif
 
 long do_mbind(unsigned long start, unsigned long len,
@@ -764,7 +782,8 @@ long do_mbind(unsigned long start, unsigned long len,
 		err = mbind_range(vma, start, end, new);
 
 	if (!list_empty(&pagelist))
-		nr_failed = migrate_pages_to(&pagelist, vma, -1);
+		nr_failed = migrate_pages(&pagelist, new_vma_page,
+						(unsigned long)vma);
 
 	if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 		err = -EIO;
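
To make the convention concrete, the consuming side can be modelled roughly as below. The real loop lives in mm/migrate.c, which this diff (limited to mm/mempolicy.c) does not show, so treat this as a sketch of the expected behaviour only; migrate_pages_model() is a made-up name and the unmap/copy/remap step is elided.

```c
#include <linux/list.h>
#include <linux/mm.h>

/*
 * Rough model of how an allocator callback is expected to be consumed;
 * not the real mm/migrate.c code.  For each page on the list a
 * replacement is requested from the callback, with the caller's
 * 'private' value (a node id or a cast vm_area_struct pointer in this
 * patch) passed through untouched.
 */
static int migrate_pages_model(struct list_head *from,
		struct page *(*get_new_page)(struct page *, unsigned long),
		unsigned long private)
{
	struct page *page, *next;
	int nr_failed = 0;

	list_for_each_entry_safe(page, next, from, lru) {
		struct page *newpage = get_new_page(page, private);

		if (!newpage) {
			nr_failed++;
			continue;
		}
		/* ... unmap 'page', copy its contents to 'newpage', remap ... */
	}
	return nr_failed;
}
```

A non-zero return counts pages that could not be migrated, which matches how do_mbind() above turns nr_failed into -EIO when MPOL_MF_STRICT is set.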