From 5dbe0af47f8a8f968bac2991c3ec974c6e3eaabc Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Sat, 28 May 2011 13:17:04 -0700
Subject: mm: fix kernel BUG at mm/rmap.c:1017!

I've hit the "address >= vma->vm_end" check in do_page_add_anon_rmap()
just once.  The stack showed khugepaged allocation trying to compact
pages: the call to page_add_anon_rmap() coming from remove_migration_pte().

That path holds anon_vma lock, but does not hold mmap_sem: it can
therefore race with a split_vma(), and in commit 5f70b962ccc2 "mmap:
avoid unnecessary anon_vma lock" we just took away the anon_vma lock
protection when adjusting vma->vm_end.

I don't think that particular BUG_ON ever caught anything interesting,
so better replace it by a comment, than reinstate the anon_vma locking.

Signed-off-by: Hugh Dickins
Signed-off-by: Linus Torvalds
---
 mm/rmap.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index 3a39b518a653..ba58ca36fc90 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1014,7 +1014,7 @@ void do_page_add_anon_rmap(struct page *page,
 		return;
 
 	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	/* address might be in next vma when migration races vma_adjust */
 	if (first)
 		__page_set_anon_rmap(page, vma, address, exclusive);
 	else
@@ -1709,7 +1709,7 @@ void hugepage_add_anon_rmap(struct page *page,
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!anon_vma);
-	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+	/* address might be in next vma when migration races vma_adjust */
 	first = atomic_inc_and_test(&page->_mapcount);
 	if (first)
 		__hugepage_set_anon_rmap(page, vma, address, 0);
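
The race is easier to see with a minimal userspace sketch (not kernel code, and
not part of the patch above).  All names here -- the vma stand-in, anon_vma_lock,
mmap_sem, split_side() and migration_side() -- are hypothetical; the only
assumption, taken from the commit message, is that the two sides serialize on
different locks, so nothing orders the shrink of vm_end against the range check,
and the old BUG_ON-style check can fail for an address that was valid when
migration began.

/*
 * Userspace analogue of the race, built with: cc -pthread race_sketch.c
 * (hypothetical file name).  anon_vma_lock stands in for the anon_vma lock
 * held on the migration path; mmap_sem stands in for the lock under which
 * vma_adjust() shrinks vm_end.  The unsynchronized access to vm_end is
 * deliberate: it loosely mirrors the lack of shared locking described above.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t anon_vma_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mmap_sem      = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the vma's address range. */
static struct {
	unsigned long vm_start;
	unsigned long vm_end;
} vma = { 0x1000, 0x9000 };

/* Analogue of split_vma()/vma_adjust(): shrinks vm_end under mmap_sem only. */
static void *split_side(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&mmap_sem);
	vma.vm_end = 0x5000;	/* the tail now belongs to the "next" vma */
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

/* Analogue of remove_migration_pte() -> page_add_anon_rmap(): runs the old
 * range check under the anon_vma stand-in lock only. */
static void *migration_side(void *arg)
{
	unsigned long address = (unsigned long)arg;

	pthread_mutex_lock(&anon_vma_lock);
	if (address < vma.vm_start || address >= vma.vm_end)
		printf("spurious BUG_ON: %#lx outside [%#lx, %#lx)\n",
		       address, vma.vm_start, vma.vm_end);
	else
		printf("check passed for %#lx\n", address);
	pthread_mutex_unlock(&anon_vma_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	/* 0x6000 is inside the original range, but beyond vm_end once
	 * split_side() has run. */
	pthread_create(&t1, NULL, split_side, NULL);
	pthread_create(&t2, NULL, migration_side, (void *)0x6000UL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Depending on scheduling, either message may be printed: the check only fails
when the shrink happens to land first, which matches the report of hitting the
check just once.  That is why the patch downgrades each check to a comment
rather than reinstating the anon_vma locking around vma->vm_end adjustment.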