From 409eb8c2611b4310947a150af988111f7f52ab15 Mon Sep 17 00:00:00 2001
From: Hillf Danton
Date: Fri, 20 Jan 2012 14:34:13 -0800
Subject: mm/hugetlb.c: undo change to page mapcount in fault handler

Page mapcount should be updated only if we are sure that the page ends
up in the page table; otherwise we would leak if we couldn't COW due to
reservations or if idx is out of bounds.

Signed-off-by: Hillf Danton
Reviewed-by: Michal Hocko
Acked-by: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'mm')

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea8c3a4cd2ae..5f34bd8dda34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct hstate *h = hstate_vma(vma);
 	int ret = VM_FAULT_SIGBUS;
+	int anon_rmap = 0;
 	pgoff_t idx;
 	unsigned long size;
 	struct page *page;
@@ -2562,14 +2563,13 @@ retry:
 			spin_lock(&inode->i_lock);
 			inode->i_blocks += blocks_per_huge_page(h);
 			spin_unlock(&inode->i_lock);
-			page_dup_rmap(page);
 		} else {
 			lock_page(page);
 			if (unlikely(anon_vma_prepare(vma))) {
 				ret = VM_FAULT_OOM;
 				goto backout_unlocked;
 			}
-			hugepage_add_new_anon_rmap(page, vma, address);
+			anon_rmap = 1;
 		}
 	} else {
 		/*
@@ -2582,7 +2582,6 @@ retry:
 			VM_FAULT_SET_HINDEX(h - hstates);
 			goto backout_unlocked;
 		}
-		page_dup_rmap(page);
 	}
 
 	/*
@@ -2606,6 +2605,10 @@ retry:
 	if (!huge_pte_none(huge_ptep_get(ptep)))
 		goto backout;
 
+	if (anon_rmap)
+		hugepage_add_new_anon_rmap(page, vma, address);
+	else
+		page_dup_rmap(page);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
 	set_huge_pte_at(mm, address, ptep, new_pte);
--
cgit v1.2.3-58-ga151
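
To make the ordering rule the patch enforces concrete, here is a minimal
userspace sketch (all names hypothetical; not kernel code) of the same
pattern: take the mapcount reference only after the last check that can
still abort the fault, so a backout never leaks a count.

/*
 * Hypothetical stand-in for huge_pte_none(huge_ptep_get(ptep)): another
 * thread may have installed the PTE while the lock was dropped, which
 * forces this fault to back out.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_page { int mapcount; };

static bool pte_still_empty(bool raced) { return !raced; }

/* Pre-patch ordering, for contrast: the count is taken before the final
 * check, so the backout path leaks one reference. */
static int fault_buggy(struct fake_page *page, bool raced)
{
	page->mapcount++;		/* taken too early */
	if (!pte_still_empty(raced))
		return -1;		/* backout: count leaked */
	return 0;
}

/* Post-patch ordering: side effects happen only after the final check. */
static int fault_fixed(struct fake_page *page, bool raced)
{
	/* ... allocation, reservation checks, etc. elided ... */
	if (!pte_still_empty(raced))
		return -1;		/* backout: mapcount untouched */
	page->mapcount++;		/* only now is the page really mapped */
	return 0;
}

int main(void)
{
	struct fake_page a = { 0 }, b = { 0 };

	fault_buggy(&a, true);		/* racing fault backs out */
	printf("buggy: mapcount = %d (leaked; expect 0)\n", a.mapcount);

	fault_fixed(&b, true);		/* racing fault backs out cleanly */
	fault_fixed(&b, false);		/* normal fault installs the mapping */
	printf("fixed: mapcount = %d (expect 1)\n", b.mapcount);
	return 0;
}

In the patched kernel code, the new anon_rmap flag plays this deferring
role: the choice between hugepage_add_new_anon_rmap() and page_dup_rmap()
is recorded early but acted on only after the huge_pte_none() recheck
succeeds, immediately before set_huge_pte_at() installs the entry.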