Diffstat (limited to 'mm')
-rw-r--r--   mm/memory.c   6
-rw-r--r--   mm/rmap.c     7
2 files changed, 7 insertions, 6 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 9f5f829a1b1f..4cd8cadf1268 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3511,10 +3511,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL, *swapcache;
 	struct swap_info_struct *si = NULL;
+	rmap_t rmap_flags = RMAP_NONE;
 	swp_entry_t entry;
 	pte_t pte;
 	int locked;
-	int exclusive = 0;
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
 
@@ -3689,7 +3689,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		vmf->flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;
-		exclusive = RMAP_EXCLUSIVE;
+		rmap_flags |= RMAP_EXCLUSIVE;
 	}
 	flush_icache_page(vma, page);
 	if (pte_swp_soft_dirty(vmf->orig_pte))
@@ -3705,7 +3705,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		lru_cache_add_inactive_or_unevictable(page, vma);
 	} else {
-		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
+		do_page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
 	}
 
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
diff --git a/mm/rmap.c b/mm/rmap.c
index 91a63dc636ad..23a41132995e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1181,7 +1181,8 @@ static void __page_check_anon_rmap(struct page *page,
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address, bool compound)
 {
-	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
+	do_page_add_anon_rmap(page, vma, address,
+			      compound ? RMAP_COMPOUND : RMAP_NONE);
 }
 
 /*
@@ -1190,7 +1191,7 @@ void page_add_anon_rmap(struct page *page,
  * Everybody else should continue to use page_add_anon_rmap above.
  */
 void do_page_add_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int flags)
+	struct vm_area_struct *vma, unsigned long address, rmap_t flags)
 {
 	bool compound = flags & RMAP_COMPOUND;
 	bool first;
@@ -1229,7 +1230,7 @@ void do_page_add_anon_rmap(struct page *page,
 	/* address might be in next vma when migration races vma_adjust */
 	else if (first)
 		__page_set_anon_rmap(page, vma, address,
-				     flags & RMAP_EXCLUSIVE);
+				     !!(flags & RMAP_EXCLUSIVE));
 	else
 		__page_check_anon_rmap(page, vma, address);
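For context, the rmap_t type and the RMAP_* flags this diff switches to live in include/linux/rmap.h. The following is a minimal sketch of what those definitions look like; the comments and exact values are paraphrased, not copied from the header:

/* Sketch of the rmap flag type assumed by the diff above. */
typedef int __bitwise rmap_t;

/* No special request: the (sub)page may be shared and is mapped via a PTE. */
#define RMAP_NONE		((__force rmap_t)0)

/* The (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE		((__force rmap_t)BIT(0))

/* The compound page is mapped via a single PMD and accounted as such. */
#define RMAP_COMPOUND		((__force rmap_t)BIT(1))

Making rmap_t a distinct __bitwise type (rather than a plain int) lets sparse flag call sites that mix ordinary integers with rmap flags, which is what the memory.c hunk converts: the old int exclusive variable becomes rmap_t rmap_flags, and individual requests such as RMAP_EXCLUSIVE are OR-ed in.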