 mm/memory.c | 54 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8dc69471be6c..a62f06750e93 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4125,29 +4125,17 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 		goto out;
 	}
 
-	/*
-	 * Make it present again, Depending on how arch implementes non
-	 * accessible ptes, some can allow access by kernel mode.
-	 */
-	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+	/* Get the normal PTE */
+	old_pte = ptep_get(vmf->pte);
 	pte = pte_modify(old_pte, vma->vm_page_prot);
-	pte = pte_mkyoung(pte);
-	if (was_writable)
-		pte = pte_mkwrite(pte);
-	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
-	update_mmu_cache(vma, vmf->address, vmf->pte);
 
 	page = vm_normal_page(vma, vmf->address, pte);
-	if (!page) {
-		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		return 0;
-	}
+	if (!page)
+		goto out_map;
 
 	/* TODO: handle PTE-mapped THP */
-	if (PageCompound(page)) {
-		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		return 0;
-	}
+	if (PageCompound(page))
+		goto out_map;
 
 	/*
 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
@@ -4157,7 +4145,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
 	 * background writeback, dirty balancing and application behaviour.
 	 */
-	if (!pte_write(pte))
+	if (!was_writable)
 		flags |= TNF_NO_GROUP;
 
 	/*
@@ -4171,23 +4159,45 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	page_nid = page_to_nid(page);
 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
 			&flags);
-	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (target_nid == NUMA_NO_NODE) {
 		put_page(page);
-		goto out;
+		goto out_map;
 	}
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 
 	/* Migrate to the requested node */
 	if (migrate_misplaced_page(page, vma, target_nid)) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
-	} else
+	} else {
 		flags |= TNF_MIGRATE_FAIL;
+		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
+		spin_lock(vmf->ptl);
+		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
+			pte_unmap_unlock(vmf->pte, vmf->ptl);
+			goto out;
+		}
+		goto out_map;
+	}
 
 out:
 	if (page_nid != NUMA_NO_NODE)
 		task_numa_fault(last_cpupid, page_nid, 1, flags);
 	return 0;
+out_map:
+	/*
+	 * Make it present again, depending on how arch implements
+	 * non-accessible ptes, some can allow access by kernel mode.
+	 */
+	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+	pte = pte_modify(old_pte, vma->vm_page_prot);
+	pte = pte_mkyoung(pte);
+	if (was_writable)
+		pte = pte_mkwrite(pte);
+	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
+	update_mmu_cache(vma, vmf->address, vmf->pte);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
+	goto out;
 }
 
 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
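
Note: the restructuring above delays making the PTE present again until after the migration decision. The page table lock is now only dropped once migration is actually attempted; if migrate_misplaced_page() fails, the fault handler re-takes the lock, revalidates the entry against vmf->orig_pte with pte_same(), and only then restores the mapping at out_map. Below is a minimal user-space C sketch of that unlock, do-slow-work, relock-and-revalidate pattern; all names in it (entry, snapshot, try_migrate(), fault_handler()) are illustrative stand-ins, not kernel APIs.

	/*
	 * User-space sketch (not kernel code) of the pattern that the
	 * out_map/pte_same() logic above implements: "entry" stands in
	 * for the PTE, "snapshot" for vmf->orig_pte, and try_migrate()
	 * for migrate_misplaced_page().
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long entry = 0x1000;	/* guarded by "lock", like a PTE under the PTL */

	static bool try_migrate(void)
	{
		return false;			/* pretend the migration attempt failed */
	}

	static void fault_handler(void)
	{
		unsigned long snapshot;

		pthread_mutex_lock(&lock);
		snapshot = entry;		/* like vmf->orig_pte */
		pthread_mutex_unlock(&lock);	/* drop the lock for the slow path */

		if (try_migrate())
			return;			/* migrated; nothing to restore */

		pthread_mutex_lock(&lock);
		if (entry != snapshot) {	/* like !pte_same(): entry changed meanwhile */
			pthread_mutex_unlock(&lock);
			return;			/* lost the race; back off */
		}
		entry |= 0x1;			/* "make it present again", like out_map */
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		fault_handler();
		printf("entry = %#lx\n", entry);
		return 0;
	}

The design point mirrors the patch: the snapshot taken under the lock plays the role of vmf->orig_pte, and the equality check after relocking is the analogue of pte_same(). If another thread changed the entry while the lock was dropped, the handler backs off rather than overwriting the newer state.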