author	Jan Kara <jack@suse.cz>	2016-12-14 15:07:50 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 16:04:09 -0800
commit	2f89dc12a25ddf995b9acd7b6543fe892e3473d6 (patch)
tree	d162312aa8bdcb6602a9c30dd5810c0f2f8e0a16
parent	a6abc2c0e77b16480f4d2c1eb7925e5287ae1526 (diff)
dax: protect PTE modification on WP fault by radix tree entry lock
Currently the PTE gets updated in wp_pfn_shared() after dax_pfn_mkwrite()
has released the corresponding radix tree entry lock. When we want to
write-protect the PTE on cache flush, the PTE modification needs to happen
under the radix tree entry lock to ensure consistent updates of the PTE
and the radix tree (standard faults use the page lock to ensure this
consistency). So move the update of the PTE bit into dax_pfn_mkwrite().

Link: http://lkml.kernel.org/r/1479460644-25076-20-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	fs/dax.c	22
-rw-r--r--	mm/memory.c	2
2 files changed, 17 insertions(+), 7 deletions(-)
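
To make the locking change easier to follow, here is a condensed sketch of the order dax_pfn_mkwrite() now uses, built only from the calls visible in the fs/dax.c hunk below (not verbatim kernel code; the early-return path for a missing entry is omitted): the radix tree entry is locked before the PTE is made writable, so the PTE and the radix tree entry are updated consistently, mirroring what the page lock guarantees for ordinary page-backed faults.

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);		/* take the radix tree entry lock */
	spin_unlock_irq(&mapping->tree_lock);
	finish_mkwrite_fault(vmf);			/* PTE update happens under the entry lock */
	put_locked_mapping_entry(mapping, index, entry);	/* drop the entry lock */
	return VM_FAULT_NOPAGE;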
diff --git a/fs/dax.c b/fs/dax.c
index df5c0daba698..cf7a20a5858b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -783,17 +783,27 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct file *file = vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
-	void *entry;
+	void *entry, **slot;
 	pgoff_t index = vmf->pgoff;
 
 	spin_lock_irq(&mapping->tree_lock);
-	entry = get_unlocked_mapping_entry(mapping, index, NULL);
-	if (!entry || !radix_tree_exceptional_entry(entry))
-		goto out;
+	entry = get_unlocked_mapping_entry(mapping, index, &slot);
+	if (!entry || !radix_tree_exceptional_entry(entry)) {
+		if (entry)
+			put_unlocked_mapping_entry(mapping, index, entry);
+		spin_unlock_irq(&mapping->tree_lock);
+		return VM_FAULT_NOPAGE;
+	}
 	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
-	put_unlocked_mapping_entry(mapping, index, entry);
-out:
+	entry = lock_slot(mapping, slot);
 	spin_unlock_irq(&mapping->tree_lock);
+	/*
+	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
+	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
+	 * the fault in either case.
+	 */
+	finish_mkwrite_fault(vmf);
+	put_locked_mapping_entry(mapping, index, entry);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
diff --git a/mm/memory.c b/mm/memory.c
index edd899d0decb..57d0bd1bd2c4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2315,7 +2315,7 @@ static int wp_pfn_shared(struct vm_fault *vmf)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		vmf->flags |= FAULT_FLAG_MKWRITE;
 		ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
-		if (ret & VM_FAULT_ERROR)
+		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
 			return ret;
 		return finish_mkwrite_fault(vmf);
 	}
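
For providers other than DAX, the mm/memory.c change defines the same contract: a ->pfn_mkwrite() handler that completes the PTE update itself may return VM_FAULT_NOPAGE, and wp_pfn_shared() will now pass that straight back instead of calling finish_mkwrite_fault() a second time outside the handler's lock. Below is a minimal sketch of such a handler; struct example_dev, its fault_lock, and example_pfn_mkwrite() are hypothetical names invented for illustration, while finish_mkwrite_fault(), the pfn_mkwrite hook signature, and VM_FAULT_NOPAGE come from the patch context above.

#include <linux/mm.h>
#include <linux/mutex.h>

/* Hypothetical per-device state; the mutex stands in for whatever lock the
 * driver uses to serialize PTE updates against its own writeback path. */
struct example_dev {
	struct mutex fault_lock;
};

static int example_pfn_mkwrite(struct vm_area_struct *vma,
			       struct vm_fault *vmf)
{
	struct example_dev *dev = vma->vm_private_data;

	mutex_lock(&dev->fault_lock);
	/* Make the PTE writable while the driver lock is held, analogous to
	 * dax_pfn_mkwrite() doing it under the radix tree entry lock. */
	finish_mkwrite_fault(vmf);
	mutex_unlock(&dev->fault_lock);
	/* VM_FAULT_NOPAGE tells wp_pfn_shared() the PTE is already handled,
	 * so it will not call finish_mkwrite_fault() again. */
	return VM_FAULT_NOPAGE;
}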