author	Suren Baghdasaryan <surenb@google.com>	2023-08-04 08:27:22 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-21 13:37:46 -0700
commit	60081bf19b0ec8fa40c589bd361fa2bc763f1050 (patch)
tree	b4c4909b23e49edc487efcd44d983fb2f2af6334 /mm
parent	e727bfd5e73a35ecbc4a01a15c659b9fafaa97c0 (diff)
mm: lock vma explicitly before doing vm_flags_reset and vm_flags_reset_once
Implicit vma locking inside vm_flags_reset() and vm_flags_reset_once() is
not obvious and makes it hard to understand where vma locking is happening.
Also, in some cases (like in dup_userfaultfd()) the vma should be locked
earlier than the vm_flags modification.

To make locking more visible, change these functions to assert that the vma
write lock is taken and explicitly lock the vma beforehand. Fix the
userfaultfd functions, which should lock the vma earlier.

Link: https://lkml.kernel.org/r/20230804152724.3090321-5-surenb@google.com
Suggested-by: Linus Torvalds <torvalds@linuxfoundation.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
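A minimal sketch of the resulting caller pattern, assuming the helper behaviour
described in the message above (the include/linux/mm.h side of the change is not
part of this mm/-only diff, and example_set_vm_flags() below is a hypothetical
function used only to illustrate the locking order repeated in each hunk):

	#include <linux/mm.h>

	/*
	 * Hypothetical caller illustrating the explicit locking order.
	 * Assumption based on the commit message: vm_flags_reset() now asserts
	 * that the vma write lock is held instead of taking it, so the caller
	 * must call vma_start_write() itself before modifying vm_flags.
	 */
	static void example_set_vm_flags(struct vm_area_struct *vma,
					 vm_flags_t newflags)
	{
		mmap_assert_write_locked(vma->vm_mm);	/* mmap_lock held in write mode */
		vma_start_write(vma);			/* write-lock the vma explicitly */
		vm_flags_reset(vma, newflags);		/* only then reset vm_flags */
	}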
Diffstat (limited to 'mm')
-rw-r--r--	mm/madvise.c	5
-rw-r--r--	mm/mlock.c	3
-rw-r--r--	mm/mprotect.c	1
3 files changed, 5 insertions, 4 deletions
diff --git a/mm/madvise.c b/mm/madvise.c
index da65f8bd9ac3..8498f700c284 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -173,9 +173,8 @@ static int madvise_update_vma(struct vm_area_struct *vma,
}
success:
- /*
- * vm_flags is protected by the mmap_lock held in write mode.
- */
+ /* vm_flags is protected by the mmap_lock held in write mode. */
+ vma_start_write(vma);
vm_flags_reset(vma, new_flags);
if (!vma->vm_file || vma_is_anon_shmem(vma)) {
error = replace_anon_vma_name(vma, anon_name);
diff --git a/mm/mlock.c b/mm/mlock.c
index 0a0c996c5c21..1746600a2e14 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -386,6 +386,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
*/
if (newflags & VM_LOCKED)
newflags |= VM_IO;
+ vma_start_write(vma);
vm_flags_reset_once(vma, newflags);
lru_add_drain();
@@ -460,9 +461,9 @@ success:
* It's okay if try_to_unmap_one unmaps a page just after we
* set VM_LOCKED, populate_vma_page_range will bring it back.
*/
-
if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
/* No work to do, and mlocking twice would be wrong */
+ vma_start_write(vma);
vm_flags_reset(vma, newflags);
} else {
mlock_vma_pages_range(vma, start, end, newflags);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 3f36c88a238e..7cd7f644da80 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -656,6 +656,7 @@ success:
* vm_flags and vm_page_prot are protected by the mmap_lock
* held in write mode.
*/
+ vma_start_write(vma);
vm_flags_reset(vma, newflags);
if (vma_wants_manual_pte_write_upgrade(vma))
mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;