author     Liam R. Howlett <Liam.Howlett@oracle.com>    2023-01-26 16:20:49 -0500
committer  Andrew Morton <akpm@linux-foundation.org>    2023-02-09 16:51:33 -0800
commit     27b267011296e35dd5c983bf6c53b7230c78f383 (patch)
tree       df031321caa6afdd9ceb2f0781d220891e10c86c /mm/mmap.c
parent     d60beb1f698a429825ea2c463ee9e3dc3b1a79b7 (diff)
ipc/shm: introduce new do_vma_munmap() to munmap
The shm code already has the vma iterator in position for a write.
do_vmi_munmap() searches for the correct position and aligns the write, so
it is not the right function to use in this case.

The shm VMA tree modification is similar to the brk munmap situation: the
vma iterator is in position and the VMA is already known.  This patch
generalizes the brk munmap function do_brk_munmap() into do_vma_munmap()
so it can be used by any caller that already has the vma iterator in
position to munmap a VMA.

Link: https://lkml.kernel.org/r/20230126212049.980501-1-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reported-by: Sven Schnelle <svens@linux.ibm.com>
Link: https://lore.kernel.org/linux-mm/yt9dh6wec21a.fsf@linux.ibm.com/
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
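For illustration only (not part of this patch): the kind of call site this
change enables looks roughly like the sketch below.  The helper name
unmap_known_vma() and the lookup via vma_find() are assumptions for the
example, not code from ipc/shm; it only assumes the preconditions the new
function documents, namely that mmap_lock is held for write and that the
vma iterator is already positioned on the VMA to be unmapped.

        #include <linux/mm.h>

        /* Hypothetical sketch: unmap one VMA whose position is already known. */
        static int unmap_known_vma(struct mm_struct *mm, unsigned long addr)
        {
                VMA_ITERATOR(vmi, mm, addr);
                struct vm_area_struct *vma;

                mmap_assert_write_locked(mm);

                /* Positions the iterator on the VMA covering addr, if any. */
                vma = vma_find(&vmi, addr + 1);
                if (!vma)
                        return -EINVAL;

                /* Iterator is in position: no re-search, no lock downgrade. */
                return do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
                                NULL, false);
        }

Compare this with do_vmi_munmap(), which would walk the tree again to find
and align the range before modifying it.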
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--   mm/mmap.c   38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 894017841d5d..408e9cc47333 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -180,9 +180,6 @@ static int check_brk_limits(unsigned long addr, unsigned long len)
 
         return mlock_future_check(current->mm, current->mm->def_flags, len);
 }
-static int do_brk_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
-                unsigned long newbrk, unsigned long oldbrk,
-                struct list_head *uf);
 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
                 unsigned long addr, unsigned long request, unsigned long flags);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
@@ -236,7 +233,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
         /*
          * Always allow shrinking brk.
-         * do_brk_munmap() may downgrade mmap_lock to read.
+         * do_vma_munmap() may downgrade mmap_lock to read.
          */
         if (brk <= mm->brk) {
                 int ret;
@@ -248,11 +245,11 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                         goto out; /* mapping intersects with an existing non-brk vma. */
                 /*
                  * mm->brk must be protected by write mmap_lock.
-                 * do_brk_munmap() may downgrade the lock, so update it
-                 * before calling do_brk_munmap().
+                 * do_vma_munmap() may downgrade the lock, so update it
+                 * before calling do_vma_munmap().
                  */
                 mm->brk = brk;
-                ret = do_brk_munmap(&vmi, brkvma, newbrk, oldbrk, &uf);
+                ret = do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true);
                 if (ret == 1) {
                         downgraded = true;
                         goto success;
@@ -2951,26 +2948,27 @@ out:
 }
 
 /*
- * brk_munmap() - Unmap a full or partial vma.
- * @vmi: The vma iterator
- * @vma: The vma to be modified
- * @newbrk: the start of the address to unmap
- * @oldbrk: The end of the address to unmap
+ * do_vma_munmap() - Unmap a full or partial vma.
+ * @vmi: The vma iterator pointing at the vma
+ * @vma: The first vma to be munmapped
+ * @start: the start of the address to unmap
+ * @end: The end of the address to unmap
  * @uf: The userfaultfd list_head
+ * @downgrade: Attempt to downgrade or not
  *
- * Returns: 1 on success.
- * unmaps a partial VMA mapping.  Does not handle alignment, downgrades lock if
- * possible.
+ * Returns: 0 on success and not downgraded, 1 on success and downgraded.
+ * unmaps a VMA mapping when the vma iterator is already in position.
+ * Does not handle alignment.
  */
-static int do_brk_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
-                unsigned long newbrk, unsigned long oldbrk,
-                struct list_head *uf)
+int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+                unsigned long start, unsigned long end,
+                struct list_head *uf, bool downgrade)
 {
         struct mm_struct *mm = vma->vm_mm;
         int ret;
 
-        arch_unmap(mm, newbrk, oldbrk);
-        ret = do_vmi_align_munmap(vmi, vma, mm, newbrk, oldbrk, uf, true);
+        arch_unmap(mm, start, end);
+        ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
         validate_mm_mt(mm);
         return ret;
 }
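As a closing illustration of the return convention documented in the new
kernel-doc above (again a sketch under assumptions, not code from this
patch; the wrapper name unmap_at_iter() is hypothetical): a caller that
passes downgrade == true must release whichever lock it is left holding,
since a return of 1 means the unmap succeeded and mmap_lock was downgraded
to read, while 0 means success with the write lock still held.

        #include <linux/mm.h>

        /* Hypothetical wrapper: consume the 0/1 return of do_vma_munmap(). */
        static int unmap_at_iter(struct vma_iterator *vmi,
                        struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
        {
                struct mm_struct *mm = vma->vm_mm;
                int ret;

                /* Ask do_vma_munmap() to downgrade mmap_lock on success. */
                ret = do_vma_munmap(vmi, vma, start, end, NULL, true);
                if (ret == 1)
                        mmap_read_unlock(mm);   /* 1: success, lock downgraded */
                else
                        mmap_write_unlock(mm);  /* 0 or error: still write-locked */

                return ret < 0 ? ret : 0;
        }

This mirrors the brk() hunk above, where a return of 1 sets downgraded and
the syscall later drops the read lock instead of the write lock.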