author		Michel Lespinasse <walken@google.com>	2020-06-08 21:33:25 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-09 09:39:14 -0700
commit		d8ed45c5dcd455fc5848d47f86883a1b872ac0d0 (patch)
tree		f9270b32da5f3f7be73b086c99d3dfc29a13161a /mm/khugepaged.c
parent		0adf65f53aae86aa86d8dccada02890545de8938 (diff)
mmap locking API: use coccinelle to convert mmap_sem rwsem call sites
This change converts the existing mmap_sem rwsem call sites to the new mmap
locking API.
The change was generated using coccinelle with the following rule:
// spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir .
@@
expression mm;
@@
(
-init_rwsem
+mmap_init_lock
|
-down_write
+mmap_write_lock
|
-down_write_killable
+mmap_write_lock_killable
|
-down_write_trylock
+mmap_write_trylock
|
-up_write
+mmap_write_unlock
|
-downgrade_write
+mmap_write_downgrade
|
-down_read
+mmap_read_lock
|
-down_read_killable
+mmap_read_lock_killable
|
-down_read_trylock
+mmap_read_trylock
|
-up_read
+mmap_read_unlock
)
-(&mm->mmap_sem)
+(mm)
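For context, each new call is a one-to-one wrapper around the corresponding
rwsem operation on mm->mmap_sem, which is what makes a purely mechanical
coccinelle conversion safe. A minimal sketch of the wrapper style (the
authoritative definitions are introduced earlier in this series, in
include/linux/mmap_lock.h):

/* Sketch of the wrappers as of this point in the series; see
 * include/linux/mmap_lock.h for the authoritative definitions. */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
}

static inline bool mmap_write_trylock(struct mm_struct *mm)
{
	return down_write_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_sem);
}

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}

Hiding the rwsem behind an API also means the lock implementation can be
changed later without touching every call site again.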
Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--	mm/khugepaged.c	36
1 file changed, 18 insertions, 18 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3f032487825b..19f3401e568a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -536,8 +536,8 @@ void __khugepaged_exit(struct mm_struct *mm)
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
-		down_write(&mm->mmap_sem);
-		up_write(&mm->mmap_sem);
+		mmap_write_lock(mm);
+		mmap_write_unlock(mm);
	}
}
@@ -995,7 +995,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
	/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
	if (ret & VM_FAULT_RETRY) {
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
		if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
			/* vma is no longer available, don't continue to swapin */
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
@@ -1052,7 +1052,7 @@ static void collapse_huge_page(struct mm_struct *mm,
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
@@ -1065,17 +1065,17 @@ static void collapse_huge_page(struct mm_struct *mm,
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
		goto out_nolock;
	}
	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
		goto out_nolock;
	}
@@ -1086,17 +1086,17 @@ static void collapse_huge_page(struct mm_struct *mm,
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
-		up_read(&mm->mmap_sem);
+		mmap_read_unlock(mm);
		goto out_nolock;
	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
-	down_write(&mm->mmap_sem);
+	mmap_write_lock(mm);
	result = SCAN_ANY_PROCESS;
	if (!mmget_still_valid(mm))
		goto out;
@@ -1184,7 +1184,7 @@ static void collapse_huge_page(struct mm_struct *mm,
	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
@@ -1517,7 +1517,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;
-	if (!down_write_trylock(&mm->mmap_sem))
+	if (!mmap_write_trylock(mm))
		return -EBUSY;
	if (unlikely(khugepaged_test_exit(mm)))
@@ -1528,7 +1528,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
out:
	mm_slot->nr_pte_mapped_thp = 0;
-	up_write(&mm->mmap_sem);
+	mmap_write_unlock(mm);
	return 0;
}
@@ -1573,12 +1573,12 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
		 * mmap_sem while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
-		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
+		if (mmap_write_trylock(vma->vm_mm)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
-			up_write(&vma->vm_mm->mmap_sem);
+			mmap_write_unlock(vma->vm_mm);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		} else {
@@ -2057,7 +2057,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
	 * the next mm on the list.
	 */
	vma = NULL;
-	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
+	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);
@@ -2102,7 +2102,7 @@ skip:
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);
-				up_read(&mm->mmap_sem);
+				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
@@ -2122,7 +2122,7 @@ skip:
		}
	}
breakouterloop:
-	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
+	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:
	spin_lock(&khugepaged_mm_lock);
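The khugepaged conversion above only exercises the plain lock/unlock and
trylock variants. For completeness, a hypothetical caller of the killable
variant renamed by the rule (this example is not part of the patch) would
follow the usual down_write_killable() pattern:

/* Hypothetical caller, not from this patch: the killable variant
 * returns -EINTR if the sleeping task is killed, in which case the
 * lock was not taken and must not be released. */
static int example_write_op(struct mm_struct *mm)
{
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	/* ... modify the address space under the write lock ... */
	mmap_write_unlock(mm);
	return 0;
}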