author	Jason Gunthorpe <jgg@mellanox.com>	2019-05-23 11:08:28 -0300
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-06-18 12:11:16 -0300
commit	47f245985a4f3e270b1e4f28aa49f4c939527981 (patch)
tree	ba0dcb5d040a33a32c50a9dbad6f467cc65f248d /mm/hmm.c
parent	157816f3775f5c4df2c68ef0e3a100ada974aa2e (diff)
mm/hmm: Hold on to the mmget for the lifetime of the range
Range functions like hmm_range_snapshot() and hmm_range_fault() call
find_vma(), which requires holding the mmget() and the mmap_sem for the mm.
Make this simpler for the callers by holding the mmget() inside the range
for the lifetime of the range. Other functions that accept a range should
only be called if the range is registered.
This has the side effect of directly preventing hmm_release() from
happening while a range is registered. That means hmm->dead can never
become true during the lifetime of a range, so remove the dead flag and
hmm_mirror_mm_is_alive() entirely.
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Philip Yang <Philip.Yang@amd.com>
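For readers unfamiliar with the resulting calling convention, a minimal
sketch follows. It is not part of the patch: the hmm_range_register()
argument list shown is an assumption about this era of the API, while
hmm_range_fault()'s signature and the mmget()/mmput() pairing are taken
from the diff below.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Hypothetical driver fault path showing the lifetime rules after this
 * patch. The hmm_range_register() parameter list is an assumption; only
 * hmm_range_fault(range, block) is confirmed by the diff below.
 */
static long driver_fault_range(struct hmm_range *range, struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	long ret;

	/*
	 * Registration now takes an mmget() reference, so hmm_release()
	 * cannot run while the range exists.
	 */
	ret = hmm_range_register(range, mm, start, end, PAGE_SHIFT);
	if (ret)
		return ret;

	/* The caller still provides mmap_sem, as find_vma() requires it. */
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(range, true);
	up_read(&mm->mmap_sem);

	/* Drops the mmget() taken at registration via mmput(). */
	hmm_range_unregister(range);
	return ret;
}

The lockdep_assert_held() calls added to hmm_range_snapshot() and
hmm_range_fault() enforce the mmap_sem half of this contract, replacing
the removed hmm->dead checks.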
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--	mm/hmm.c	32
1 file changed, 11 insertions(+), 21 deletions(-)
@@ -67,7 +67,6 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
 	mutex_init(&hmm->lock);
 	kref_init(&hmm->kref);
 	hmm->notifiers = 0;
-	hmm->dead = false;
 	hmm->mm = mm;
 
 	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
@@ -120,21 +119,16 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
-	struct hmm_range *range;
 
 	/* Bail out if hmm is in the process of being freed */
 	if (!kref_get_unless_zero(&hmm->kref))
 		return;
 
-	/* Report this HMM as dying. */
-	hmm->dead = true;
-
-	/* Wake-up everyone waiting on any range. */
-	mutex_lock(&hmm->lock);
-	list_for_each_entry(range, &hmm->ranges, list)
-		range->valid = false;
-	wake_up_all(&hmm->wq);
-	mutex_unlock(&hmm->lock);
+	/*
+	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
+	 * prevented as long as a range exists.
+	 */
+	WARN_ON(!list_empty_careful(&hmm->ranges));
 
 	down_write(&hmm->mirrors_sem);
 	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
@@ -903,8 +897,8 @@ int hmm_range_register(struct hmm_range *range,
 	range->start = start;
 	range->end = end;
 
-	/* Check if hmm_mm_destroy() was call. */
-	if (hmm->mm == NULL || hmm->dead)
+	/* Prevent hmm_release() from running while the range is valid */
+	if (!mmget_not_zero(hmm->mm))
 		return -EFAULT;
 
 	/* Initialize range to track CPU page table updates. */
@@ -942,11 +936,12 @@ void hmm_range_unregister(struct hmm_range *range)
 		return;
 
 	mutex_lock(&hmm->lock);
-	list_del(&range->list);
+	list_del_init(&range->list);
 	mutex_unlock(&hmm->lock);
 
 	/* Drop reference taken by hmm_range_register() */
 	range->valid = false;
+	mmput(hmm->mm);
 	hmm_put(hmm);
 	range->hmm = NULL;
 }
@@ -974,10 +969,7 @@ long hmm_range_snapshot(struct hmm_range *range)
 	struct vm_area_struct *vma;
 	struct mm_walk mm_walk;
 
-	/* Check if hmm_mm_destroy() was call. */
-	if (hmm->mm == NULL || hmm->dead)
-		return -EFAULT;
-
+	lockdep_assert_held(&hmm->mm->mmap_sem);
 	do {
 		/* If range is no longer valid force retry. */
 		if (!range->valid)
@@ -1072,9 +1064,7 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 	struct mm_walk mm_walk;
 	int ret;
 
-	/* Check if hmm_mm_destroy() was call. */
-	if (hmm->mm == NULL || hmm->dead)
-		return -EFAULT;
+	lockdep_assert_held(&hmm->mm->mmap_sem);
 
 	do {
 		/* If range is no longer valid force retry. */
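Two small details in the diff deserve a note. The hmm_release() WARN_ON
is safe precisely because registered ranges now pin the mm: by the time
the final mmput() triggers the release notifier, no range can still be
on hmm->ranges, and list_empty_careful() checks that without taking
hmm->lock. And hmm_range_unregister() switches from list_del() to
list_del_init(): list_del() poisons the node's pointers, while
list_del_init() leaves the node pointing at itself, so a later
list_empty() on range->list stays well-defined. A minimal illustration
with a hypothetical helper (not from the patch):

#include <linux/hmm.h>
#include <linux/list.h>

/*
 * Hypothetical helper, not part of the patch: because unregistering
 * uses list_del_init(), the node is re-initialized to point at itself,
 * and list_empty() reliably reports whether the range is still on
 * hmm->ranges. With plain list_del() the pointers would be poisoned
 * and this test would be undefined.
 */
static bool hmm_range_is_registered(const struct hmm_range *range)
{
	return !list_empty(&range->list);
}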