author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-02 14:05:27 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-02 14:05:27 -0700
commit		cfa3b8068b09f25037146bfd5eed041b78878bee
tree		8053e072074c05cb1fe176181cef3fdeedbaa24b /drivers/gpu
parent		194098915ac74daddca9d6ed46fd11be57f45e16
parent		f07e2f6be37a750737b93f5635485171ad459eb9

Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull hmm updates from Jason Gunthorpe:
"This series adds a selftest for hmm_range_fault() and several of the
DEVICE_PRIVATE migration-related actions, and another simplification
for hmm_range_fault()'s API.
- Simplify hmm_range_fault() with a simpler return code, no
HMM_PFN_SPECIAL, and no customizable output PFN format
- Add a selftest for hmm_range_fault() and DEVICE_PRIVATE related
functionality"
* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
MAINTAINERS: add HMM selftests
mm/hmm/test: add selftests for HMM
mm/hmm/test: add selftest driver for HMM
mm/hmm: remove the customizable pfn format from hmm_range_fault
mm/hmm: remove HMM_PFN_SPECIAL
drm/amdgpu: remove dead code after hmm_range_fault()
mm/hmm: make hmm_range_fault return 0 or -1
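
The headline API change is the last commit above: hmm_range_fault() now returns 0 or a -errno instead of a pfn count, takes its output in a plain unsigned long hmm_pfns[] array, and callers decode entries with hmm_pfn_to_page(). A minimal sketch of the resulting calling convention, distilled from the amdgpu and nouveau hunks below — the function name, parameters, and error handling here are illustrative, not code from this series:

#include <linux/hmm.h>
#include <linux/mmu_notifier.h>
#include <linux/mm.h>

static int example_fault_range(struct mmu_interval_notifier *notifier,
			       unsigned long start, unsigned long npages,
			       struct page **pages)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = start + (npages << PAGE_SHIFT),
		/* fault everything, writable; per-pfn request bits unused */
		.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
	};
	struct mm_struct *mm = notifier->mm;
	unsigned long i;
	int ret;

	range.hmm_pfns = kvmalloc_array(npages, sizeof(*range.hmm_pfns),
					GFP_KERNEL);
	if (!range.hmm_pfns)
		return -ENOMEM;

retry:
	if (time_after(jiffies, timeout)) {
		ret = -EBUSY;
		goto out_free;
	}
	range.notifier_seq = mmu_interval_read_begin(range.notifier);
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range);	/* 0 or -errno, never a count */
	up_read(&mm->mmap_sem);
	if (ret) {
		if (ret == -EBUSY)	/* raced with an invalidation */
			goto retry;
		goto out_free;
	}

	/*
	 * default_flags guarantees every entry is HMM_PFN_VALID here; in
	 * real code the results are only usable under the driver's notifier
	 * lock, after mmu_interval_read_retry() reports no collision.
	 */
	for (i = 0; i < npages; i++)
		pages[i] = hmm_pfn_to_page(range.hmm_pfns[i]);
out_free:
	kvfree(range.hmm_pfns);
	return ret;
}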
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c	56
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dmem.c	27
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_dmem.h	3
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_svm.c	94
4 files changed, 82 insertions, 98 deletions
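
Most of the churn in these four files comes from dropping the customizable pfn format. Condensed from the hunks below into a before/after fragment (not compilable on its own; every line is lifted or lightly adapted from this diff):

/* Before: each driver described its own pfn encoding to HMM ... */
static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
	(1 << 0), /* HMM_PFN_VALID */
	(1 << 1), /* HMM_PFN_WRITE */
};
range->flags = hmm_range_flags;
range->values = hmm_range_values;
range->pfn_shift = PAGE_SHIFT;
page = hmm_device_entry_to_page(range, range->pfns[i]);

/* After: fixed HMM_PFN_* flag bits in a plain unsigned long array */
range->default_flags = HMM_PFN_REQ_FAULT;
if (!amdgpu_ttm_tt_is_readonly(ttm))
	range->default_flags |= HMM_PFN_REQ_WRITE;
if (range->hmm_pfns[i] & HMM_PFN_VALID)
	page = hmm_pfn_to_page(range->hmm_pfns[i]);

With the per-driver flag/value tables gone, HMM_PFN_SPECIAL disappears as well, per the shortlog above.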
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6309ff72bd78..b1c62da527c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -766,18 +766,6 @@ struct amdgpu_ttm_tt {
 };
 
 #ifdef CONFIG_DRM_AMDGPU_USERPTR
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
-	(1 << 0), /* HMM_PFN_VALID */
-	(1 << 1), /* HMM_PFN_WRITE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
-	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
-	0, /* HMM_PFN_NONE */
-	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
 /**
  * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
  * memory and start HMM tracking CPU page table update
@@ -816,18 +804,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 		goto out;
 	}
 	range->notifier = &bo->notifier;
-	range->flags = hmm_range_flags;
-	range->values = hmm_range_values;
-	range->pfn_shift = PAGE_SHIFT;
 	range->start = bo->notifier.interval_tree.start;
 	range->end = bo->notifier.interval_tree.last + 1;
-	range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+	range->default_flags = HMM_PFN_REQ_FAULT;
 	if (!amdgpu_ttm_tt_is_readonly(ttm))
-		range->default_flags |= range->flags[HMM_PFN_WRITE];
+		range->default_flags |= HMM_PFN_REQ_WRITE;
 
-	range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
-				     GFP_KERNEL);
-	if (unlikely(!range->pfns)) {
+	range->hmm_pfns = kvmalloc_array(ttm->num_pages,
+					 sizeof(*range->hmm_pfns), GFP_KERNEL);
+	if (unlikely(!range->hmm_pfns)) {
 		r = -ENOMEM;
 		goto out_free_ranges;
 	}
@@ -852,27 +837,23 @@ retry:
 	down_read(&mm->mmap_sem);
 	r = hmm_range_fault(range);
 	up_read(&mm->mmap_sem);
-	if (unlikely(r <= 0)) {
+	if (unlikely(r)) {
 		/*
 		 * FIXME: This timeout should encompass the retry from
 		 * mmu_interval_read_retry() as well.
 		 */
-		if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+		if (r == -EBUSY && !time_after(jiffies, timeout))
 			goto retry;
 		goto out_free_pfns;
 	}
 
-	for (i = 0; i < ttm->num_pages; i++) {
-		/* FIXME: The pages cannot be touched outside the notifier_lock */
-		pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (unlikely(!pages[i])) {
-			pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
-			       i, range->pfns[i]);
-			r = -ENOMEM;
-
-			goto out_free_pfns;
-		}
-	}
+	/*
+	 * Due to default_flags, all pages are HMM_PFN_VALID or
+	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
+	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
+	 */
+	for (i = 0; i < ttm->num_pages; i++)
+		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
 
 	gtt->range = range;
 	mmput(mm);
@@ -882,7 +863,7 @@ retry:
 out_unlock:
 	up_read(&mm->mmap_sem);
 out_free_pfns:
-	kvfree(range->pfns);
+	kvfree(range->hmm_pfns);
 out_free_ranges:
 	kfree(range);
 out:
@@ -907,7 +888,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
 		gtt->userptr, ttm->num_pages);
 
-	WARN_ONCE(!gtt->range || !gtt->range->pfns,
+	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
 		"No user pages to check\n");
 
 	if (gtt->range) {
@@ -917,7 +898,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 		 */
 		r = mmu_interval_read_retry(gtt->range->notifier,
 					    gtt->range->notifier_seq);
-		kvfree(gtt->range->pfns);
+		kvfree(gtt->range->hmm_pfns);
 		kfree(gtt->range);
 		gtt->range = NULL;
 	}
@@ -1008,8 +989,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 
 		for (i = 0; i < ttm->num_pages; i++) {
 			if (ttm->pages[i] !=
-				hmm_device_entry_to_page(gtt->range,
-					      gtt->range->pfns[i]))
+			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
 				break;
 		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ad89e09a0be3..3364904eccff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -85,7 +85,7 @@ static inline struct nouveau_dmem *page_to_dmem(struct page *page)
 	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
 }
 
-static unsigned long nouveau_dmem_page_addr(struct page *page)
+unsigned long nouveau_dmem_page_addr(struct page *page)
 {
 	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
 	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
@@ -671,28 +671,3 @@ out_free_src:
 out:
 	return ret;
 }
-
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
-			 struct hmm_range *range)
-{
-	unsigned long i, npages;
-
-	npages = (range->end - range->start) >> PAGE_SHIFT;
-	for (i = 0; i < npages; ++i) {
-		struct page *page;
-		uint64_t addr;
-
-		page = hmm_device_entry_to_page(range, range->pfns[i]);
-		if (page == NULL)
-			continue;
-
-		if (!is_device_private_page(page))
-			continue;
-
-		addr = nouveau_dmem_page_addr(page);
-		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
-		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
-		range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
-	}
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
index 92394be5d649..db3b59b210af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -37,9 +37,8 @@ int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 			     struct vm_area_struct *vma,
 			     unsigned long start,
 			     unsigned long end);
+unsigned long nouveau_dmem_page_addr(struct page *page);
 
-void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
-			      struct hmm_range *range);
 #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
 static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
 static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 645fedd77e21..407e34a5c0ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -369,19 +369,6 @@ out_free:
 	return ret;
 }
 
-static const u64
-nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
-	[HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
-	[HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
-};
-
-static const u64
-nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
-	[HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
-	[HMM_PFN_NONE   ] =  NVIF_VMM_PFNMAP_V0_NONE,
-	[HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
-};
-
 /* Issue fault replay for GPU to retry accesses that faulted previously. */
 static void
 nouveau_svm_fault_replay(struct nouveau_svm *svm)
@@ -519,9 +506,45 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
 	.invalidate = nouveau_svm_range_invalidate,
 };
 
+static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
+				    struct hmm_range *range, u64 *ioctl_addr)
+{
+	unsigned long i, npages;
+
+	/*
+	 * The ioctl_addr prepared here is passed through nvif_object_ioctl()
+	 * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
+	 *
+	 * This is all just encoding the internal hmm representation into a
+	 * different nouveau internal representation.
+	 */
+	npages = (range->end - range->start) >> PAGE_SHIFT;
+	for (i = 0; i < npages; ++i) {
+		struct page *page;
+
+		if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
+			ioctl_addr[i] = 0;
+			continue;
+		}
+
+		page = hmm_pfn_to_page(range->hmm_pfns[i]);
+		if (is_device_private_page(page))
+			ioctl_addr[i] = nouveau_dmem_page_addr(page) |
+					NVIF_VMM_PFNMAP_V0_V |
+					NVIF_VMM_PFNMAP_V0_VRAM;
+		else
+			ioctl_addr[i] = page_to_phys(page) |
+					NVIF_VMM_PFNMAP_V0_V |
+					NVIF_VMM_PFNMAP_V0_HOST;
+		if (range->hmm_pfns[i] & HMM_PFN_WRITE)
+			ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+	}
+}
+
 static int nouveau_range_fault(struct nouveau_svmm *svmm,
 			       struct nouveau_drm *drm, void *data, u32 size,
-			       u64 *pfns, struct svm_notifier *notifier)
+			       unsigned long hmm_pfns[], u64 *ioctl_addr,
+			       struct svm_notifier *notifier)
 {
 	unsigned long timeout =
 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -530,26 +553,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
 		.notifier = &notifier->notifier,
 		.start = notifier->notifier.interval_tree.start,
 		.end = notifier->notifier.interval_tree.last + 1,
-		.pfns = pfns,
-		.flags = nouveau_svm_pfn_flags,
-		.values = nouveau_svm_pfn_values,
-		.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+		.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
+		.hmm_pfns = hmm_pfns,
 	};
 	struct mm_struct *mm = notifier->notifier.mm;
-	long ret;
+	int ret;
 
 	while (true) {
 		if (time_after(jiffies, timeout))
 			return -EBUSY;
 
 		range.notifier_seq = mmu_interval_read_begin(range.notifier);
-		range.default_flags = 0;
-		range.pfn_flags_mask = -1UL;
 		down_read(&mm->mmap_sem);
 		ret = hmm_range_fault(&range);
 		up_read(&mm->mmap_sem);
-		if (ret <= 0) {
-			if (ret == 0 || ret == -EBUSY)
+		if (ret) {
+			/*
+			 * FIXME: the input PFN_REQ flags are destroyed on
+			 * -EBUSY, we need to regenerate them, also for the
+			 * other continue below
+			 */
+			if (ret == -EBUSY)
 				continue;
 			return ret;
 		}
@@ -563,7 +587,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
 			break;
 	}
 
-	nouveau_dmem_convert_pfn(drm, &range);
+	nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);
 
 	svmm->vmm->vmm.object.client->super = true;
 	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
@@ -590,6 +614,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		} i;
 		u64 phys[16];
 	} args;
+	unsigned long hmm_pfns[ARRAY_SIZE(args.phys)];
 	struct vm_area_struct *vma;
 	u64 inst, start, limit;
 	int fi, fn, pi, fill;
@@ -705,12 +730,17 @@ nouveau_svm_fault(struct nvif_notify *notify)
 			 * access flags.
 			 *XXX: atomic?
 			 */
-			if (buffer->fault[fn]->access != 0 /* READ. */ &&
-			    buffer->fault[fn]->access != 3 /* PREFETCH. */) {
-				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
-						  NVIF_VMM_PFNMAP_V0_W;
-			} else {
-				args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
+			switch (buffer->fault[fn]->access) {
+			case 0: /* READ. */
+				hmm_pfns[pi++] = HMM_PFN_REQ_FAULT;
+				break;
+			case 3: /* PREFETCH. */
+				hmm_pfns[pi++] = 0;
+				break;
+			default:
+				hmm_pfns[pi++] = HMM_PFN_REQ_FAULT |
+						 HMM_PFN_REQ_WRITE;
+				break;
 			}
 
 			args.i.p.size = pi << PAGE_SHIFT;
@@ -738,7 +768,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
 				fill = (buffer->fault[fn    ]->addr -
 					buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
 				while (--fill)
-					args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
+					hmm_pfns[pi++] = 0;
 			}
 
 			SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
@@ -754,7 +784,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
 				ret = nouveau_range_fault(
 					svmm, svm->drm, &args,
 					sizeof(args.i) + pi * sizeof(args.phys[0]),
-					args.phys, &notifier);
+					hmm_pfns, args.phys, &notifier);
 				mmu_interval_notifier_remove(&notifier.notifier);
 			}
 			mmput(mm);
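
One caveat this merge leaves as a FIXME in nouveau_range_fault(): hmm_range_fault() writes its output bits into the same hmm_pfns[] array that carries the HMM_PFN_REQ_* input flags, so on an -EBUSY retry the inputs have already been clobbered and must be regenerated. A sketch of what that regeneration might look like — the function shape, its "want_write" parameter, and the assumption that range->pfn_flags_mask covers both request bits are mine, not code from this series:

static int example_retry_fault(struct hmm_range *range,
			       struct mm_struct *mm,
			       unsigned long npages, bool want_write)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long i;
	int ret;

	do {
		if (time_after(jiffies, timeout))
			return -EBUSY;

		/* re-fill the request flags hmm_range_fault() overwrote */
		for (i = 0; i < npages; i++)
			range->hmm_pfns[i] = HMM_PFN_REQ_FAULT |
				(want_write ? HMM_PFN_REQ_WRITE : 0);

		range->notifier_seq = mmu_interval_read_begin(range->notifier);
		down_read(&mm->mmap_sem);
		ret = hmm_range_fault(range);
		up_read(&mm->mmap_sem);
	} while (ret == -EBUSY);

	return ret;
}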