author    Dave Airlie <airlied@redhat.com>  2020-09-17 14:18:51 +1000
committer Dave Airlie <airlied@redhat.com>  2020-09-17 16:07:11 +1000
commit    b40be05ed255d9a0257fb66ab2728ecca2c9d597 (patch)
tree      bf69d734e4413c24ce2e60f273d1de637a4aea5e
parent    818280d5adf1d80e78f95821815148abe9407e14 (diff)
parent    be0704beb229431f206fee3ddd65fa2c5eebdce3 (diff)
Merge branch 'for-5.10-drm-sg-fix' of https://github.com/mszyprow/linux into drm-next
Please pull a set of fixes for various DRM drivers that finally resolve
the incorrect usage of scatterlists (the struct sg_table nents and
orig_nents entries), which causes issues when an IOMMU is used.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200910080505.24456-1-m.szyprowski@samsung.com
36 files changed, 234 insertions, 341 deletions
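
Before the per-driver hunks, a note on the pattern the whole series applies. struct sg_table carries two element counts: orig_nents, the number of CPU-side entries in the scatterlist, and nents, the number of DMA segments actually produced by dma_map_sg() (an IOMMU may coalesce entries, so nents can be smaller). Several of the drivers below mapped with one count and then iterated, synced or unmapped with the other. The dma_map_sgtable()/dma_unmap_sgtable() wrappers and the for_each_sgtable_*() iterators keep the two counters consistent inside the sg_table itself and return a real error code. A minimal before/after sketch of a typical map_dma_buf callback, using a hypothetical my_attachment structure rather than code from any driver touched here:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical per-attachment state, for illustration only. */
struct my_attachment {
	struct sg_table table;
};

/*
 * Old, error-prone shape: dma_map_sg() returns the number of DMA
 * segments (0 on failure); callers often passed ->nents here and then
 * walked the CPU pages with the same counter, which breaks once an
 * IOMMU merges entries.
 */
static struct sg_table *my_map_dma_buf_old(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct my_attachment *a = attach->priv;
	struct sg_table *table = &a->table;

	if (!dma_map_sg(attach->dev, table->sgl, table->orig_nents, dir))
		return ERR_PTR(-ENOMEM);

	return table;
}

/*
 * New shape: dma_map_sgtable() returns 0 or a negative errno and
 * updates table->nents itself, so the two counters cannot drift apart.
 */
static struct sg_table *my_map_dma_buf_new(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct my_attachment *a = attach->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attach->dev, table, dir, 0);
	if (ret)
		return ERR_PTR(ret);

	return table;
}

The matching teardown becomes dma_unmap_sgtable(attach->dev, table, dir, 0); CPU-side walks use for_each_sgtable_sg()/for_each_sgtable_page(), while DMA-side walks use for_each_sgtable_dma_sg()/for_each_sgtable_dma_page().
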
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c index 9f964ca3f59c..d0696cf937af 100644 --- a/drivers/dma-buf/heaps/heap-helpers.c +++ b/drivers/dma-buf/heaps/heap-helpers.c @@ -140,13 +140,12 @@ struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direction) { struct dma_heaps_attachment *a = attachment->priv; - struct sg_table *table; - - table = &a->table; + struct sg_table *table = &a->table; + int ret; - if (!dma_map_sg(attachment->dev, table->sgl, table->nents, - direction)) - table = ERR_PTR(-ENOMEM); + ret = dma_map_sgtable(attachment->dev, table, direction, 0); + if (ret) + table = ERR_PTR(ret); return table; } @@ -154,7 +153,7 @@ static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table, enum dma_data_direction direction) { - dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction); + dma_unmap_sgtable(attachment->dev, table, direction, 0); } static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf) diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index acb26c627d27..89e293bd9252 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -63,10 +63,9 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf, GFP_KERNEL); if (ret < 0) goto err; - if (!dma_map_sg(dev, sg->sgl, sg->nents, direction)) { - ret = -EINVAL; + ret = dma_map_sgtable(dev, sg, direction, 0); + if (ret < 0) goto err; - } return sg; err: @@ -78,7 +77,7 @@ err: static void put_sg_table(struct device *dev, struct sg_table *sg, enum dma_data_direction direction) { - dma_unmap_sg(dev, sg->sgl, sg->nents, direction); + dma_unmap_sgtable(dev, sg, direction, 0); sg_free_table(sg); kfree(sg); } diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 8005614d2e6b..a63008ce284d 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -379,7 +379,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, struct armada_gem_object *dobj = drm_to_armada_gem(obj); struct scatterlist *sg; struct sg_table *sgt; - int i, num; + int i; sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) @@ -395,22 +395,18 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, mapping = dobj->obj.filp->f_mapping; - for_each_sg(sgt->sgl, sg, count, i) { + for_each_sgtable_sg(sgt, sg, i) { struct page *page; page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) { - num = i; + if (IS_ERR(page)) goto release; - } sg_set_page(sg, page, PAGE_SIZE, 0); } - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) { - num = sgt->nents; + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto release; - } } else if (dobj->page) { /* Single contiguous page */ if (sg_alloc_table(sgt, 1, GFP_KERNEL)) @@ -418,7 +414,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0); - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto free_table; } else if (dobj->linear) { /* Single contiguous physical region - no struct page */ @@ -432,8 +428,9 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, return sgt; release: - for_each_sg(sgt->sgl, sg, num, i) - put_page(sg_page(sg)); + for_each_sgtable_sg(sgt, sg, i) + if (sg_page(sg)) + put_page(sg_page(sg)); free_table: sg_free_table(sgt); free_sgt: @@ -449,11 +446,12 @@ static void 
armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, int i; if (!dobj->linear) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + dma_unmap_sgtable(attach->dev, sgt, dir, 0); if (dobj->obj.filp) { struct scatterlist *sg; - for_each_sg(sgt->sgl, sg, sgt->nents, i) + + for_each_sgtable_sg(sgt, sg, i) put_page(sg_page(sg)); } diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 03e01b000f7a..0fe3c496002a 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -127,7 +127,7 @@ drm_clflush_sg(struct sg_table *st) struct sg_page_iter sg_iter; mb(); /*CLFLUSH is ordered only by using memory barriers*/ - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) + for_each_sgtable_page(st, &sg_iter, 0) drm_clflush_page(sg_page_iter_page(&sg_iter)); mb(); /*Make sure that all cache line entry is flushed*/ diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 822edeadbab3..59b9ca207b42 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -471,26 +471,9 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, { struct drm_gem_cma_object *cma_obj; - if (sgt->nents != 1) { - /* check if the entries in the sg_table are contiguous */ - dma_addr_t next_addr = sg_dma_address(sgt->sgl); - struct scatterlist *s; - unsigned int i; - - for_each_sg(sgt->sgl, s, sgt->nents, i) { - /* - * sg_dma_address(s) is only valid for entries - * that have sg_dma_len(s) != 0 - */ - if (!sg_dma_len(s)) - continue; - - if (sg_dma_address(s) != next_addr) - return ERR_PTR(-EINVAL); - - next_addr = sg_dma_address(s) + sg_dma_len(s); - } - } + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) + return ERR_PTR(-EINVAL); /* Create a CMA GEM buffer. */ cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 4b7cfbac4daa..47d8211221f2 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -126,8 +126,8 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj) drm_prime_gem_destroy(obj, shmem->sgt); } else { if (shmem->sgt) { - dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, - shmem->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(obj->dev->dev, shmem->sgt, + DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); } @@ -424,8 +424,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); - dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl, - shmem->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); shmem->sgt = NULL; @@ -697,12 +696,17 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) goto err_put_pages; } /* Map the pages for use by the h/w. 
*/ - dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) + goto err_free_sgt; shmem->sgt = sgt; return sgt; +err_free_sgt: + sg_free_table(sgt); + kfree(sgt); err_put_pages: drm_gem_shmem_put_pages(shmem); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 1693aa7c14b5..b8c7f068a5a4 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, { struct drm_gem_object *obj = attach->dmabuf->priv; struct sg_table *sgt; + int ret; if (WARN_ON(dir == DMA_NONE)) return ERR_PTR(-EINVAL); @@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, else sgt = obj->dev->driver->gem_prime_get_sg_table(obj); - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { + ret = dma_map_sgtable(attach->dev, sgt, dir, + DMA_ATTR_SKIP_CPU_SYNC); + if (ret) { sg_free_table(sgt); kfree(sgt); - sgt = ERR_PTR(-ENOMEM); + sgt = ERR_PTR(ret); } return sgt; @@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, if (!sgt) return; - dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); kfree(sgt); } @@ -826,6 +827,37 @@ out: EXPORT_SYMBOL(drm_prime_pages_to_sg); /** + * drm_prime_get_contiguous_size - returns the contiguous size of the buffer + * @sgt: sg_table describing the buffer to check + * + * This helper calculates the contiguous size in the DMA address space + * of the the buffer described by the provided sg_table. + * + * This is useful for implementing + * &drm_gem_object_funcs.gem_prime_import_sg_table. + */ +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt) +{ + dma_addr_t expected = sg_dma_address(sgt->sgl); + struct scatterlist *sg; + unsigned long size = 0; + int i; + + for_each_sgtable_dma_sg(sgt, sg, i) { + unsigned int len = sg_dma_len(sg); + + if (!len) + break; + if (sg_dma_address(sg) != expected) + break; + expected += len; + size += len; + } + return size; +} +EXPORT_SYMBOL(drm_prime_get_contiguous_size); + +/** * drm_gem_prime_export - helper library implementation of the export callback * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR @@ -959,45 +991,26 @@ EXPORT_SYMBOL(drm_gem_prime_import); int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, dma_addr_t *addrs, int max_entries) { - unsigned count; - struct scatterlist *sg; - struct page *page; - u32 page_len, page_index; - dma_addr_t addr; - u32 dma_len, dma_index; - - /* - * Scatterlist elements contains both pages and DMA addresses, but - * one shoud not assume 1:1 relation between them. The sg->length is - * the size of the physical memory chunk described by the sg->page, - * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk - * described by the sg_dma_address(sg). 
- */ - page_index = 0; - dma_index = 0; - for_each_sg(sgt->sgl, sg, sgt->nents, count) { - page_len = sg->length; - page = sg_page(sg); - dma_len = sg_dma_len(sg); - addr = sg_dma_address(sg); - - while (pages && page_len > 0) { - if (WARN_ON(page_index >= max_entries)) + struct sg_dma_page_iter dma_iter; + struct sg_page_iter page_iter; + struct page **p = pages; + dma_addr_t *a = addrs; + + if (pages) { + for_each_sgtable_page(sgt, &page_iter, 0) { + if (WARN_ON(p - pages >= max_entries)) return -1; - pages[page_index] = page; - page++; - page_len -= PAGE_SIZE; - page_index++; + *p++ = sg_page_iter_page(&page_iter); } - while (addrs && dma_len > 0) { - if (WARN_ON(dma_index >= max_entries)) + } + if (addrs) { + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + if (WARN_ON(a - addrs >= max_entries)) return -1; - addrs[dma_index] = addr; - addr += PAGE_SIZE; - dma_len -= PAGE_SIZE; - dma_index++; + *a++ = sg_page_iter_dma_address(&dma_iter); } } + return 0; } EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index f06e19e7be04..eaf1949bc2e4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -27,7 +27,7 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj) * because display controller, GPU, etc. are not coherent. */ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) - dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0); } static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj) @@ -51,7 +51,7 @@ static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj * discard those writes. */ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK) - dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0); } /* called with etnaviv_obj->lock held */ @@ -404,9 +404,8 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, } if (etnaviv_obj->flags & ETNA_BO_CACHED) { - dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, - etnaviv_obj->sgt->nents, - etnaviv_op_to_dma_dir(op)); + dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt, + etnaviv_op_to_dma_dir(op)); etnaviv_obj->last_cpu_prep_op = op; } @@ -421,8 +420,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj) if (etnaviv_obj->flags & ETNA_BO_CACHED) { /* fini without a prep is almost certainly a userspace error */ WARN_ON(etnaviv_obj->last_cpu_prep_op == 0); - dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl, - etnaviv_obj->sgt->nents, + dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt, etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op)); etnaviv_obj->last_cpu_prep_op = 0; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 3607d348c298..15d9fa3879e5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -73,13 +73,13 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, struct sg_table *sgt, unsigned len, int prot) { struct scatterlist *sg; unsigned int da = iova; - unsigned int i, j; + unsigned int i; int ret; if (!context || !sgt) return -EINVAL; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_dma_sg(sgt, sg, i) { u32 pa = sg_dma_address(sg) - sg->offset; size_t bytes = sg_dma_len(sg) + sg->offset; @@ -95,14 +95,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context 
*context, u32 iova, return 0; fail: - da = iova; - - for_each_sg(sgt->sgl, sg, i, j) { - size_t bytes = sg_dma_len(sg) + sg->offset; - - etnaviv_context_unmap(context, da, bytes); - da += bytes; - } + etnaviv_context_unmap(context, iova, da - iova); return ret; } @@ -113,7 +106,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, unsigned int da = iova; int i; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_dma_sg(sgt, sg, i) { size_t bytes = sg_dma_len(sg) + sg->offset; etnaviv_context_unmap(context, da, bytes); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 03be31427181..967a5cdc120e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -395,8 +395,8 @@ static void g2d_userptr_put_dma_addr(struct g2d_data *g2d, return; out: - dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl, - g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt, + DMA_BIDIRECTIONAL, 0); pages = frame_vector_pages(g2d_userptr->vec); if (!IS_ERR(pages)) { @@ -511,10 +511,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d, g2d_userptr->sgt = sgt; - if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents, - DMA_BIDIRECTIONAL)) { + ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt, + DMA_BIDIRECTIONAL, 0); + if (ret) { DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n"); - ret = -ENOMEM; goto err_sg_free_table; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index efa476858db5..1716a023bca0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -431,27 +431,10 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, { struct exynos_drm_gem *exynos_gem; - if (sgt->nents < 1) + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) { + DRM_ERROR("buffer chunks must be mapped contiguously"); return ERR_PTR(-EINVAL); - - /* - * Check if the provided buffer has been mapped as contiguous - * into DMA address space. 
- */ - if (sgt->nents > 1) { - dma_addr_t next_addr = sg_dma_address(sgt->sgl); - struct scatterlist *s; - unsigned int i; - - for_each_sg(sgt->sgl, s, sgt->nents, i) { - if (!sg_dma_len(s)) - break; - if (sg_dma_address(s) != next_addr) { - DRM_ERROR("buffer chunks must be mapped contiguously"); - return ERR_PTR(-EINVAL); - } - next_addr = sg_dma_address(s) + sg_dma_len(s); - } } exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 27fddc22a7c6..8dd295dbe241 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -48,12 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme src = sg_next(src); } - if (!dma_map_sg_attrs(attachment->dev, - st->sgl, st->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { - ret = -ENOMEM; + ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC); + if (ret) goto err_free_sg; - } return st; @@ -73,9 +70,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, { struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); - dma_unmap_sg_attrs(attachment->dev, - sg->sgl, sg->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sg); kfree(sg); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index debaf7b18ab5..be30b27e2926 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -28,10 +28,9 @@ static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, sg = sg_next(sg); } - if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { - err = -ENOMEM; + err = dma_map_sgtable(attachment->dev, st, dir, 0); + if (err) goto err_st; - } return st; @@ -46,7 +45,7 @@ static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *st, enum dma_data_direction dir) { - dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir); + dma_unmap_sgtable(attachment->dev, st, dir, 0); sg_free_table(st); kfree(st); } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 155f2b4b4030..11223fe348df 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -69,8 +69,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) return ret; if (bo->base.sgt) { - dma_unmap_sg(dev, bo->base.sgt->sgl, - bo->base.sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(bo->base.sgt); } else { bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL); @@ -80,7 +79,13 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) } } - dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL); + ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0); + if (ret) { + sg_free_table(&sgt); + kfree(bo->base.sgt); + bo->base.sgt = NULL; + return ret; + } *bo->base.sgt = sgt; diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c index 5b92fb82674a..2b2739adc7f5 100644 --- a/drivers/gpu/drm/lima/lima_vm.c +++ b/drivers/gpu/drm/lima/lima_vm.c @@ -124,7 +124,7 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) if (err) goto err_out1; - for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) { + for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) { err = 
lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), bo_va->node.start + offset); if (err) @@ -298,8 +298,7 @@ int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff) mutex_lock(&vm->lock); base = bo_va->node.start + (pageoff << PAGE_SHIFT); - for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, - bo->base.sgt->nents, pageoff) { + for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) { err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), base + offset); if (err) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 6190cc3b7b0d..0583e557ad37 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -212,46 +212,28 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { struct mtk_drm_gem_obj *mtk_gem; - int ret; - struct scatterlist *s; - unsigned int i; - dma_addr_t expected; - mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size); + /* check if the entries in the sg_table are contiguous */ + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { + DRM_ERROR("sg_table is not contiguous"); + return ERR_PTR(-EINVAL); + } + mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size); if (IS_ERR(mtk_gem)) return ERR_CAST(mtk_gem); - expected = sg_dma_address(sg->sgl); - for_each_sg(sg->sgl, s, sg->nents, i) { - if (!sg_dma_len(s)) - break; - - if (sg_dma_address(s) != expected) { - DRM_ERROR("sg_table is not contiguous"); - ret = -EINVAL; - goto err_gem_free; - } - expected = sg_dma_address(s) + sg_dma_len(s); - } - mtk_gem->dma_addr = sg_dma_address(sg->sgl); mtk_gem->sg = sg; return &mtk_gem->base; - -err_gem_free: - kfree(mtk_gem); - return ERR_PTR(ret); } void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) { struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); struct sg_table *sgt; - struct sg_page_iter iter; unsigned int npages; - unsigned int i = 0; if (mtk_gem->kvaddr) return mtk_gem->kvaddr; @@ -265,11 +247,8 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) if (!mtk_gem->pages) goto out; - for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { - mtk_gem->pages[i++] = sg_page_iter_page(&iter); - if (i > npages) - break; - } + drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages); + mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b2f49152b4d4..8c7ae812b813 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -53,11 +53,10 @@ static void sync_for_device(struct msm_gem_object *msm_obj) struct device *dev = msm_obj->base.dev->dev; if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { - dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_sync_sgtable_for_device(dev, msm_obj->sgt, + DMA_BIDIRECTIONAL); } else { - dma_map_sg(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); } } @@ -66,11 +65,9 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj) struct device *dev = msm_obj->base.dev->dev; if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) { - dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + 
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0); } } diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c index 310a31b05faa..53a7348476a1 100644 --- a/drivers/gpu/drm/msm/msm_gpummu.c +++ b/drivers/gpu/drm/msm/msm_gpummu.c @@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, { struct msm_gpummu *gpummu = to_msm_gpummu(mmu); unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; - struct scatterlist *sg; + struct sg_dma_page_iter dma_iter; unsigned prot_bits = 0; - unsigned i, j; if (prot & IOMMU_WRITE) prot_bits |= 1; if (prot & IOMMU_READ) prot_bits |= 2; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - dma_addr_t addr = sg->dma_address; - for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) { - gpummu->table[idx] = addr | prot_bits; - addr += GPUMMU_PAGE_SIZE; - } + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + dma_addr_t addr = sg_page_iter_dma_address(&dma_iter); + int i; + + for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE) + gpummu->table[idx++] = (addr + i) | prot_bits; } /* we can improve by deferring flush for multiple map() */ diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 3a381a9674c9..6c31e65834c6 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -36,7 +36,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, struct msm_iommu *iommu = to_msm_iommu(mmu); size_t ret; - ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); + ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot); WARN_ON(!ret); return (ret == len) ? 0 : -EINVAL; diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index d0d12d5dd76c..f67f223c6479 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1297,10 +1297,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj->dma_addr = sg_dma_address(sgt->sgl); } else { /* Create pages list from sgt */ - struct sg_page_iter iter; struct page **pages; unsigned int npages; - unsigned int i = 0; + unsigned int ret; npages = DIV_ROUND_UP(size, PAGE_SIZE); pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); @@ -1311,14 +1310,9 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, } omap_obj->pages = pages; - - for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) { - pages[i++] = sg_page_iter_page(&iter); - if (i > npages) - break; - } - - if (WARN_ON(i != npages)) { + ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, + npages); + if (ret) { omap_gem_free_object(obj); obj = ERR_PTR(-ENOMEM); goto done; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 33355dd302f1..1a6cea0e0bd7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -41,8 +41,8 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) for (i = 0; i < n_sgt; i++) { if (bo->sgts[i].sgl) { - dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl, - bo->sgts[i].nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(pfdev->dev, &bo->sgts[i], + DMA_BIDIRECTIONAL, 0); sg_free_table(&bo->sgts[i]); } } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index e8f7b11352d2..776448c527ea 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -253,7 +253,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct 
panfrost_mmu *mmu, struct io_pgtable_ops *ops = mmu->pgtbl_ops; u64 start_iova = iova; - for_each_sg(sgt->sgl, sgl, sgt->nents, count) { + for_each_sgtable_dma_sg(sgt, sgl, count) { unsigned long paddr = sg_dma_address(sgl); size_t len = sg_dma_len(sgl); @@ -517,10 +517,9 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, if (ret) goto err_pages; - if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) { - ret = -EINVAL; + ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0); + if (ret) goto err_map; - } mmu_map_sg(pfdev, bomapping->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index f1a81c9b184d..a27bff999649 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -197,9 +197,8 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, goto fail; ret = vsp1_du_map_sg(vsp->vsp, sgt); - if (!ret) { + if (ret) { sg_free_table(sgt); - ret = -ENOMEM; goto fail; } } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index b9275ba7c5a5..cb50f2ba2e46 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -36,8 +36,8 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) rk_obj->dma_addr = rk_obj->mm.start; - ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl, - rk_obj->sgt->nents, prot); + ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt, + prot); if (ret < rk_obj->base.size) { DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", ret, rk_obj->base.size); @@ -98,11 +98,10 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) * TODO: Replace this by drm_clflush_sg() once it can be implemented * without relying on symbols that are not exported. 
*/ - for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i) + for_each_sgtable_sg(rk_obj->sgt, s, i) sg_dma_address(s) = sg_phys(s); - dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE); return 0; @@ -350,8 +349,8 @@ void rockchip_gem_free_object(struct drm_gem_object *obj) if (private->domain) { rockchip_gem_iommu_unmap(rk_obj); } else { - dma_unmap_sg(drm->dev, rk_obj->sgt->sgl, - rk_obj->sgt->nents, DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, rk_obj->sgt, + DMA_BIDIRECTIONAL, 0); } drm_prime_gem_destroy(obj, rk_obj->sgt); } else { @@ -460,23 +459,6 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) return sgt; } -static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt, - int count) -{ - struct scatterlist *s; - dma_addr_t expected = sg_dma_address(sgt->sgl); - unsigned int i; - unsigned long size = 0; - - for_each_sg(sgt->sgl, s, count, i) { - if (sg_dma_address(s) != expected) - break; - expected = sg_dma_address(s) + sg_dma_len(s); - size += sg_dma_len(s); - } - return size; -} - static int rockchip_gem_iommu_map_sg(struct drm_device *drm, struct dma_buf_attachment *attach, @@ -493,15 +475,13 @@ rockchip_gem_dma_map_sg(struct drm_device *drm, struct sg_table *sg, struct rockchip_gem_object *rk_obj) { - int count = dma_map_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); - if (!count) - return -EINVAL; + int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); + if (err) + return err; - if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) { + if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { DRM_ERROR("failed to map sg_table to contiguous linear address.\n"); - dma_unmap_sg(drm->dev, sg->sgl, sg->nents, - DMA_BIDIRECTIONAL); + dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0); return -EINVAL; } diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 723df142a981..01d94befab11 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -98,8 +98,8 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, * the SG table needs to be copied to avoid overwriting any * other potential users of the original SG table. 
*/ - err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents, - GFP_KERNEL); + err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, + obj->sgt->orig_nents, GFP_KERNEL); if (err < 0) goto free; } else { @@ -196,8 +196,7 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) bo->iova = bo->mm->start; - bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl, - bo->sgt->nents, prot); + bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot); if (!bo->size) { dev_err(tegra->drm->dev, "failed to map buffer\n"); err = -ENOMEM; @@ -264,8 +263,7 @@ free: static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) { if (bo->pages) { - dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); + dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0); drm_gem_put_pages(&bo->gem, bo->pages, true, true); sg_free_table(bo->sgt); kfree(bo->sgt); @@ -290,12 +288,9 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) goto put_pages; } - err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); - if (err == 0) { - err = -EFAULT; + err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0); + if (err) goto free_sgt; - } return 0; @@ -571,7 +566,7 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, goto free; } - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if (dma_map_sgtable(attach->dev, sgt, dir, 0)) goto free; return sgt; @@ -590,7 +585,7 @@ static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, struct tegra_bo *bo = to_tegra_bo(gem); if (bo->pages) - dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); @@ -609,8 +604,7 @@ static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf, struct drm_device *drm = gem->dev; if (bo->pages) - dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_FROM_DEVICE); + dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE); return 0; } @@ -623,8 +617,7 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf, struct drm_device *drm = gem->dev; if (bo->pages) - dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE); return 0; } diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 4cd0461cc508..539d14935728 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -131,12 +131,9 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) } if (sgt) { - err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - if (err == 0) { - err = -ENOMEM; + err = dma_map_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); + if (err) goto unpin; - } /* * The display controller needs contiguous memory, so @@ -144,7 +141,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) * map its SG table to a single contiguous chunk of * I/O virtual memory. 
*/ - if (err > 1) { + if (sgt->nents > 1) { err = -EINVAL; goto unpin; } @@ -166,8 +163,7 @@ unpin: struct sg_table *sgt = state->sgt[i]; if (sgt) - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); + dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; @@ -186,8 +182,7 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) struct sg_table *sgt = state->sgt[i]; if (sgt) - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); + dma_unmap_sgtable(dc->dev, sgt, DMA_TO_DEVICE, 0); host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 3b81ea28c0bb..5a453532901f 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -90,18 +90,17 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; - unsigned int count; - struct scatterlist *sgl; + struct sg_dma_page_iter dma_iter; - for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) { - u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; + for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { + dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); + u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; u32 pte = page_prot | page_address; u32 i; - BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >= + BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= BIT(24)); - - for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++) + for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++) v3d->pt[page++] = pte + i; } diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 842f8b61aa89..00d6b95e259d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -72,9 +72,8 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) if (shmem->pages) { if (shmem->mapped) { - dma_unmap_sg(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->mapped, - DMA_TO_DEVICE); + dma_unmap_sgtable(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE, 0); shmem->mapped = 0; } @@ -164,13 +163,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, } if (use_dma_api) { - shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent, - shmem->pages->sgl, - shmem->pages->nents, - DMA_TO_DEVICE); - *nents = shmem->mapped; + ret = dma_map_sgtable(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE, 0); + if (ret) + return ret; + *nents = shmem->mapped = shmem->pages->nents; } else { - *nents = shmem->pages->nents; + *nents = shmem->pages->orig_nents; } *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry), @@ -180,13 +179,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, return -ENOMEM; } - for_each_sg(shmem->pages->sgl, sg, *nents, si) { - (*ents)[si].addr = cpu_to_le64(use_dma_api - ? 
sg_dma_address(sg) - : sg_phys(sg)); - (*ents)[si].length = cpu_to_le32(sg->length); - (*ents)[si].padding = 0; + if (use_dma_api) { + for_each_sgtable_dma_sg(shmem->pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg)); + (*ents)[si].length = cpu_to_le32(sg_dma_len(sg)); + (*ents)[si].padding = 0; + } + } else { + for_each_sgtable_sg(shmem->pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_phys(sg)); + (*ents)[si].length = cpu_to_le32(sg->length); + (*ents)[si].padding = 0; + } } + return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index c93c2db35aaf..651d1b0e8e8d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -302,7 +302,7 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) return NULL; } - for_each_sg(sgt->sgl, sg, *sg_ents, i) { + for_each_sgtable_sg(sgt, sg, i) { pg = vmalloc_to_page(data); if (!pg) { sg_free_table(sgt); @@ -603,9 +603,8 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) - dma_sync_sg_for_device(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->pages->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); @@ -1019,9 +1018,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) - dma_sync_sg_for_device(vgdev->vdev->dev.parent, - shmem->pages->sgl, shmem->pages->nents, - DMA_TO_DEVICE); + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + shmem->pages, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index c7f10b2c93d2..13c31e2d7254 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -362,8 +362,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) { struct device *dev = vmw_tt->dev_priv->dev->dev; - dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents, - DMA_BIDIRECTIONAL); + dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents; } @@ -383,16 +382,8 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt) { struct device *dev = vmw_tt->dev_priv->dev->dev; - int ret; - - ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents, - DMA_BIDIRECTIONAL); - if (unlikely(ret == 0)) - return -ENOMEM; - vmw_tt->sgt.nents = ret; - - return 0; + return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); } /** @@ -449,10 +440,10 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (unlikely(ret != 0)) goto out_sg_alloc_fail; - if (vsgt->num_pages > vmw_tt->sgt.nents) { + if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { uint64_t over_alloc = sgl_size * (vsgt->num_pages - - vmw_tt->sgt.nents); + vmw_tt->sgt.orig_nents); ttm_mem_global_free(glob, over_alloc); vmw_tt->sg_alloc_size -= over_alloc; diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 534daf37c97e..a487384d5cb7 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ 
-217,7 +217,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, return ERR_PTR(ret); DRM_DEBUG("Imported buffer of size %zu with nents %u\n", - size, sgt->nents); + size, sgt->orig_nents); return &xen_obj->base; } diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c index 89b6c14b7392..82d0a60ba3f7 100644 --- a/drivers/gpu/host1x/job.c +++ b/drivers/gpu/host1x/job.c @@ -170,11 +170,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto unpin; } - err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); - if (!err) { - err = -ENOMEM; + err = dma_map_sgtable(dev, sgt, dir, 0); + if (err) goto unpin; - } job->unpins[job->num_unpins].dev = dev; job->unpins[job->num_unpins].dir = dir; @@ -228,7 +226,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) } if (host->domain) { - for_each_sg(sgt->sgl, sg, sgt->nents, j) + for_each_sgtable_sg(sgt, sg, j) gather_size += sg->length; gather_size = iova_align(&host->iova, gather_size); @@ -240,9 +238,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) goto put; } - err = iommu_map_sg(host->domain, + err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc), - sgt->sgl, sgt->nents, IOMMU_READ); + sgt, IOMMU_READ); if (err == 0) { __free_iova(&host->iova, alloc); err = -EINVAL; @@ -252,12 +250,9 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job) job->unpins[job->num_unpins].size = gather_size; phys_addr = iova_dma_addr(&host->iova, alloc); } else if (sgt) { - err = dma_map_sg(host->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - if (!err) { - err = -ENOMEM; + err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0); + if (err) goto put; - } job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; job->unpins[job->num_unpins].dev = host->dev; @@ -660,8 +655,7 @@ void host1x_job_unpin(struct host1x_job *job) } if (unpin->dev && sgt) - dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents, - unpin->dir); + dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0); host1x_bo_unpin(dev, unpin->bo, sgt); host1x_bo_put(unpin->bo); diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index a4a45d68a6ef..86d5e3f4b1ff 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -912,8 +912,8 @@ int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt) * skip cache sync. This will need to be revisited when support for * non-coherent buffers will be added to the DU driver. 
*/ - return dma_map_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents, - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + return dma_map_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); } EXPORT_SYMBOL_GPL(vsp1_du_map_sg); @@ -921,8 +921,8 @@ void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); - dma_unmap_sg_attrs(vsp1->bus_master, sgt->sgl, sgt->nents, - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); } EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg); diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index b1b6eebafd5d..4c13cbc99896 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -247,10 +247,9 @@ static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf, if (sgt) { if (gntdev_dmabuf_attach->dir != DMA_NONE) - dma_unmap_sg_attrs(attach->dev, sgt->sgl, - sgt->nents, - gntdev_dmabuf_attach->dir, - DMA_ATTR_SKIP_CPU_SYNC); + dma_unmap_sgtable(attach->dev, sgt, + gntdev_dmabuf_attach->dir, + DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); } @@ -288,8 +287,8 @@ dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach, sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages, gntdev_dmabuf->nr_pages); if (!IS_ERR(sgt)) { - if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, - DMA_ATTR_SKIP_CPU_SYNC)) { + if (dma_map_sgtable(attach->dev, sgt, dir, + DMA_ATTR_SKIP_CPU_SYNC)) { sg_free_table(sgt); kfree(sgt); sgt = ERR_PTR(-ENOMEM); @@ -633,7 +632,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, /* Now convert sgt to array of pages and check for page validity. */ i = 0; - for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) { + for_each_sgtable_page(sgt, &sg_iter, 0) { struct page *page = sg_page_iter_page(&sg_iter); /* * Check if page is valid: this can happen if we are given diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 9af7422b44cf..47ef11614627 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -92,6 +92,8 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags); +unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt); + /* helper functions for importing */ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf, diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c index 3cc5e5921682..e03068917273 100644 --- a/samples/vfio-mdev/mbochs.c +++ b/samples/vfio-mdev/mbochs.c @@ -846,7 +846,7 @@ static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at, if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount, 0, dmabuf->mode.size, GFP_KERNEL) < 0) goto err2; - if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction)) + if (dma_map_sgtable(at->dev, sg, direction, 0)) goto err3; return sg; @@ -868,6 +868,7 @@ static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at, dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id); + dma_unmap_sgtable(at->dev, sg, direction, 0); sg_free_table(sg); kfree(sg); } |
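
Besides the mechanical dma_map_sgtable() conversions, the series adds one new helper, drm_prime_get_contiguous_size(), exported from drm_prime.c and declared in include/drm/drm_prime.h, and switches the CMA helper, exynos, mediatek and rockchip importers over to it in place of their open-coded contiguity walks. A rough sketch of how an importer that needs a single contiguous DMA range is expected to use it (generic names, not code from this pull):

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static struct drm_gem_object *
my_gem_prime_import_sg_table(struct drm_device *dev,
			     struct dma_buf_attachment *attach,
			     struct sg_table *sgt)
{
	/*
	 * The helper walks the DMA-mapped entries (nents, not
	 * orig_nents) and returns how many bytes are contiguous in the
	 * device's DMA address space starting from the first segment.
	 */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/*
	 * A real driver would allocate its GEM object here and record
	 * sg_dma_address(sgt->sgl) as the buffer's DMA address; the
	 * return below is only a placeholder for this sketch.
	 */
	return ERR_PTR(-ENOSYS);
}
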