The Documentation/DMA-API-HOWTO.txt states that dma_map_sg returns the
number of the created entries in the DMA address space. However the
subsequent calls to dma_sync_sg_for_{device,cpu} and dma_unmap_sg must be
called with the original number of entries passed to dma_map_sg. The
sg_table->nents in turn holds the result of the dma_map_sg call as stated
in include/linux/scatterlist.h. Adapt the code to obey those rules.

Signed-off-by: Marek Szyprowski <m.szyprow...@samsung.com>
---
For more information, see '[PATCH v2 00/21] DRM: fix struct sg_table nents
vs. orig_nents misuse' thread: https://lkml.org/lkml/2020/5/4/373
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c 
b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index dc9ef30..a224a97 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -27,7 +27,8 @@ static void etnaviv_gem_scatter_map(struct etnaviv_gem_object 
*etnaviv_obj)
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-               dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+               sgt->nents = dma_map_sg(dev->dev, sgt->sgl, sgt->orig_nents,
+                                       DMA_BIDIRECTIONAL);
 }
 
 static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object 
*etnaviv_obj)
@@ -51,7 +52,8 @@ static void etnaviv_gem_scatterlist_unmap(struct 
etnaviv_gem_object *etnaviv_obj
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
-               dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+               dma_unmap_sg(dev->dev, sgt->sgl, sgt->orig_nents,
+                            DMA_BIDIRECTIONAL);
 }
 
 /* called with etnaviv_obj->lock held */
@@ -405,7 +407,7 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
-                                   etnaviv_obj->sgt->nents,
+                                   etnaviv_obj->sgt->orig_nents,
                                    etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }
@@ -422,7 +424,7 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
-                       etnaviv_obj->sgt->nents,
+                       etnaviv_obj->sgt->orig_nents,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }
-- 
1.9.1

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to