dma_map_sg() expects a DMA domain. However, the DRM devices have
traditionally been using an unmanaged IOMMU domain, which is not a
DMA-type domain. Using the DMA mapping APIs with such a domain is
invalid.

Replace the dma_map_sg()/dma_unmap_sg() calls with
dma_sync_sg_for_{device,cpu}() to perform the cache maintenance.

Signed-off-by: Vivek Gautam <vivek.gau...@codeaurora.org>
Suggested-by: Tomasz Figa <tf...@chromium.org>
---

Tested on an MTP sdm845:
https://github.com/vivekgautam1/linux/tree/v4.19/sdm845-mtp-display-working

 drivers/gpu/drm/msm/msm_gem.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 00c795ced02c..d7a7af610803 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -81,6 +81,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
                struct drm_device *dev = obj->dev;
                struct page **p;
                int npages = obj->size >> PAGE_SHIFT;
+               struct scatterlist *s;
+               int i;
 
                if (use_pages(obj))
                        p = drm_gem_get_pages(obj);
@@ -107,9 +109,19 @@ static struct page **get_pages(struct drm_gem_object *obj)
                /* For non-cached buffers, ensure the new pages are clean
                 * because display controller, GPU, etc. are not coherent:
                 */
-               if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                       dma_map_sg(dev->dev, msm_obj->sgt->sgl,
-                                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+               if (msm_obj->flags & (MSM_BO_WC | MSM_BO_UNCACHED)) {
+                       /*
+                        * Fake up the SG table so that dma_sync_sg_*()
+                        * can be used to flush the pages associated with it.
+                        */
+                       for_each_sg(msm_obj->sgt->sgl, s,
+                                   msm_obj->sgt->nents, i)
+                               sg_dma_address(s) = sg_phys(s);
+
+                       dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+                                              msm_obj->sgt->nents,
+                                              DMA_TO_DEVICE);
+               }
        }
 
        return msm_obj->pages;
@@ -137,10 +149,11 @@ static void put_pages(struct drm_gem_object *obj)
                         * pages are clean because display controller,
                         * GPU, etc. are not coherent:
                         */
-                       if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                               dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
-                                            msm_obj->sgt->nents,
-                                            DMA_BIDIRECTIONAL);
+                       if (msm_obj->flags & (MSM_BO_WC | MSM_BO_UNCACHED))
+                               dma_sync_sg_for_cpu(obj->dev->dev,
+                                                   msm_obj->sgt->sgl,
+                                                   msm_obj->sgt->nents,
+                                                   DMA_FROM_DEVICE);
 
                        sg_free_table(msm_obj->sgt);
                        kfree(msm_obj->sgt);
-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a member
of Code Aurora Forum, hosted by The Linux Foundation

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to