From: Petr Tesarik <petr.tesarik....@huawei.com>

These mappings are never done from atomic context. If a dynamically
allocated bounce buffer is used for the mapping, this change allows it
to be allocated from CMA.

Signed-off-by: Petr Tesarik <petr.tesarik....@huawei.com>
---
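For context, a minimal sketch (not part of the patch) of the usage
pattern this enables, assuming the DMA_ATTR_MAY_SLEEP attribute
introduced earlier in this series; the helper and its can_sleep
parameter are made up for illustration:

#include <linux/dma-mapping.h>

/*
 * Pass DMA_ATTR_MAY_SLEEP only when the mapping is done from a
 * context that may sleep; a dynamically allocated swiotlb bounce
 * buffer backing such a mapping may then come from CMA.
 */
static int example_map(struct device *dev, struct sg_table *sgt,
		       bool can_sleep)
{
	unsigned long attrs = can_sleep ? DMA_ATTR_MAY_SLEEP : 0;

	/* Atomic callers must leave the attribute clear. */
	return dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, attrs);
}
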
 drivers/gpu/drm/drm_gem_shmem_helper.c | 2 +-
 drivers/gpu/drm/drm_prime.c            | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 2b2163c8138e..b5bb4f9c130a 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -702,7 +702,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
                goto err_put_pages;
        }
        /* Map the pages for use by the h/w. */
-       ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+       ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, DMA_ATTR_MAY_SLEEP);
        if (ret)
                goto err_free_sgt;
 
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index f924b8b4ab6b..f32e12445570 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -635,7 +635,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                return sgt;
 
        ret = dma_map_sgtable(attach->dev, sgt, dir,
-                             DMA_ATTR_SKIP_CPU_SYNC);
+                             DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MAY_SLEEP);
        if (ret) {
                sg_free_table(sgt);
                kfree(sgt);
-- 
2.25.1
