Am 31.08.22 um 17:37 schrieb Dmitry Osipenko:
Prepare Tegra DRM driver to the common dynamic dma-buf locking convention
by starting to use the unlocked versions of dma-buf API functions.

Signed-off-by: Dmitry Osipenko <dmitry.osipe...@collabora.com>

Acked-by: Christian König <christian.koe...@amd.com>

---
  drivers/gpu/drm/tegra/gem.c | 17 +++++++++--------
  1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 81991090adcc..b09b8ab40ae4 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -84,7 +84,7 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
                        goto free;
                }
-               map->sgt = dma_buf_map_attachment(map->attach, direction);
+               map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
                if (IS_ERR(map->sgt)) {
                        dma_buf_detach(buf, map->attach);
                        err = PTR_ERR(map->sgt);
@@ -160,7 +160,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
  static void tegra_bo_unpin(struct host1x_bo_mapping *map)
  {
        if (map->attach) {
-               dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
+               dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
+                                                 map->direction);
                dma_buf_detach(map->attach->dmabuf, map->attach);
        } else {
                dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
@@ -181,7 +182,7 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
        if (obj->vaddr) {
                return obj->vaddr;
        } else if (obj->gem.import_attach) {
-               ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+               ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
                return ret ? NULL : map.vaddr;
        } else {
                return vmap(obj->pages, obj->num_pages, VM_MAP,
@@ -197,7 +198,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
        if (obj->vaddr)
                return;
        else if (obj->gem.import_attach)
-               dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
+               dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
        else
                vunmap(addr);
  }
@@ -461,7 +462,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
        get_dma_buf(buf);
 
-       bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+       bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
        if (IS_ERR(bo->sgt)) {
                err = PTR_ERR(bo->sgt);
                goto detach;
@@ -479,7 +480,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
detach:
        if (!IS_ERR_OR_NULL(bo->sgt))
-               dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
+               dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
 
        dma_buf_detach(buf, attach);
        dma_buf_put(buf);
@@ -508,8 +509,8 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
                tegra_bo_iommu_unmap(tegra, bo);
 
        if (gem->import_attach) {
-               dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
-                                        DMA_TO_DEVICE);
+               dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
+                                                 DMA_TO_DEVICE);
                drm_prime_gem_destroy(gem, NULL);
        } else {
                tegra_bo_free(gem->dev, bo);

Reply via email to