Prepare the i915 driver for the common dynamic dma-buf locking
convention by starting to use the unlocked versions of the dma-buf API
functions and by handling the case where the importer now holds the
reservation lock.

Acked-by: Christian König <christian.koe...@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.r...@intel.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipe...@collabora.com>
---
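
A minimal sketch of the locking convention this series moves to, for
reviewers' reference (illustrative only, not part of the patch; the
example_* helpers are made up, while the dma-buf and dma-resv calls are
the locked and unlocked entry points introduced earlier in the series):

  #include <linux/dma-buf.h>
  #include <linux/dma-resv.h>
  #include <linux/iosys-map.h>

  /*
   * dma_buf_vmap() now expects dmabuf->resv to be held, so a caller
   * that doesn't already hold it has to take it explicitly.
   */
  static int example_vmap_locked(struct dma_buf *dmabuf,
                                 struct iosys_map *map)
  {
          int ret;

          dma_resv_lock(dmabuf->resv, NULL);
          ret = dma_buf_vmap(dmabuf, map);
          dma_resv_unlock(dmabuf->resv);

          return ret;
  }

  /* dma_buf_vmap_unlocked() takes dmabuf->resv internally instead. */
  static int example_vmap_unlocked(struct dma_buf *dmabuf,
                                   struct iosys_map *map)
  {
          return dma_buf_vmap_unlocked(dmabuf, map);
  }

In this patch, the selftests are plain importers that don't hold the
lock, hence the switch to the _unlocked helpers; the i915 vmap callback,
by contrast, is now invoked by the dma-buf core with the reservation
already held, hence the switch from pin_map_unlocked to pin_map.
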
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c       |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c       | 14 ++++++++++++++
 .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 16 ++++++++--------
 3 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index f5062d0c6333..07eee1c09aaf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -72,7 +72,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        void *vaddr;
 
-       vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+       vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7ff9c7877bec..3e3f63f86629 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -290,7 +290,21 @@ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
        __i915_gem_object_free_mmaps(obj);
 
        atomic_set(&obj->mm.pages_pin_count, 0);
+
+       /*
+        * dma_buf_unmap_attachment() requires the reservation to be
+        * locked. The imported GEM shouldn't share the reservation
+        * lock and ttm_bo_cleanup_memtype_use() shouldn't be invoked
+        * for a dma-buf, so it's safe to take the lock here.
+        */
+       if (obj->base.import_attach)
+               i915_gem_object_lock(obj, NULL);
+
        __i915_gem_object_put_pages(obj);
+
+       if (obj->base.import_attach)
+               i915_gem_object_unlock(obj);
+
        GEM_BUG_ON(i915_gem_object_has_pages(obj));
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 51ed824b020c..f2f3cfad807b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -213,7 +213,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                goto out_import;
        }
 
-       st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
+       st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(st)) {
                err = PTR_ERR(st);
                goto out_detach;
@@ -226,7 +226,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
                timeout = -ETIME;
        }
        err = timeout > 0 ? 0 : timeout;
-       dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
+       dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL);
 out_detach:
        dma_buf_detach(dmabuf, import_attach);
 out_import:
@@ -296,7 +296,7 @@ static int igt_dmabuf_import(void *arg)
                goto out_obj;
        }
 
-       err = dma_buf_vmap(dmabuf, &map);
+       err = dma_buf_vmap_unlocked(dmabuf, &map);
        dma_map = err ? NULL : map.vaddr;
        if (!dma_map) {
                pr_err("dma_buf_vmap failed\n");
@@ -337,7 +337,7 @@ static int igt_dmabuf_import(void *arg)
 
        err = 0;
 out_dma_map:
-       dma_buf_vunmap(dmabuf, &map);
+       dma_buf_vunmap_unlocked(dmabuf, &map);
 out_obj:
        i915_gem_object_put(obj);
 out_dmabuf:
@@ -358,7 +358,7 @@ static int igt_dmabuf_import_ownership(void *arg)
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);
 
-       err = dma_buf_vmap(dmabuf, &map);
+       err = dma_buf_vmap_unlocked(dmabuf, &map);
        ptr = err ? NULL : map.vaddr;
        if (!ptr) {
                pr_err("dma_buf_vmap failed\n");
@@ -367,7 +367,7 @@ static int igt_dmabuf_import_ownership(void *arg)
        }
 
        memset(ptr, 0xc5, PAGE_SIZE);
-       dma_buf_vunmap(dmabuf, &map);
+       dma_buf_vunmap_unlocked(dmabuf, &map);
 
        obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
        if (IS_ERR(obj)) {
@@ -418,7 +418,7 @@ static int igt_dmabuf_export_vmap(void *arg)
        }
        i915_gem_object_put(obj);
 
-       err = dma_buf_vmap(dmabuf, &map);
+       err = dma_buf_vmap_unlocked(dmabuf, &map);
        ptr = err ? NULL : map.vaddr;
        if (!ptr) {
                pr_err("dma_buf_vmap failed\n");
@@ -435,7 +435,7 @@ static int igt_dmabuf_export_vmap(void *arg)
        memset(ptr, 0xc5, dmabuf->size);
 
        err = 0;
-       dma_buf_vunmap(dmabuf, &map);
+       dma_buf_vunmap_unlocked(dmabuf, &map);
 out:
        dma_buf_put(dmabuf);
        return err;
-- 
2.37.3
