Use the proper amdgpu_gem_prime_import function to handle all kinds of
imports. Remember the dmabuf reference in kgd_mem so that the same BO
can be attached to multiple VMs on multiple GPUs without erroneously
re-exporting the underlying BO each time.

Signed-off-by: Felix Kuehling <felix.kuehl...@amd.com>
---
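Note for reviewers, not part of the commit message: below is a
simplified sketch of the reference ownership this change establishes.
The helper name import_for_kfd_sketch is made up and error handling is
omitted; the point is that each kgd_mem keeps exactly one dma_buf
reference of its own, so attaching the BO to further VMs on other GPUs
never re-exports the underlying BO.

static int import_for_kfd_sketch(struct amdgpu_device *adev,
                                 struct dma_buf *dma_buf,
                                 struct kgd_mem *mem)
{
        struct drm_gem_object *obj;

        /* Handles all kinds of imports, native and foreign */
        obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        /* A dynamic attachment took an extra dma_buf reference; drop it
         * so the only long-lived reference is the one kept in mem->dmabuf.
         */
        if (obj->import_attach)
                dma_buf_put(dma_buf);

        get_dma_buf(dma_buf);           /* reference owned by the kgd_mem */
        mem->dmabuf = dma_buf;
        mem->bo = gem_to_amdgpu_bo(obj);
        return 0;
}

As far as I can tell, the existing free path already does
dma_buf_put(mem->dmabuf) when it is set, which balances the
get_dma_buf() taken here.
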
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 45 ++++++++++++-------
 1 file changed, 28 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 90b985436878..d53d19b9d6dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2000,30 +2000,34 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
        struct amdgpu_bo *bo;
        int ret;
 
-       if (dma_buf->ops != &amdgpu_dmabuf_ops)
-               /* Can't handle non-graphics buffers */
-               return -EINVAL;
-
-       obj = dma_buf->priv;
-       if (drm_to_adev(obj->dev) != adev)
-               /* Can't handle buffers from other devices */
-               return -EINVAL;
+       obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+       if (obj->import_attach) {
+               /* Import takes an extra reference if it creates a dynamic
+                * dmabuf attachment. Release that reference to avoid leaking
+                * it. FIXME: that extra reference itself looks like a bug.
+                */
+               dma_buf_put(dma_buf);
+       }
 
        bo = gem_to_amdgpu_bo(obj);
        if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
-                                   AMDGPU_GEM_DOMAIN_GTT)))
+                                   AMDGPU_GEM_DOMAIN_GTT))) {
                /* Only VRAM and GTT BOs are supported */
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_put_obj;
+       }
 
        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-       if (!*mem)
-               return -ENOMEM;
+       if (!*mem) {
+               ret = -ENOMEM;
+               goto err_put_obj;
+       }
 
        ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
-       if (ret) {
-               kfree(mem);
-               return ret;
-       }
+       if (ret)
+               goto err_free_mem;
 
        if (size)
                *size = amdgpu_bo_size(bo);
@@ -2040,7 +2044,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
                | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
                | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-       drm_gem_object_get(&bo->tbo.base);
+       get_dma_buf(dma_buf);
+       (*mem)->dmabuf = dma_buf;
        (*mem)->bo = bo;
        (*mem)->va = va;
        (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
@@ -2052,6 +2057,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
        (*mem)->is_imported = true;
 
        return 0;
+
+err_free_mem:
+       kfree(*mem);
+err_put_obj:
+       drm_gem_object_put(obj);
+       return ret;
 }
 
 /* Evict a userptr BO by stopping the queues if necessary
-- 
2.32.0
