DRM GEM objects private to a single GPUVM can use a shared dma-resv.
Make use of the shared dma-resv of GPUVM rather than a driver specific
one.

The shared dma-resv originates from a "root" GEM object serving as
container for the dma-resv to make it compatible with drm_exec.

In order to make sure the object providing the shared dma-resv can't be
freed up before the objects making use of it, let every such GEM object
take a reference on it.

Signed-off-by: Danilo Krummrich <d...@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c   | 11 +++++++++--
 drivers/gpu/drm/nouveau/nouveau_bo.h   |  5 +++++
 drivers/gpu/drm/nouveau/nouveau_gem.c  | 10 ++++++++--
 drivers/gpu/drm/nouveau/nouveau_uvmm.c |  7 ++-----
 drivers/gpu/drm/nouveau/nouveau_uvmm.h |  1 -
 5 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c 
b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0f3bd187ede6..7afad86da64b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,10 +148,17 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
         * If nouveau_bo_new() allocated this buffer, the GEM object was never
         * initialized, so don't attempt to release it.
         */
-       if (bo->base.dev)
+       if (bo->base.dev) {
+               /* Gem objects not being shared with other VMs get their
+                * dma_resv from a root GEM object.
+                */
+               if (nvbo->no_share)
+                       drm_gem_object_put(nvbo->r_obj);
+
                drm_gem_object_release(&bo->base);
-       else
+       } else {
                dma_resv_fini(&bo->base._resv);
+       }
 
        kfree(nvbo);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h 
b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 07f671cf895e..70c551921a9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -26,6 +26,11 @@ struct nouveau_bo {
        struct list_head entry;
        int pbbo_index;
        bool validate_mapped;
+
+       /* Root GEM object we derive the dma_resv of in case this BO is not
+        * shared between VMs.
+        */
+       struct drm_gem_object *r_obj;
        bool no_share;
 
        /* GPU address space is independent of CPU word size */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c 
b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a0d303e5ce3d..49c2bcbef129 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -111,7 +111,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct 
drm_file *file_priv)
        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return 0;
 
-       if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
+       if (nvbo->no_share && uvmm &&
+           drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
                return -EPERM;
 
        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
@@ -245,7 +246,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int 
align, uint32_t domain,
                if (unlikely(!uvmm))
                        return -EINVAL;
 
-               resv = &uvmm->resv;
+               resv = drm_gpuvm_resv(&uvmm->base);
        }
 
        if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
@@ -288,6 +289,11 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int 
align, uint32_t domain,
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;
 
+       if (nvbo->no_share) {
+               nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
+               drm_gem_object_get(nvbo->r_obj);
+       }
+
        *pnvbo = nvbo;
        return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c 
b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index f74bf30bc683..8977a518de96 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1802,7 +1802,6 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct 
nouveau_cli *cli,
        int ret;
 
        mutex_init(&uvmm->mutex);
-       dma_resv_init(&uvmm->resv);
        mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
        mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
 
@@ -1842,14 +1841,14 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct 
nouveau_cli *cli,
                            kernel_managed_addr, kernel_managed_size,
                            NULL, 0, &cli->uvmm.vmm.vmm);
        if (ret)
-               goto out_free_gpuva_mgr;
+               goto out_gpuvm_fini;
 
        cli->uvmm.vmm.cli = cli;
        mutex_unlock(&cli->mutex);
 
        return 0;
 
-out_free_gpuva_mgr:
+out_gpuvm_fini:
        drm_gpuvm_destroy(&uvmm->base);
 out_unlock:
        mutex_unlock(&cli->mutex);
@@ -1907,6 +1906,4 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
        nouveau_vmm_fini(&uvmm->vmm);
        drm_gpuvm_destroy(&uvmm->base);
        mutex_unlock(&cli->mutex);
-
-       dma_resv_fini(&uvmm->resv);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h 
b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index 06a0c36de392..22607270fae0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -12,7 +12,6 @@ struct nouveau_uvmm {
        struct nouveau_vmm vmm;
        struct maple_tree region_mt;
        struct mutex mutex;
-       struct dma_resv resv;
 
        bool disabled;
 };
-- 
2.41.0

Reply via email to