Instead of putting that into the job sync object.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c    | 56 +++++++++++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h    |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 10 +++-
 3 files changed, 52 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 090e66a1b284..bac7976975bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -259,6 +259,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
        return 0;
 }
 
+/* Free the entry back to the slab */
+static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
+{
+       hash_del(&e->node);
+       dma_fence_put(e->fence);
+       kmem_cache_free(amdgpu_sync_slab, e);
+}
+
 /**
  * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
@@ -280,9 +288,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 
                if (dma_fence_is_signaled(f)) {
-                       hash_del(&e->node);
-                       dma_fence_put(f);
-                       kmem_cache_free(amdgpu_sync_slab, e);
+                       amdgpu_sync_entry_free(e);
                        continue;
                }
                if (ring && s_fence) {
@@ -355,15 +361,42 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
                        if (r)
                                return r;
                } else {
-                       hash_del(&e->node);
-                       dma_fence_put(f);
-                       kmem_cache_free(amdgpu_sync_slab, e);
+                       amdgpu_sync_entry_free(e);
                }
        }
 
        return 0;
 }
 
+/**
+ * amdgpu_sync_push_to_job - push fences into job
+ * @sync: sync object to get the fences from
+ * @job: job to push the fences into
+ *
+ * Add all unsignaled fences from sync to job.
+ */
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
+{
+       struct amdgpu_sync_entry *e;
+       struct hlist_node *tmp;
+       struct dma_fence *f;
+       int i, r;
+
+       hash_for_each_safe(sync->fences, i, tmp, e, node) {
+               f = e->fence;
+               if (dma_fence_is_signaled(f)) {
+                       amdgpu_sync_entry_free(e);
+                       continue;
+               }
+
+               dma_fence_get(f);
+               r = drm_sched_job_add_dependency(&job->base, f);
+               if (r)
+                       return r;
+       }
+       return 0;
+}
+
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
 {
        struct amdgpu_sync_entry *e;
@@ -375,9 +408,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
                if (r)
                        return r;
 
-               hash_del(&e->node);
-               dma_fence_put(e->fence);
-               kmem_cache_free(amdgpu_sync_slab, e);
+               amdgpu_sync_entry_free(e);
        }
 
        return 0;
@@ -396,11 +427,8 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
        struct hlist_node *tmp;
        unsigned int i;
 
-       hash_for_each_safe(sync->fences, i, tmp, e, node) {
-               hash_del(&e->node);
-               dma_fence_put(e->fence);
-               kmem_cache_free(amdgpu_sync_slab, e);
-       }
+       hash_for_each_safe(sync->fences, i, tmp, e, node)
+               amdgpu_sync_entry_free(e);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 2d5c613cda10..cf1e9e858efd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -30,6 +30,7 @@ struct dma_fence;
 struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
+struct amdgpu_job;
 
 enum amdgpu_sync_mode {
        AMDGPU_SYNC_ALWAYS,
@@ -54,6 +55,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                     struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 126364882d09..59cf64216fbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -87,6 +87,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
                                  struct dma_resv *resv,
                                  enum amdgpu_sync_mode sync_mode)
 {
+       struct amdgpu_sync sync;
        int r;
 
        r = amdgpu_vm_sdma_alloc_job(p, 0);
@@ -96,7 +97,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
        if (!resv)
                return 0;
 
-       return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
+       amdgpu_sync_create(&sync);
+       r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
+       if (!r)
+               r = amdgpu_sync_push_to_job(&sync, p->job);
+       amdgpu_sync_free(&sync);
+       return r;
 }
 
 /**
@@ -232,7 +238,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
        /* Wait for PD/PT moves to be completed */
        dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
-               r = amdgpu_sync_fence(&p->job->sync, fence);
+               r = drm_sched_job_add_dependency(&p->job->base, fence);
                if (r) {
                        dma_resv_iter_end(&cursor);
                        return r;
-- 
2.25.1

Reply via email to