On 09.06.2017 00:06, Andres Rodriguez wrote:
This is useful for changing an entity's priority at runtime.

Signed-off-by: Andres Rodriguez <[email protected]>
---
  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 26 +++++++++++++++++++++++---
  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  6 +++++-
  2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a203736..c19bb85 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -121,30 +121,31 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
  int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
  {
        int r;
if (!(sched && entity && rq))
                return -EINVAL;
memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;
+       spin_lock_init(&entity->rq_lock);
        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;
atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);
return 0;
  }
/**
   * Query if entity is initialized
   *
   * @sched       Pointer to scheduler instance
@@ -192,62 +193,79 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
return true;
  }
/**
   * Destroy a context entity
   *
   * @sched       Pointer to scheduler instance
   * @entity    The pointer to a valid scheduler entity
   *
   * Cleanup and free the allocated resources.
   */
  void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
  {
-       struct amd_sched_rq *rq = entity->rq;
-
        if (!amd_sched_entity_is_initialized(sched, entity))
                return;
        /**
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
-       amd_sched_rq_remove_entity(rq, entity);
+       amd_sched_entity_set_rq(entity, NULL);
+
        kfifo_free(&entity->job_queue);
  }
static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
  {
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
        amd_sched_wakeup(entity->sched);
  }
static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
  {
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
  }
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                            struct amd_sched_rq *rq)
+{
+       if (entity->rq == rq)
+               return;
+
+       spin_lock(&entity->rq_lock);
+
+       if (entity->rq)
+               amd_sched_rq_remove_entity(entity->rq, entity);
+
+       entity->rq = rq;
+       if (rq)
+               amd_sched_rq_add_entity(rq, entity);
+
+       spin_unlock(&entity->rq_lock);
+}
+
  bool amd_sched_dependency_optimized(struct dma_fence* fence,
                                    struct amd_sched_entity *entity)
  {
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
                return false;
        if (fence->context == entity->fence_context)
                return true;
        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched)
                return true;
return false;
@@ -321,31 +339,33 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;
spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                        sizeof(sched_job)) == sizeof(sched_job);
if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;
        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
+               spin_lock(&entity->rq_lock);
                amd_sched_rq_add_entity(entity->rq, entity);
+               spin_unlock(&entity->rq_lock);
                amd_sched_wakeup(sched);
        }
        return added;
  }
/* job_finish is called after hw fence signaled, and
   * the job had already been deleted from ring_mirror_list
   */
  static void amd_sched_job_finish(struct work_struct *work)
  {
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;
/* remove job from ring_mirror_list */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index b9283b5..51d626d 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,33 +27,35 @@
  #include <linux/kfifo.h>
  #include <linux/dma-fence.h>
struct amd_gpu_scheduler;
  struct amd_sched_rq;
  struct amd_sched_priority_ctr;
/**
   * A scheduler entity is a wrapper around a job queue or a group
   * of other entities. Entities take turns emitting jobs from their
   * job queues to corresponding hardware ring based on scheduling
   * policy.
  */
  struct amd_sched_entity {
        struct list_head                list;
-       struct amd_sched_rq             *rq;
        struct amd_gpu_scheduler        *sched;
+       spinlock_t                      rq_lock;
+       struct amd_sched_rq             *rq;
+

Keep the order here; the "list" member belongs to the rq member (it is the entity's node on that rq).
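
To illustrate (only a sketch, not part of the patch; the exact spot for
rq_lock is just my suggestion), what I have in mind is roughly:

    struct amd_sched_entity {
            struct list_head                list;
            struct amd_sched_rq             *rq;    /* "list" is the node on this rq */
            spinlock_t                      rq_lock;
            struct amd_gpu_scheduler        *sched;

            spinlock_t                      queue_lock;
            struct kfifo                    job_queue;

            atomic_t                        fence_seq;
            uint64_t                        fence_context;

            struct dma_fence                *dependency;
            struct dma_fence_cb             cb;
    };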

Apart from that, the patch looks good to me,
Christian.

        spinlock_t                      queue_lock;
        struct kfifo                    job_queue;
atomic_t fence_seq;
        uint64_t                        fence_context;
struct dma_fence *dependency;
        struct dma_fence_cb             cb;
  };
/**
   * Run queue is a set of entities scheduling command submissions for
   * one specific ring. It implements the scheduling policy that selects
   * the next entity to emit commands from.
  */
@@ -154,30 +156,32 @@ struct amd_gpu_scheduler {
        spinlock_t                      job_list_lock;
  };
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   uint32_t hw_submission, long timeout, const char *name);
  void amd_sched_fini(struct amd_gpu_scheduler *sched);
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs);
  void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity);
  void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+                            struct amd_sched_rq *rq);
int amd_sched_fence_slab_init(void);
  void amd_sched_fence_slab_fini(void);
struct amd_sched_fence *amd_sched_fence_create(
        struct amd_sched_entity *s_entity, void *owner);
  void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
  void amd_sched_fence_finished(struct amd_sched_fence *fence);
  int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner);
  void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
  void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
  bool amd_sched_dependency_optimized(struct dma_fence* fence,


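Since the commit message is about changing an entity's priority at runtime,
here is a rough sketch of what I would expect a caller to look like (my own
example, not from this series; it assumes the per-priority sched_rq[] array
and the amd_sched_priority enum already in gpu_scheduler.h):

    /* Example only: move an entity to the run queue that matches "priority".
     * amd_sched_entity_set_rq() drops the entity from its old rq and adds it
     * to the new one under entity->rq_lock; passing NULL (as
     * amd_sched_entity_fini() now does) just removes it from scheduling.
     */
    static void example_entity_set_priority(struct amd_sched_entity *entity,
                                            enum amd_sched_priority priority)
    {
            struct amd_gpu_scheduler *sched = entity->sched;

            amd_sched_entity_set_rq(entity, &sched->sched_rq[priority]);
    }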