On 09.06.2017 at 00:06, Andres Rodriguez wrote:
Introduce amdgpu_ctx_priority_get/put(). This is a refcounted mechanism to
change a context's priority.

A context's priority will be set to the highest priority for which a
request exists.

If no active requests exist, the context will default to the priority
requested at context allocation time.

Note that the priority requested at allocation does not create a
request; therefore, it can be overridden by a get() request for a lower
priority.
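
(The amd_sched_priority_ctr implementation itself is not part of this
patch; purely as an illustration of the semantics described above, it
could be sketched roughly like this, with all names and layout
hypothetical:)

struct amd_sched_priority_ctr {
        /* priority requested at context allocation time */
        enum amd_sched_priority default_priority;
        /* one active-request count per priority level */
        atomic_t                requests[AMD_SCHED_PRIORITY_MAX];
        /* callback that applies a new effective priority */
        void (*set_priority)(struct amd_sched_priority_ctr *p,
                             enum amd_sched_priority priority);
};

/*
 * The highest priority with an active request wins; with no active
 * requests the context falls back to its allocation-time priority.
 * Note how a single get() on a low priority overrides a higher
 * default, since the default never holds a request of its own.
 */
static enum amd_sched_priority
amd_sched_priority_ctr_effective(struct amd_sched_priority_ctr *p)
{
        int i;

        for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= 0; i--)
                if (atomic_read(&p->requests[i]) > 0)
                        return i;

        return p->default_priority;
}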

So you just use the maximum of the per-process and the per-context priority?

Sounds logical, since the per-context priority can likewise only be used by the DRM master.

But you don't need the complicated amd_sched_priority_ctr handling for this.

Just add a min priority field into amdgpu_ctx_mgr and use that to override the context priority.
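
Something like this (untested; the min_priority field and the helper
are just for illustration):

struct amdgpu_ctx_mgr {
        struct amdgpu_device    *adev;
        struct mutex            lock;
        /* protected by lock */
        struct idr              ctx_handles;
        /* per process floor, only raised by the DRM master */
        enum amd_sched_priority min_priority;
};

/* the effective priority is simply the max of both */
static enum amd_sched_priority
amdgpu_ctx_effective_priority(struct amdgpu_ctx_mgr *mgr,
                              enum amd_sched_priority ctx_priority)
{
        return max(mgr->min_priority, ctx_priority);
}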

Christian.


Signed-off-by: Andres Rodriguez <[email protected]>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  8 +++++-
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 43 +++++++++++++++++++++++++++++++++
  2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 04ea1b9..b998f42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -813,47 +813,53 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
        uint64_t                sequence;
        struct dma_fence        **fences;
        struct amd_sched_entity entity;
  };

struct amdgpu_ctx {
        struct kref             refcount;
        struct amdgpu_device    *adev;
        struct amdgpu_queue_mgr queue_mgr;
        unsigned                reset_counter;
        spinlock_t              ring_lock;
        struct dma_fence        **fences;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
-       bool preamble_presented;
+       bool                    preamble_presented;
+
+       struct amd_sched_priority_ctr   priority_ctr;
  };

struct amdgpu_ctx_mgr {
        struct amdgpu_device    *adev;
        struct mutex            lock;
        /* protected by lock */
        struct idr              ctx_handles;
  };

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
  int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct dma_fence *fence);
  struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_get(struct amdgpu_ctx *ctx,
+                            enum amd_sched_priority priority);
+void amdgpu_ctx_priority_put(struct amdgpu_ctx *ctx,
+                            enum amd_sched_priority priority);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
  void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

/*
   * file private structure
   */

struct amdgpu_fpriv {
        struct amdgpu_vm        vm;
        struct amdgpu_bo_va     *prt_va;
        struct mutex            bo_list_lock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0285eef..cc15b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -29,30 +29,53 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                  enum amd_sched_priority priority)
  {
        /* NORMAL and below are accessible by everyone */
        if (priority <= AMD_SCHED_PRIORITY_NORMAL)
                return 0;

        if (capable(CAP_SYS_NICE))
                return 0;

        if (drm_is_current_master(filp))
                return 0;

        return -EACCES;
  }

+static void amdgpu_ctx_priority_set(struct amd_sched_priority_ctr *p,
+                                   enum amd_sched_priority priority)
+{
+       int i;
+       struct amd_sched_rq *rq;
+       struct amd_sched_entity *entity;
+       struct amdgpu_ring *ring;
+       struct amdgpu_ctx *ctx = container_of(p, struct amdgpu_ctx,
+                                             priority_ctr);
+       struct amdgpu_device *adev = ctx->adev;
+
+       for (i = 0; i < adev->num_rings; i++) {
+               ring = adev->rings[i];
+               entity = &ctx->rings[i].entity;
+               rq = &ring->sched.sched_rq[priority];
+
+               if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+                       continue;
+
+               amd_sched_entity_set_rq(entity, rq);
+       }
+}
+
  static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           enum amd_sched_priority priority,
                           struct drm_file *filp,
                           struct amdgpu_ctx *ctx)
  {
        unsigned i, j;
        int r;

        if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
                return -EINVAL;

        r = amdgpu_ctx_priority_permit(filp, priority);
        if (r)
                return r;
@@ -76,53 +99,59 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                struct amd_sched_rq *rq;

                rq = &ring->sched.sched_rq[priority];

                if (ring == &adev->gfx.kiq.ring)
                        continue;

                r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
                                          rq, amdgpu_sched_jobs);
                if (r)
                        goto failed;
        }

+       amd_sched_priority_ctr_init(&ctx->priority_ctr,
+                                   priority,
+                                   amdgpu_ctx_priority_set);
+
        r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
        if (r)
                goto failed;

        return 0;

failed:
        for (j = 0; j < i; j++)
                amd_sched_entity_fini(&adev->rings[j]->sched,
                                      &ctx->rings[j].entity);
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
  }

static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
  {
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

+       amd_sched_priority_ctr_fini(&ctx->priority_ctr);
+
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < amdgpu_sched_jobs; ++j)
                        dma_fence_put(ctx->rings[i].fences[j]);
        kfree(ctx->fences);
        ctx->fences = NULL;

        for (i = 0; i < adev->num_rings; i++)
                amd_sched_entity_fini(&adev->rings[i]->sched,
                                      &ctx->rings[i].entity);

        amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
  }

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
@@ -345,30 +374,44 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                return ERR_PTR(-EINVAL);
        }

        if (seq + amdgpu_sched_jobs < cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
  }
+void amdgpu_ctx_priority_get(struct amdgpu_ctx *ctx,
+                            enum amd_sched_priority priority)
+{
+       amd_sched_priority_ctr_get(&ctx->priority_ctr, priority);
+}
+
+void amdgpu_ctx_priority_put(struct amdgpu_ctx *ctx,
+                            enum amd_sched_priority priority)
+{
+       amd_sched_priority_ctr_put(&ctx->priority_ctr, priority);
+}
+
  void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
  {
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
  }

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
  {
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {


