Re: [PATCH 1/1] drm/amdgpu: rework context priority handling

2021-08-25 Thread Christian König




Am 25.08.21 um 15:35 schrieb Das, Nirmoy:


On 8/25/2021 2:29 PM, Christian König wrote:

Am 25.08.21 um 14:20 schrieb Lazar, Lijo:

On 8/25/2021 4:52 PM, Nirmoy Das wrote:

To get a hardware queue priority for a context, we are currently
mapping AMDGPU_CTX_PRIORITY_* to DRM_SCHED_PRIORITY_* and then
to hardware queue priority, which is not the right way to do that
as DRM_SCHED_PRIORITY_* is software scheduler's priority and it is
independent from a hardware queue priority.

Use userspace provided context priority, AMDGPU_CTX_PRIORITY_* to
map a context to proper hardware queue priority.

Signed-off-by: Nirmoy Das 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 127 
--

  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |   8 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  44 ++--
  3 files changed, 105 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

index e7a010b7ca1f..c88c5c6c54a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int 
amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {

  [AMDGPU_HW_IP_VCN_JPEG]    =    1,
  };
  +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+    switch (ctx_prio) {
+    case AMDGPU_CTX_PRIORITY_UNSET:
+    case AMDGPU_CTX_PRIORITY_VERY_LOW:
+    case AMDGPU_CTX_PRIORITY_LOW:
+    case AMDGPU_CTX_PRIORITY_NORMAL:
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+    return true;
+    default:
+    return false;
+    }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+    switch (ctx_prio) {
+    case AMDGPU_CTX_PRIORITY_UNSET:
+    return DRM_SCHED_PRIORITY_UNSET;
+
+    case AMDGPU_CTX_PRIORITY_VERY_LOW:
+    return DRM_SCHED_PRIORITY_MIN;
+
+    case AMDGPU_CTX_PRIORITY_LOW:
+    return DRM_SCHED_PRIORITY_MIN;
+
+    case AMDGPU_CTX_PRIORITY_NORMAL:
+    return DRM_SCHED_PRIORITY_NORMAL;
+
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    return DRM_SCHED_PRIORITY_HIGH;
+
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+    return DRM_SCHED_PRIORITY_HIGH;
+
+    /* This should not happen as we sanitized userspace provided priority
+     * already, WARN if this happens.
+     */
+    default:
+    WARN(1, "Invalid context priority %d\n", ctx_prio);
+    return DRM_SCHED_PRIORITY_NORMAL;
+    }
+
+}
+
  static int amdgpu_ctx_priority_permit(struct drm_file *filp,
-  enum drm_sched_priority priority)
+  int32_t priority)
  {
-    if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+    if (!amdgpu_ctx_priority_is_valid(priority))
  return -EINVAL;
    /* NORMAL and below are accessible by everyone */
-    if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+    if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
  return 0;
    if (capable(CAP_SYS_NICE))
@@ -62,26 +109,35 @@ static int amdgpu_ctx_priority_permit(struct 
drm_file *filp,

  return -EACCES;
  }
  -static enum gfx_pipe_priority 
amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum gfx_pipe_priority 
amdgpu_ctx_prio_to_compute_prio(int32_t prio)

  {
  switch (prio) {
-    case DRM_SCHED_PRIORITY_HIGH:
-    case DRM_SCHED_PRIORITY_KERNEL:
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
  return AMDGPU_GFX_PIPE_PRIO_HIGH;
  default:
  return AMDGPU_GFX_PIPE_PRIO_NORMAL;
  }
  }
  -static unsigned int amdgpu_ctx_prio_sched_to_hw(struct 
amdgpu_device *adev,

- enum drm_sched_priority prio,
- u32 hw_ip)
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, 
u32 hw_ip)

  {
+    struct amdgpu_device *adev = ctx->adev;
+    int32_t ctx_prio;
  unsigned int hw_prio;
  -    hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-    amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-    AMDGPU_RING_PRIO_DEFAULT;
+    ctx_prio = (ctx->override_priority == 
AMDGPU_CTX_PRIORITY_UNSET) ?

+    ctx->init_priority : ctx->override_priority;
+
+    switch (hw_ip) {
+    case AMDGPU_HW_IP_COMPUTE:
+    hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+    break;
+    default:
+    hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+    break;
+    }
+
  hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
  if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
  hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +145,17 @@ static unsigned int 
amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,

  return hw_prio;
  }
  +
  static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
-   const u32 ring)
+  const u32 ring)
  {
  struct amdgpu_device *adev = ctx->adev;
  struct amdgpu_ctx_entity *entity;
  struct drm_gpu_scheduler **scheds = NULL, *sched 

Re: [PATCH 1/1] drm/amdgpu: rework context priority handling

2021-08-25 Thread Das, Nirmoy



On 8/25/2021 2:29 PM, Christian König wrote:

Am 25.08.21 um 14:20 schrieb Lazar, Lijo:

On 8/25/2021 4:52 PM, Nirmoy Das wrote:

To get a hardware queue priority for a context, we are currently
mapping AMDGPU_CTX_PRIORITY_* to DRM_SCHED_PRIORITY_* and then
to hardware queue priority, which is not the right way to do that
as DRM_SCHED_PRIORITY_* is software scheduler's priority and it is
independent from a hardware queue priority.

Use userspace provided context priority, AMDGPU_CTX_PRIORITY_* to
map a context to proper hardware queue priority.

Signed-off-by: Nirmoy Das 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 127 
--

  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |   8 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  44 ++--
  3 files changed, 105 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

index e7a010b7ca1f..c88c5c6c54a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int 
amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {

  [AMDGPU_HW_IP_VCN_JPEG]    =    1,
  };
  +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+    switch (ctx_prio) {
+    case AMDGPU_CTX_PRIORITY_UNSET:
+    case AMDGPU_CTX_PRIORITY_VERY_LOW:
+    case AMDGPU_CTX_PRIORITY_LOW:
+    case AMDGPU_CTX_PRIORITY_NORMAL:
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+    return true;
+    default:
+    return false;
+    }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+    switch (ctx_prio) {
+    case AMDGPU_CTX_PRIORITY_UNSET:
+    return DRM_SCHED_PRIORITY_UNSET;
+
+    case AMDGPU_CTX_PRIORITY_VERY_LOW:
+    return DRM_SCHED_PRIORITY_MIN;
+
+    case AMDGPU_CTX_PRIORITY_LOW:
+    return DRM_SCHED_PRIORITY_MIN;
+
+    case AMDGPU_CTX_PRIORITY_NORMAL:
+    return DRM_SCHED_PRIORITY_NORMAL;
+
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    return DRM_SCHED_PRIORITY_HIGH;
+
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+    return DRM_SCHED_PRIORITY_HIGH;
+
+    /* This should not happen as we sanitized userspace provided priority
+     * already, WARN if this happens.
+     */
+    default:
+    WARN(1, "Invalid context priority %d\n", ctx_prio);
+    return DRM_SCHED_PRIORITY_NORMAL;
+    }
+
+}
+
  static int amdgpu_ctx_priority_permit(struct drm_file *filp,
-  enum drm_sched_priority priority)
+  int32_t priority)
  {
-    if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+    if (!amdgpu_ctx_priority_is_valid(priority))
  return -EINVAL;
    /* NORMAL and below are accessible by everyone */
-    if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+    if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
  return 0;
    if (capable(CAP_SYS_NICE))
@@ -62,26 +109,35 @@ static int amdgpu_ctx_priority_permit(struct 
drm_file *filp,

  return -EACCES;
  }
  -static enum gfx_pipe_priority 
amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum gfx_pipe_priority 
amdgpu_ctx_prio_to_compute_prio(int32_t prio)

  {
  switch (prio) {
-    case DRM_SCHED_PRIORITY_HIGH:
-    case DRM_SCHED_PRIORITY_KERNEL:
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
  return AMDGPU_GFX_PIPE_PRIO_HIGH;
  default:
  return AMDGPU_GFX_PIPE_PRIO_NORMAL;
  }
  }
  -static unsigned int amdgpu_ctx_prio_sched_to_hw(struct 
amdgpu_device *adev,

- enum drm_sched_priority prio,
- u32 hw_ip)
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, 
u32 hw_ip)

  {
+    struct amdgpu_device *adev = ctx->adev;
+    int32_t ctx_prio;
  unsigned int hw_prio;
  -    hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-    amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-    AMDGPU_RING_PRIO_DEFAULT;
+    ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+    ctx->init_priority : ctx->override_priority;
+
+    switch (hw_ip) {
+    case AMDGPU_HW_IP_COMPUTE:
+    hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+    break;
+    default:
+    hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+    break;
+    }
+
  hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
  if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
  hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +145,17 @@ static unsigned int 
amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,

  return hw_prio;
  }
  +
  static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
-   const u32 ring)
+  const u32 ring)
  {
  struct amdgpu_device *adev = ctx->adev;
  struct amdgpu_ctx_entity *entity;
  struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
  unsigned num_scheds = 0;
+    

Re: [PATCH 1/1] drm/amdgpu: rework context priority handling

2021-08-25 Thread Lazar, Lijo




On 8/25/2021 4:52 PM, Nirmoy Das wrote:

To get a hardware queue priority for a context, we are currently
mapping AMDGPU_CTX_PRIORITY_* to DRM_SCHED_PRIORITY_* and then
to hardware queue priority, which is not the right way to do that
as DRM_SCHED_PRIORITY_* is software scheduler's priority and it is
independent from a hardware queue priority.

Use userspace provided context priority, AMDGPU_CTX_PRIORITY_* to
map a context to proper hardware queue priority.

Signed-off-by: Nirmoy Das 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 127 --
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |   8 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  44 ++--
  3 files changed, 105 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e7a010b7ca1f..c88c5c6c54a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int 
amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] =   1,
  };
  
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)

+{
+   switch (ctx_prio) {
+   case AMDGPU_CTX_PRIORITY_UNSET:
+   case AMDGPU_CTX_PRIORITY_VERY_LOW:
+   case AMDGPU_CTX_PRIORITY_LOW:
+   case AMDGPU_CTX_PRIORITY_NORMAL:
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+   return true;
+   default:
+   return false;
+   }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+   switch (ctx_prio) {
+   case AMDGPU_CTX_PRIORITY_UNSET:
+   return DRM_SCHED_PRIORITY_UNSET;
+
+   case AMDGPU_CTX_PRIORITY_VERY_LOW:
+   return DRM_SCHED_PRIORITY_MIN;
+
+   case AMDGPU_CTX_PRIORITY_LOW:
+   return DRM_SCHED_PRIORITY_MIN;
+
+   case AMDGPU_CTX_PRIORITY_NORMAL:
+   return DRM_SCHED_PRIORITY_NORMAL;
+
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   return DRM_SCHED_PRIORITY_HIGH;
+
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+   return DRM_SCHED_PRIORITY_HIGH;
+
+   /* This should not happen as we sanitized userspace provided priority
+* already, WARN if this happens.
+*/
+   default:
+   WARN(1, "Invalid context priority %d\n", ctx_prio);
+   return DRM_SCHED_PRIORITY_NORMAL;
+   }
+
+}
+
  static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum drm_sched_priority priority)
+ int32_t priority)
  {
-   if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+   if (!amdgpu_ctx_priority_is_valid(priority))
return -EINVAL;
  
  	/* NORMAL and below are accessible by everyone */

-   if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+   if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
  
  	if (capable(CAP_SYS_NICE))

@@ -62,26 +109,35 @@ static int amdgpu_ctx_priority_permit(struct drm_file 
*filp,
return -EACCES;
  }
  
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)

+static enum gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
  {
switch (prio) {
-   case DRM_SCHED_PRIORITY_HIGH:
-   case DRM_SCHED_PRIORITY_KERNEL:
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
return AMDGPU_GFX_PIPE_PRIO_NORMAL;
}
  }
  
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,

-enum drm_sched_priority prio,
-u32 hw_ip)
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
  {
+   struct amdgpu_device *adev = ctx->adev;
+   int32_t ctx_prio;
unsigned int hw_prio;
  
-	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?

-   amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-   AMDGPU_RING_PRIO_DEFAULT;
+   ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+   ctx->init_priority : ctx->override_priority;
+
+   switch (hw_ip) {
+   case AMDGPU_HW_IP_COMPUTE:
+   hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+   break;
+   default:
+   hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+   break;
+   }
+
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +145,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct 
amdgpu_device *adev,
return hw_prio;
  }
  
+

  static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 

Re: [PATCH 1/1] drm/amdgpu: rework context priority handling

2021-08-25 Thread Christian König

Am 25.08.21 um 14:20 schrieb Lazar, Lijo:

On 8/25/2021 4:52 PM, Nirmoy Das wrote:

To get a hardware queue priority for a context, we are currently
mapping AMDGPU_CTX_PRIORITY_* to DRM_SCHED_PRIORITY_* and then
to hardware queue priority, which is not the right way to do that
as DRM_SCHED_PRIORITY_* is software scheduler's priority and it is
independent from a hardware queue priority.

Use userspace provided context priority, AMDGPU_CTX_PRIORITY_* to
map a context to proper hardware queue priority.

Signed-off-by: Nirmoy Das 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 127 --
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |   8 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  44 ++--
  3 files changed, 105 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

index e7a010b7ca1f..c88c5c6c54a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int 
amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {

  [AMDGPU_HW_IP_VCN_JPEG]    =    1,
  };
  +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+    switch (ctx_prio) {
+    case AMDGPU_CTX_PRIORITY_UNSET:
+    case AMDGPU_CTX_PRIORITY_VERY_LOW:
+    case AMDGPU_CTX_PRIORITY_LOW:
+    case AMDGPU_CTX_PRIORITY_NORMAL:
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+    return true;
+    default:
+    return false;
+    }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+    switch (ctx_prio) {
+    case AMDGPU_CTX_PRIORITY_UNSET:
+    return DRM_SCHED_PRIORITY_UNSET;
+
+    case AMDGPU_CTX_PRIORITY_VERY_LOW:
+    return DRM_SCHED_PRIORITY_MIN;
+
+    case AMDGPU_CTX_PRIORITY_LOW:
+    return DRM_SCHED_PRIORITY_MIN;
+
+    case AMDGPU_CTX_PRIORITY_NORMAL:
+    return DRM_SCHED_PRIORITY_NORMAL;
+
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    return DRM_SCHED_PRIORITY_HIGH;
+
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+    return DRM_SCHED_PRIORITY_HIGH;
+
+    /* This should not happen as we sanitized userspace provided priority
+     * already, WARN if this happens.
+     */
+    default:
+    WARN(1, "Invalid context priority %d\n", ctx_prio);
+    return DRM_SCHED_PRIORITY_NORMAL;
+    }
+
+}
+
  static int amdgpu_ctx_priority_permit(struct drm_file *filp,
-  enum drm_sched_priority priority)
+  int32_t priority)
  {
-    if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+    if (!amdgpu_ctx_priority_is_valid(priority))
  return -EINVAL;
    /* NORMAL and below are accessible by everyone */
-    if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+    if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
  return 0;
    if (capable(CAP_SYS_NICE))
@@ -62,26 +109,35 @@ static int amdgpu_ctx_priority_permit(struct 
drm_file *filp,

  return -EACCES;
  }
  -static enum gfx_pipe_priority 
amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum gfx_pipe_priority 
amdgpu_ctx_prio_to_compute_prio(int32_t prio)

  {
  switch (prio) {
-    case DRM_SCHED_PRIORITY_HIGH:
-    case DRM_SCHED_PRIORITY_KERNEL:
+    case AMDGPU_CTX_PRIORITY_HIGH:
+    case AMDGPU_CTX_PRIORITY_VERY_HIGH:
  return AMDGPU_GFX_PIPE_PRIO_HIGH;
  default:
  return AMDGPU_GFX_PIPE_PRIO_NORMAL;
  }
  }
  -static unsigned int amdgpu_ctx_prio_sched_to_hw(struct 
amdgpu_device *adev,

- enum drm_sched_priority prio,
- u32 hw_ip)
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, 
u32 hw_ip)

  {
+    struct amdgpu_device *adev = ctx->adev;
+    int32_t ctx_prio;
  unsigned int hw_prio;
  -    hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-    amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-    AMDGPU_RING_PRIO_DEFAULT;
+    ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+    ctx->init_priority : ctx->override_priority;
+
+    switch (hw_ip) {
+    case AMDGPU_HW_IP_COMPUTE:
+    hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+    break;
+    default:
+    hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+    break;
+    }
+
  hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
  if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
  hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +145,17 @@ static unsigned int 
amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,

  return hw_prio;
  }
  +
  static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
-   const u32 ring)
+  const u32 ring)
  {
  struct amdgpu_device *adev = ctx->adev;
  struct amdgpu_ctx_entity *entity;
  struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
  unsigned num_scheds = 0;
+    int32_t ctx_prio;
  unsigned int hw_prio;
-    

[PATCH 1/1] drm/amdgpu: rework context priority handling

2021-08-25 Thread Nirmoy Das
To get a hardware queue priority for a context, we are currently
mapping AMDGPU_CTX_PRIORITY_* to DRM_SCHED_PRIORITY_* and then
to hardware queue priority, which is not the right way to do that
as DRM_SCHED_PRIORITY_* is software scheduler's priority and it is
independent from a hardware queue priority.

Use userspace provided context priority, AMDGPU_CTX_PRIORITY_* to
map a context to proper hardware queue priority.

Signed-off-by: Nirmoy Das 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 127 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |   8 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  44 ++--
 3 files changed, 105 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e7a010b7ca1f..c88c5c6c54a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int 
amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] =   1,
 };
 
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+   switch (ctx_prio) {
+   case AMDGPU_CTX_PRIORITY_UNSET:
+   case AMDGPU_CTX_PRIORITY_VERY_LOW:
+   case AMDGPU_CTX_PRIORITY_LOW:
+   case AMDGPU_CTX_PRIORITY_NORMAL:
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+   return true;
+   default:
+   return false;
+   }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+   switch (ctx_prio) {
+   case AMDGPU_CTX_PRIORITY_UNSET:
+   return DRM_SCHED_PRIORITY_UNSET;
+
+   case AMDGPU_CTX_PRIORITY_VERY_LOW:
+   return DRM_SCHED_PRIORITY_MIN;
+
+   case AMDGPU_CTX_PRIORITY_LOW:
+   return DRM_SCHED_PRIORITY_MIN;
+
+   case AMDGPU_CTX_PRIORITY_NORMAL:
+   return DRM_SCHED_PRIORITY_NORMAL;
+
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   return DRM_SCHED_PRIORITY_HIGH;
+
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+   return DRM_SCHED_PRIORITY_HIGH;
+
+   /* This should not happen as we sanitized userspace provided priority
+* already, WARN if this happens.
+*/
+   default:
+   WARN(1, "Invalid context priority %d\n", ctx_prio);
+   return DRM_SCHED_PRIORITY_NORMAL;
+   }
+
+}
+
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum drm_sched_priority priority)
+ int32_t priority)
 {
-   if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+   if (!amdgpu_ctx_priority_is_valid(priority))
return -EINVAL;
 
/* NORMAL and below are accessible by everyone */
-   if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+   if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
 
if (capable(CAP_SYS_NICE))
@@ -62,26 +109,35 @@ static int amdgpu_ctx_priority_permit(struct drm_file 
*filp,
return -EACCES;
 }
 
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum 
drm_sched_priority prio)
+static enum gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
 {
switch (prio) {
-   case DRM_SCHED_PRIORITY_HIGH:
-   case DRM_SCHED_PRIORITY_KERNEL:
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
return AMDGPU_GFX_PIPE_PRIO_NORMAL;
}
 }
 
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
-enum drm_sched_priority prio,
-u32 hw_ip)
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
 {
+   struct amdgpu_device *adev = ctx->adev;
+   int32_t ctx_prio;
unsigned int hw_prio;
 
-   hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-   amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-   AMDGPU_RING_PRIO_DEFAULT;
+   ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+   ctx->init_priority : ctx->override_priority;
+
+   switch (hw_ip) {
+   case AMDGPU_HW_IP_COMPUTE:
+   hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+   break;
+   default:
+   hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+   break;
+   }
+
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +145,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct 
amdgpu_device *adev,
return hw_prio;
 }
 
+
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
-  const u32 ring)
+  

Re: [PATCH 1/1] drm/amdgpu: rework context priority handling

2021-08-25 Thread Christian König




Am 25.08.21 um 13:22 schrieb Nirmoy Das:

To get a hardware queue priority for a context, we are currently
mapping AMDGPU_CTX_PRIORITY_* to DRM_SCHED_PRIORITY_* and then
to hardware queue priority, which is not the right way to do that
as DRM_SCHED_PRIORITY_* is software scheduler's priority and it is
independent from a hardware queue priority.

Use userspace provided context priority, AMDGPU_CTX_PRIORITY_* to
map a context to proper hardware queue priority.

Signed-off-by: Nirmoy Das 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 127 --
  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |   8 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  44 ++--
  3 files changed, 105 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e7a010b7ca1f..c88c5c6c54a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int 
amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] =   1,
  };
  
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)

+{
+   switch (ctx_prio) {
+   case AMDGPU_CTX_PRIORITY_UNSET:
+   case AMDGPU_CTX_PRIORITY_VERY_LOW:
+   case AMDGPU_CTX_PRIORITY_LOW:
+   case AMDGPU_CTX_PRIORITY_NORMAL:
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+   return true;
+   default:
+   return false;
+   }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+   switch (ctx_prio) {
+   case AMDGPU_CTX_PRIORITY_UNSET:
+   return DRM_SCHED_PRIORITY_UNSET;
+
+   case AMDGPU_CTX_PRIORITY_VERY_LOW:
+   return DRM_SCHED_PRIORITY_MIN;
+
+   case AMDGPU_CTX_PRIORITY_LOW:
+   return DRM_SCHED_PRIORITY_MIN;
+
+   case AMDGPU_CTX_PRIORITY_NORMAL:
+   return DRM_SCHED_PRIORITY_NORMAL;
+
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   return DRM_SCHED_PRIORITY_HIGH;
+
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+   return DRM_SCHED_PRIORITY_HIGH;
+
+   /* This should not happen as we sanitized userspace provided priority
+* already, WARN if this happens.
+*/
+   default:
+   WARN(1, "Invalid context priority %d\n", ctx_prio);
+   return DRM_SCHED_PRIORITY_NORMAL;
+   }
+
+}
+
  static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum drm_sched_priority priority)
+ int32_t priority)
  {
-   if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+   if (!amdgpu_ctx_priority_is_valid(priority))
return -EINVAL;
  
  	/* NORMAL and below are accessible by everyone */

-   if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+   if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
  
  	if (capable(CAP_SYS_NICE))

@@ -62,26 +109,35 @@ static int amdgpu_ctx_priority_permit(struct drm_file 
*filp,
return -EACCES;
  }
  
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)

+static enum gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
  {
switch (prio) {
-   case DRM_SCHED_PRIORITY_HIGH:
-   case DRM_SCHED_PRIORITY_KERNEL:
+   case AMDGPU_CTX_PRIORITY_HIGH:
+   case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
return AMDGPU_GFX_PIPE_PRIO_NORMAL;
}
  }
  
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,

-enum drm_sched_priority prio,
-u32 hw_ip)
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
  {
+   struct amdgpu_device *adev = ctx->adev;
+   int32_t ctx_prio;
unsigned int hw_prio;
  
-	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?

-   amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-   AMDGPU_RING_PRIO_DEFAULT;
+   ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+   ctx->init_priority : ctx->override_priority;
+
+   switch (hw_ip) {
+   case AMDGPU_HW_IP_COMPUTE:
+   hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+   break;
+   default:
+   hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+   break;
+   }
+
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +145,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct 
amdgpu_device *adev,
return hw_prio;
  }
  
+

  static int