init_priority sets the second compute queue (queue 1) of a pipe to high
priority and the first queue (queue 0) to normal priority, on gfx8 and
gfx9.

Signed-off-by: Nirmoy Das <nirmoy....@amd.com>
---
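The callback is expected to be invoked once per compute ring as the ring
is brought up; a minimal sketch of a possible call site follows (the
helper name and the hook point are illustrative assumptions, not part of
this diff):

	/*
	 * Hypothetical call site (illustration only): invoke the new hook
	 * once a compute ring has been initialized. The actual hook point
	 * may differ.
	 */
	static void amdgpu_ring_apply_init_priority(struct amdgpu_ring *ring)
	{
		/* Only compute rings provide an init_priority callback. */
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
		    ring->funcs->init_priority)
			ring->funcs->init_priority(ring);
	}
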
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h |  1 +
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c    | 13 +++++++++++++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c    | 13 +++++++++++++
 3 files changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 24caff085d00..a109373b9fe8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -170,6 +170,7 @@ struct amdgpu_ring_funcs {
        /* priority functions */
        void (*set_priority) (struct amdgpu_ring *ring,
                              enum drm_sched_priority priority);
+       void (*init_priority) (struct amdgpu_ring *ring);
        /* Try to soft recover the ring to make the fence signal */
        void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
        int (*preempt_ib)(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fa245973de12..14bab6e08bd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6334,6 +6334,18 @@ static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
        gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
 }

+static void gfx_v8_0_ring_init_priority_compute(struct amdgpu_ring *ring)
+{
+       /* set queue 0 to normal priority and queue 1 to high priority */
+       if (ring->queue == 1) {
+               gfx_v8_0_hqd_set_priority(ring->adev, ring, true);
+               gfx_v8_0_ring_set_pipe_percent(ring, true);
+       } else {
+               gfx_v8_0_hqd_set_priority(ring->adev, ring, false);
+               gfx_v8_0_ring_set_pipe_percent(ring, false);
+       }
+}
+
 static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
                                             u64 addr, u64 seq,
                                             unsigned flags)
@@ -6967,6 +6979,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .set_priority = gfx_v8_0_ring_set_priority_compute,
+       .init_priority = gfx_v8_0_ring_init_priority_compute,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1c7a16b91686..0c66743fb6f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -5143,6 +5143,18 @@ static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
        gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
 }

+static void gfx_v9_0_ring_init_priority_compute(struct amdgpu_ring *ring)
+{
+       /* set queue 0 to normal priority and queue 1 to high priority */
+       if (ring->queue == 1) {
+               gfx_v9_0_hqd_set_priority(ring->adev, ring, true);
+               gfx_v9_0_ring_set_pipe_percent(ring, true);
+       } else {
+               gfx_v9_0_hqd_set_priority(ring->adev, ring, false);
+               gfx_v9_0_ring_set_pipe_percent(ring, false);
+       }
+}
+
 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
@@ -6514,6 +6526,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .set_priority = gfx_v9_0_ring_set_priority_compute,
+       .init_priority = gfx_v9_0_ring_init_priority_compute,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
--
2.25.0
