From: Le Ma <[email protected]>

On Arcturus, SDMA instances 5~7 are connected to the second MMHUB. The vmhub
type in amdgpu_ring_funcs is constant, so we create a separate amdgpu_ring_funcs
with a different vmhub type (AMDGPU_MMHUB_1) for these SDMA instances.
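
For context, the shape of the change is: amdgpu_ring_funcs objects are const, so
the per-ring vmhub index cannot be patched at runtime; instead a second const
funcs table is defined and sdma_v4_0_set_ring_funcs() picks one table or the
other per instance. Below is a minimal standalone sketch of that pattern; the
sketch_* names are illustrative placeholders only, not the real driver symbols.

#include <stdio.h>

enum sketch_vmhub { SKETCH_MMHUB_0, SKETCH_MMHUB_1 };

/* Stand-in for amdgpu_ring_funcs: the vmhub index is baked into a const object. */
struct sketch_ring_funcs {
	enum sketch_vmhub vmhub;
};

/* One const table per vmhub, mirroring sdma_v4_0_ring_funcs{,_2nd_mmhub}. */
static const struct sketch_ring_funcs sketch_funcs_mmhub0 = { .vmhub = SKETCH_MMHUB_0 };
static const struct sketch_ring_funcs sketch_funcs_mmhub1 = { .vmhub = SKETCH_MMHUB_1 };

int main(void)
{
	const struct sketch_ring_funcs *funcs;
	int i;

	for (i = 0; i < 8; i++) {
		/* Instances 5~7 sit behind the second MMHUB on Arcturus. */
		funcs = (i >= 5) ? &sketch_funcs_mmhub1 : &sketch_funcs_mmhub0;
		printf("sdma instance %d -> vmhub %d\n", i, (int)funcs->vmhub);
	}
	return 0;
}

The patch below does the same thing with the real structures, keyed on
adev->asic_type == CHIP_ARCTURUS.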

Signed-off-by: Le Ma <[email protected]>
Acked-by: Snow Zhang <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 43 +++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index b6509525f4fc..1dce974e0c58 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2213,6 +2213,42 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+/*
+ * On Arcturus, SDMA instances 5~7 have a different vmhub type (AMDGPU_MMHUB_1).
+ * So create an individual constant ring_funcs for those instances.
+ */
+static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
+       .type = AMDGPU_RING_TYPE_SDMA,
+       .align_mask = 0xf,
+       .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+       .support_64bit_ptrs = true,
+       .vmhub = AMDGPU_MMHUB_1,
+       .get_rptr = sdma_v4_0_ring_get_rptr,
+       .get_wptr = sdma_v4_0_ring_get_wptr,
+       .set_wptr = sdma_v4_0_ring_set_wptr,
+       .emit_frame_size =
+               6 + /* sdma_v4_0_ring_emit_hdp_flush */
+               3 + /* hdp invalidate */
+               6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+               /* sdma_v4_0_ring_emit_vm_flush */
+               SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+               SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+               10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+       .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+       .emit_ib = sdma_v4_0_ring_emit_ib,
+       .emit_fence = sdma_v4_0_ring_emit_fence,
+       .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+       .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+       .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+       .test_ring = sdma_v4_0_ring_test_ring,
+       .test_ib = sdma_v4_0_ring_test_ib,
+       .insert_nop = sdma_v4_0_ring_insert_nop,
+       .pad_ib = sdma_v4_0_ring_pad_ib,
+       .emit_wreg = sdma_v4_0_ring_emit_wreg,
+       .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+       .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
 static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
        .type = AMDGPU_RING_TYPE_SDMA,
        .align_mask = 0xf,
@@ -2250,7 +2286,12 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
+               if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
+                       adev->sdma.instance[i].ring.funcs =
+                                       &sdma_v4_0_ring_funcs_2nd_mmhub;
+               else
+                       adev->sdma.instance[i].ring.funcs =
+                                       &sdma_v4_0_ring_funcs;
                adev->sdma.instance[i].ring.me = i;
                if (adev->sdma.has_page_queue) {
                        adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
-- 
2.20.1
