From: Huang Rui <ray.hu...@amd.com>

Add Renoir checks to gfx9 code.

Signed-off-by: Huang Rui <ray.hu...@amd.com>
Reviewed-by: Hawking Zhang <hawking.zh...@amd.com>
Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 26 ++++++++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 68f823820a08..23c7b6394506 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -104,6 +104,13 @@ MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
+MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
+MODULE_FIRMWARE("amdgpu/renoir_me.bin");
+MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
+MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
+MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
+
 #define mmTCP_CHAN_STEER_0_ARCT                                                0x0b03
 #define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                                       0
 #define mmTCP_CHAN_STEER_1_ARCT                                                0x0b04
@@ -1339,6 +1346,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_ARCTURUS:
                chip_name = "arcturus";
                break;
+       case CHIP_RENOIR:
+               chip_name = "renoir";
+               break;
        default:
                BUG();
        }
@@ -1598,7 +1608,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
                        return r;
        }
 
-       if (adev->asic_type == CHIP_RAVEN) {
+       if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
                /* TODO: double check the cp_table_size for RV */
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
                r = amdgpu_gfx_rlc_init_cpt(adev);
@@ -1859,6 +1869,16 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                gb_addr_config &= ~0xf3e777ff;
                gb_addr_config |= 0x22014042;
                break;
+       case CHIP_RENOIR:
+               adev->gfx.config.max_hw_contexts = 8;
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+               gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+               gb_addr_config &= ~0xf3e777ff;
+               gb_addr_config |= 0x22010042;
+               break;
        default:
                BUG();
                break;
@@ -2136,6 +2156,7 @@ static int gfx_v9_0_sw_init(void *handle)
        case CHIP_VEGA20:
        case CHIP_RAVEN:
        case CHIP_ARCTURUS:
+       case CHIP_RENOIR:
                adev->gfx.mec.num_mec = 2;
                break;
        default:
@@ -2293,7 +2314,7 @@ static int gfx_v9_0_sw_fini(void *handle)
        gfx_v9_0_mec_fini(adev);
        gfx_v9_0_ngg_fini(adev);
        amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
-       if (adev->asic_type == CHIP_RAVEN) {
+       if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
                amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                                &adev->gfx.rlc.cp_table_gpu_addr,
                                (void **)&adev->gfx.rlc.cp_table_ptr);
@@ -2963,6 +2984,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 
        switch (adev->asic_type) {
        case CHIP_RAVEN:
+       case CHIP_RENOIR:
                if (amdgpu_lbpw == 0)
                        gfx_v9_0_enable_lbpw(adev, false);
                else
-- 
2.20.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to