Reviewed-by: Ken Wang <[email protected]>

________________________________
From: amd-gfx <[email protected]> on behalf of Rex Zhu <[email protected]>
Sent: Monday, June 19, 2017 6:05:16 PM
To: [email protected]
Cc: Zhu, Rex
Subject: [PATCH] drm/amd/powerplay: add support for ATOM GFXCLK table v2.

Change-Id: I42e3d7b684ee903ce3ced6135f954cea0d113503
Signed-off-by: Rex Zhu <[email protected]>
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 14 +++++---
 .../gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h   |  9 +++++
 .../amd/powerplay/hwmgr/vega10_processpptables.c   | 42 +++++++++++++++++-----
 3 files changed, 51 insertions(+), 14 deletions(-)
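A note for anyone reading the table-parsing changes below: the new v2 records are selected purely from the table's ucRevId field. The following sketch only illustrates that dispatch and is not part of the patch; the get_gfx_clk() helper name is invented here, and it assumes the ATOM_Vega10_GFXCLK_Dependency_Table / _Record / _Record_V2 definitions from vega10_pptable.h.

static uint32_t get_gfx_clk(const ATOM_Vega10_GFXCLK_Dependency_Table *tbl,
                uint8_t index)
{
        if (tbl->ucRevId == 0)          /* rev 0: original record layout */
                return le32_to_cpu(tbl->entries[index].ulClk);

        if (tbl->ucRevId == 1) {        /* rev 1: larger v2 records, recast the entry array */
                const ATOM_Vega10_GFXCLK_Dependency_Record_V2 *rec =
                        (const ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)tbl->entries;
                return le32_to_cpu(rec[index].ulClk);
        }

        return 0;                       /* unknown revision, caller should reject the table */
}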

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d9744b6..e2e2822 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2864,6 +2864,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
                 void *state, struct pp_power_state *power_state,
                 void *pp_table, uint32_t classification_flag)
 {
+       ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
         struct vega10_power_state *vega10_power_state =
                         cast_phw_vega10_power_state(&(power_state->hardware));
         struct vega10_performance_level *performance_level;
@@ -2940,11 +2941,16 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,

         performance_level = &(vega10_power_state->performance_levels
                                 [vega10_power_state->performance_level_count++]);
-
         performance_level->soc_clock = socclk_dep_table->entries
-                       [state_entry->ucSocClockIndexHigh].ulClk;
-       performance_level->gfx_clock = gfxclk_dep_table->entries
+                               [state_entry->ucSocClockIndexHigh].ulClk;
+       if (gfxclk_dep_table->ucRevId == 0) {
+               performance_level->gfx_clock = gfxclk_dep_table->entries
                         [state_entry->ucGfxClockIndexHigh].ulClk;
+       } else if (gfxclk_dep_table->ucRevId == 1) {
+               patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+               performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
+       }
+
         performance_level->mem_clock = mclk_dep_table->entries
                         [state_entry->ucMemClockIndexHigh].ulMemClk;
         return 0;
@@ -3348,7 +3354,6 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
                                 dpm_table->
                                 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
                                 value = sclk;
-
                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                                                 PHM_PlatformCaps_OD6PlusinACSupport) ||
                                         phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3471,7 +3476,6 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
                                         return result);
                 }
         }
-
         return result;
 }

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
index 52beea3..b3e6300 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
@@ -144,6 +144,15 @@
         USHORT usAVFSOffset;                                        /* AVFS Voltage offset */
 } ATOM_Vega10_GFXCLK_Dependency_Record;

+typedef struct _ATOM_Vega10_GFXCLK_Dependency_Record_V2 {
+       ULONG  ulClk;
+       UCHAR  ucVddInd;
+       USHORT usCKSVOffsetandDisable;
+       USHORT usAVFSOffset;
+       UCHAR  ucACGEnable;
+       UCHAR  ucReserved[3];
+} ATOM_Vega10_GFXCLK_Dependency_Record_V2;
+
 typedef struct _ATOM_Vega10_MCLK_Dependency_Record {
         ULONG  ulMemClk;                                            /* Clock Frequency */
         UCHAR  ucVddInd;                                            /* SOC_VDD index */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 2b892e4..1623644 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -585,6 +585,7 @@ static int get_gfxclk_voltage_dependency_table(
         uint32_t table_size, i;
         struct phm_ppt_v1_clock_voltage_dependency_table
                                 *clk_table;
+       ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;

         PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0),
                         "Invalid PowerPlay Table!", return -1);
@@ -601,18 +602,41 @@ static int get_gfxclk_voltage_dependency_table(

         clk_table->count = clk_dep_table->ucNumEntries;

-       for (i = 0; i < clk_table->count; i++) {
-               clk_table->entries[i].vddInd =
+       if (clk_dep_table->ucRevId == 0) {
+               for (i = 0; i < clk_table->count; i++) {
+                       clk_table->entries[i].vddInd =
                                 clk_dep_table->entries[i].ucVddInd;
-               clk_table->entries[i].clk =
+                       clk_table->entries[i].clk =
                                 le32_to_cpu(clk_dep_table->entries[i].ulClk);
-               clk_table->entries[i].cks_enable =
-                               (((clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x8000)
+                       clk_table->entries[i].cks_enable =
+                               (((le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x8000)
                                                 >> 15) == 0) ? 1 : 0;
-               clk_table->entries[i].cks_voffset =
-                               (clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x7F);
-               clk_table->entries[i].sclk_offset =
-                               clk_dep_table->entries[i].usAVFSOffset;
+                       clk_table->entries[i].cks_voffset =
+                               le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x7F;
+                       clk_table->entries[i].sclk_offset =
+                               le16_to_cpu(clk_dep_table->entries[i].usAVFSOffset);
+               }
+       } else if (clk_dep_table->ucRevId == 1) {
+               patom_record_v2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)clk_dep_table->entries;
+               for (i = 0; i < clk_table->count; i++) {
+                       clk_table->entries[i].vddInd =
+                                       patom_record_v2->ucVddInd;
+                       clk_table->entries[i].clk =
+                                       le32_to_cpu(patom_record_v2->ulClk);
+                       clk_table->entries[i].cks_enable =
+                                       (((le16_to_cpu(patom_record_v2->usCKSVOffsetandDisable) & 0x8000)
+                                                       >> 15) == 0) ? 1 : 0;
+                       clk_table->entries[i].cks_voffset =
+                                       le16_to_cpu(patom_record_v2->usCKSVOffsetandDisable) & 0x7F;
+                       clk_table->entries[i].sclk_offset =
+                                       le16_to_cpu(patom_record_v2->usAVFSOffset);
+                       patom_record_v2++;
+               }
+       } else {
+               kfree(clk_table);
+               PP_ASSERT_WITH_CODE(false,
+                       "Unsupported GFXClockDependencyTable Revision!",
+                       return -EINVAL);
         }

         *pp_vega10_clk_dep_table = clk_table;
--
1.9.1

_______________________________________________
amd-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
