Use the generic smu_dpm_table structure instead of the SMUv13-specific
smu_13_0_dpm_table. Drop the per-table min/max bookkeeping in favor of the
SMU_DPM_TABLE_MIN/MAX accessors, track fine-grained DPM support through the
table flags, and record the clock type for each table.
Signed-off-by: Lijo Lazar <[email protected]>
---
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 38 ++---
.../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 100 ++++++------
.../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 81 +++++-----
.../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 97 +++++-------
.../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 147 +++++++++---------
.../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 97 +++++-------
6 files changed, 253 insertions(+), 307 deletions(-)
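
Note for reviewers: the generic table type used throughout the hunks below
(struct smu_dpm_table, the SMU_DPM_TABLE_MIN/MAX accessors and the
SMU_DPM_TABLE_FINE_GRAINED flag) is defined outside this diff, in the common
SMU headers. A minimal sketch of the expected shape, inferred only from how
the fields are used in this patch, is given here for reference; the level
struct name, the capacity constant and the exact macro bodies are assumptions,
not the actual definition.

/* Hypothetical sketch only -- the real definition is not part of this diff.
 * Relies on enum smu_clk_type from the existing SMU headers.
 */
#define SMU_MAX_DPM_LEVELS		16		/* assumed capacity */
#define SMU_DPM_TABLE_FINE_GRAINED	(1 << 0)	/* flag tested in the hunks below */

struct smu_dpm_clk_level {
	bool enabled;
	uint32_t value;				/* MHz */
};

struct smu_dpm_table {
	enum smu_clk_type clk_type;		/* set by each set_default_dpm_table() */
	uint32_t flags;				/* e.g. SMU_DPM_TABLE_FINE_GRAINED */
	uint32_t count;
	struct smu_dpm_clk_level dpm_levels[SMU_MAX_DPM_LEVELS];
};

/*
 * Assumed accessor semantics, matching the bookkeeping the removed code did
 * by hand: level 0 holds the minimum, the last populated level the maximum.
 */
#define SMU_DPM_TABLE_MIN(table)	((table)->dpm_levels[0].value)
#define SMU_DPM_TABLE_MAX(table)	((table)->dpm_levels[(table)->count - 1].value)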
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 4263798d716b..4d1180030c8a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -44,7 +44,6 @@
#define SMU13_TOOL_SIZE 0x19000
-#define MAX_DPM_LEVELS 16
#define MAX_PCIE_CONF 3
#define CTF_OFFSET_EDGE 5
@@ -72,19 +71,6 @@ struct smu_13_0_max_sustainable_clocks {
uint32_t soc_clock;
};
-struct smu_13_0_dpm_clk_level {
- bool enabled;
- uint32_t value;
-};
-
-struct smu_13_0_dpm_table {
- uint32_t min; /* MHz */
- uint32_t max; /* MHz */
- uint32_t count;
- bool is_fine_grained;
- struct smu_13_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS];
-};
-
struct smu_13_0_pcie_table {
uint8_t pcie_gen[MAX_PCIE_CONF];
uint8_t pcie_lane[MAX_PCIE_CONF];
@@ -93,17 +79,17 @@ struct smu_13_0_pcie_table {
};
struct smu_13_0_dpm_tables {
- struct smu_13_0_dpm_table soc_table;
- struct smu_13_0_dpm_table gfx_table;
- struct smu_13_0_dpm_table uclk_table;
- struct smu_13_0_dpm_table eclk_table;
- struct smu_13_0_dpm_table vclk_table;
- struct smu_13_0_dpm_table dclk_table;
- struct smu_13_0_dpm_table dcef_table;
- struct smu_13_0_dpm_table pixel_table;
- struct smu_13_0_dpm_table display_table;
- struct smu_13_0_dpm_table phy_table;
- struct smu_13_0_dpm_table fclk_table;
+ struct smu_dpm_table soc_table;
+ struct smu_dpm_table gfx_table;
+ struct smu_dpm_table uclk_table;
+ struct smu_dpm_table eclk_table;
+ struct smu_dpm_table vclk_table;
+ struct smu_dpm_table dclk_table;
+ struct smu_dpm_table dcef_table;
+ struct smu_dpm_table pixel_table;
+ struct smu_dpm_table display_table;
+ struct smu_dpm_table phy_table;
+ struct smu_dpm_table fclk_table;
struct smu_13_0_pcie_table pcie_table;
};
@@ -226,7 +212,7 @@ int smu_v13_0_set_power_source(struct smu_context *smu,
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_13_0_dpm_table *single_dpm_table);
+ struct smu_dpm_table *single_dpm_table);
int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
enum smu_clk_type clk_type, uint16_t level,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 18d5d0704509..07adbec5c169 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -347,7 +347,7 @@ static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
uint32_t *min, uint32_t *max)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
uint32_t min_clk, max_clk;
if (amdgpu_sriov_vf(smu->adev)) {
@@ -376,8 +376,8 @@ static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
return -EINVAL;
}
- min_clk = dpm_table->min;
- max_clk = dpm_table->max;
+ min_clk = SMU_DPM_TABLE_MIN(dpm_table);
+ max_clk = SMU_DPM_TABLE_MAX(dpm_table);
if (min) {
if (!min_clk)
@@ -400,12 +400,13 @@ static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu,
static int aldebaran_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table = NULL;
+ struct smu_dpm_table *dpm_table = NULL;
PPTable_t *pptable = smu->smu_table.driver_pptable;
int ret = 0;
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
+ dpm_table->clk_type = SMU_SOCCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_SOCCLK,
@@ -416,12 +417,11 @@ static int aldebaran_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.socclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
/* in the case of gfxclk, only fine-grained dpm is honored */
dpm_table->count = 2;
@@ -429,18 +429,15 @@ static int aldebaran_set_default_dpm_table(struct smu_context *smu)
dpm_table->dpm_levels[0].enabled = true;
dpm_table->dpm_levels[1].value = pptable->GfxclkFmax;
dpm_table->dpm_levels[1].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[1].value;
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.gfxclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* memclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
+ dpm_table->clk_type = SMU_UCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_UCLK,
@@ -451,12 +448,11 @@ static int aldebaran_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.uclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
+ dpm_table->clk_type = SMU_FCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_FCLK,
@@ -467,8 +463,6 @@ static int aldebaran_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.fclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
return 0;
@@ -593,29 +587,26 @@ static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->gfxclk_pstate.curr.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
- pstate_table->uclk_pstate.curr.min = mem_table->min;
- pstate_table->uclk_pstate.curr.max = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->uclk_pstate.curr.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.curr.max = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
- pstate_table->socclk_pstate.curr.min = soc_table->min;
- pstate_table->socclk_pstate.curr.max = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
+ pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
if (gfx_table->count > ALDEBARAN_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > ALDEBARAN_UMD_PSTATE_MCLK_LEVEL &&
@@ -640,7 +631,7 @@ static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
static void aldebaran_get_clk_table(struct smu_context *smu,
struct pp_clock_levels_with_latency *clocks,
- struct smu_13_0_dpm_table *dpm_table)
+ struct smu_dpm_table *dpm_table)
{
uint32_t i;
@@ -829,7 +820,7 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu,
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct pp_clock_levels_with_latency clocks;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t i;
@@ -1040,7 +1031,7 @@ static int aldebaran_force_clk_levels(struct smu_context *smu,
enum smu_clk_type type, uint32_t mask)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table = NULL;
+ struct smu_dpm_table *single_dpm_table = NULL;
uint32_t soft_min_level, soft_max_level;
int ret = 0;
@@ -1322,8 +1313,7 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int r;
@@ -1331,7 +1321,8 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
(level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
}
switch (level) {
@@ -1372,7 +1363,8 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
return -EINVAL;
-
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
&& (smu_dpm->dpm_level !=
AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
return -EINVAL;
@@ -1399,16 +1391,13 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
- if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
- (max > dpm_context->dpm_tables.gfx_table.max)) {
+ if (!max || (max < min_clk) || (max > max_clk)) {
dev_warn(adev->dev,
"Invalid max frequency %d MHz specified
for determinism\n", max);
return -EINVAL;
}
/* Restore default min/max clocks and enable determinism */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
min_clk, max_clk, false);
if (!ret) {
usleep_range(500, 1000);
@@ -1449,11 +1438,14 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
dev_err(smu->adev->dev, "Input parameter number not correct\n");
return -EINVAL;
}
-
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if (input[0] == 0) {
- if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
- dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
- input[1], dpm_context->dpm_tables.gfx_table.min);
+ if (input[1] < min_clk) {
+ dev_warn(
+ smu->adev->dev,
+ "Minimum GFX clk (%ld) MHz specified is
less than the minimum allowed (%d) MHz\n",
+ input[1], min_clk);
pstate_table->gfxclk_pstate.custom.min =
pstate_table->gfxclk_pstate.curr.min;
return -EINVAL;
@@ -1461,9 +1453,11 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
pstate_table->gfxclk_pstate.custom.min = input[1];
} else if (input[0] == 1) {
- if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
- dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
- input[1], dpm_context->dpm_tables.gfx_table.max);
+ if (input[1] > max_clk) {
+ dev_warn(
+ smu->adev->dev,
+ "Maximum GFX clk (%ld) MHz specified is
greater than the maximum allowed (%d) MHz\n",
+ input[1], max_clk);
pstate_table->gfxclk_pstate.custom.max =
pstate_table->gfxclk_pstate.curr.max;
return -EINVAL;
@@ -1480,8 +1474,10 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
return -EINVAL;
} else {
/* Use the default frequencies for manual and determinism mode */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
+ min_clk = SMU_DPM_TABLE_MIN(
+ &dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(
+ &dpm_context->dpm_tables.gfx_table);
ret = aldebaran_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk, false);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a89075e25717..417c6f42c4fa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1585,18 +1585,12 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_13_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_13_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_13_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct amdgpu_device *adev = smu->adev;
@@ -1611,34 +1605,34 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- sclk_min = sclk_max = gfx_table->max;
- mclk_min = mclk_max = mem_table->max;
- socclk_min = socclk_max = soc_table->max;
- vclk_min = vclk_max = vclk_table->max;
- dclk_min = dclk_max = dclk_table->max;
- fclk_min = fclk_max = fclk_table->max;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- sclk_min = sclk_max = gfx_table->min;
- mclk_min = mclk_max = mem_table->min;
- socclk_min = socclk_max = soc_table->min;
- vclk_min = vclk_max = vclk_table->min;
- dclk_min = dclk_max = dclk_table->min;
- fclk_min = fclk_max = fclk_table->min;
+ sclk_min = sclk_max = SMU_DPM_TABLE_MIN(gfx_table);
+ mclk_min = mclk_max = SMU_DPM_TABLE_MIN(mem_table);
+ socclk_min = socclk_max = SMU_DPM_TABLE_MIN(soc_table);
+ vclk_min = vclk_max = SMU_DPM_TABLE_MIN(vclk_table);
+ dclk_min = dclk_max = SMU_DPM_TABLE_MIN(dclk_table);
+ fclk_min = fclk_max = SMU_DPM_TABLE_MIN(fclk_table);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
- sclk_min = gfx_table->min;
- sclk_max = gfx_table->max;
- mclk_min = mem_table->min;
- mclk_max = mem_table->max;
- socclk_min = soc_table->min;
- socclk_max = soc_table->max;
- vclk_min = vclk_table->min;
- vclk_max = vclk_table->max;
- dclk_min = dclk_table->min;
- dclk_max = dclk_table->max;
- fclk_min = fclk_table->min;
- fclk_max = fclk_table->max;
+ sclk_min = SMU_DPM_TABLE_MIN(gfx_table);
+ sclk_max = SMU_DPM_TABLE_MAX(gfx_table);
+ mclk_min = SMU_DPM_TABLE_MIN(mem_table);
+ mclk_max = SMU_DPM_TABLE_MAX(mem_table);
+ socclk_min = SMU_DPM_TABLE_MIN(soc_table);
+ socclk_max = SMU_DPM_TABLE_MAX(soc_table);
+ vclk_min = SMU_DPM_TABLE_MIN(vclk_table);
+ vclk_max = SMU_DPM_TABLE_MAX(vclk_table);
+ dclk_min = SMU_DPM_TABLE_MIN(dclk_table);
+ dclk_max = SMU_DPM_TABLE_MAX(dclk_table);
+ fclk_min = SMU_DPM_TABLE_MIN(fclk_table);
+ fclk_max = SMU_DPM_TABLE_MAX(fclk_table);
auto_level = true;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
@@ -1909,11 +1903,12 @@ static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
- struct smu_13_0_dpm_table *single_dpm_table)
+ struct smu_dpm_table *single_dpm_table)
{
int ret = 0;
uint32_t clk;
int i;
+ bool is_fine_grained;
ret = smu_v13_0_get_dpm_level_count(smu,
clk_type,
@@ -1924,13 +1919,14 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
}
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
- ret = smu_v13_0_get_fine_grained_status(smu,
- clk_type,
- &single_dpm_table->is_fine_grained);
+ ret = smu_v13_0_get_fine_grained_status(smu, clk_type,
+ &is_fine_grained);
if (ret) {
dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
return ret;
}
+ if (is_fine_grained)
+ single_dpm_table->flags |= SMU_DPM_TABLE_FINE_GRAINED;
}
for (i = 0; i < single_dpm_table->count; i++) {
@@ -1945,11 +1941,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
single_dpm_table->dpm_levels[i].value = clk;
single_dpm_table->dpm_levels[i].enabled = true;
-
- if (i == 0)
- single_dpm_table->min = clk;
- else if (i == single_dpm_table->count - 1)
- single_dpm_table->max = clk;
}
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index e6ae77223b7c..bc8293f45a13 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -571,11 +571,12 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
int ret = 0;
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
+ dpm_table->clk_type = SMU_SOCCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_SOCCLK,
@@ -586,12 +587,11 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.socclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_GFXCLK,
@@ -613,18 +613,16 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
skutable->DriverReportedClocks.GameClockAc)) {
dpm_table->dpm_levels[dpm_table->count - 1].value =
skutable->DriverReportedClocks.GameClockAc;
- dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
}
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.gfxclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* uclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
+ dpm_table->clk_type = SMU_UCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_UCLK,
@@ -635,12 +633,11 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.uclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
+ dpm_table->clk_type = SMU_FCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_FCLK,
@@ -651,12 +648,11 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.fclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* vclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.vclk_table;
+ dpm_table->clk_type = SMU_VCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_VCLK,
@@ -667,12 +663,11 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.vclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dclk_table;
+ dpm_table->clk_type = SMU_DCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCLK,
@@ -683,12 +678,11 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.dclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
+ dpm_table->clk_type = SMU_DCEFCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCEFCLK,
@@ -699,8 +693,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.dcefclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
return 0;
@@ -876,7 +868,7 @@ static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
switch (clk_type) {
case SMU_MCLK:
@@ -913,9 +905,9 @@ static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (min)
- *min = dpm_table->min;
+ *min = SMU_DPM_TABLE_MIN(dpm_table);
if (max)
- *max = dpm_table->max;
+ *max = SMU_DPM_TABLE_MAX(dpm_table);
return 0;
}
@@ -1193,7 +1185,7 @@ static int smu_v13_0_0_emit_clk_levels(struct smu_context *smu,
OverDriveTableExternal_t *od_table =
(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
int i, curr_freq, size = *offset, start_offset = *offset;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
int32_t min_value, max_value;
@@ -1248,7 +1240,7 @@ static int smu_v13_0_0_emit_clk_levels(struct smu_context *smu,
return ret;
}
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/*
* For fine grained dpms, there are only two dpm levels:
* - level 0 -> min clock freq
@@ -1988,7 +1980,7 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
uint32_t min_freq, max_freq;
int ret = 0;
@@ -2034,7 +2026,7 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/* There is only 2 levels for fine grained DPM */
soft_max_level = (soft_max_level >= 1 ? 1 : 0);
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
@@ -2302,18 +2294,12 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_13_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_13_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_13_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct smu_table_context *table_context = &smu->smu_table;
@@ -2321,38 +2307,39 @@ static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
DriverReportedClocks_t driver_clocks =
pptable->SkuTable.DriverReportedClocks;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
if (driver_clocks.GameClockAc &&
- (driver_clocks.GameClockAc < gfx_table->max))
+ (driver_clocks.GameClockAc < SMU_DPM_TABLE_MAX(gfx_table)))
pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
else
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
- pstate_table->vclk_pstate.min = vclk_table->min;
- pstate_table->vclk_pstate.peak = vclk_table->max;
+ pstate_table->vclk_pstate.min = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->vclk_pstate.peak = SMU_DPM_TABLE_MAX(vclk_table);
- pstate_table->dclk_pstate.min = dclk_table->min;
- pstate_table->dclk_pstate.peak = dclk_table->max;
+ pstate_table->dclk_pstate.min = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->dclk_pstate.peak = SMU_DPM_TABLE_MAX(dclk_table);
- pstate_table->fclk_pstate.min = fclk_table->min;
- pstate_table->fclk_pstate.peak = fclk_table->max;
+ pstate_table->fclk_pstate.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.peak = SMU_DPM_TABLE_MAX(fclk_table);
if (driver_clocks.BaseClockAc &&
- driver_clocks.BaseClockAc < gfx_table->max)
+ driver_clocks.BaseClockAc < SMU_DPM_TABLE_MAX(gfx_table))
pstate_table->gfxclk_pstate.standard =
driver_clocks.BaseClockAc;
else
- pstate_table->gfxclk_pstate.standard = gfx_table->max;
- pstate_table->uclk_pstate.standard = mem_table->max;
- pstate_table->socclk_pstate.standard = soc_table->min;
- pstate_table->vclk_pstate.standard = vclk_table->min;
- pstate_table->dclk_pstate.standard = dclk_table->min;
- pstate_table->fclk_pstate.standard = fclk_table->min;
+ pstate_table->gfxclk_pstate.standard =
+ SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->uclk_pstate.standard = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->socclk_pstate.standard = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->vclk_pstate.standard = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->dclk_pstate.standard = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->fclk_pstate.standard = SMU_DPM_TABLE_MIN(fclk_table);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 7ba299d5d7ca..adf973ea2809 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -251,7 +251,7 @@ static const uint8_t smu_v13_0_6_throttler_map[] = {
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
uint32_t feature_num;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
uint32_t *freq_table;
};
@@ -987,7 +987,7 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
uint32_t min_clk, max_clk, param;
int ret = 0, clk_id = 0;
@@ -1018,9 +1018,8 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
return -EINVAL;
}
- min_clk = dpm_table->min;
- max_clk = dpm_table->max;
-
+ min_clk = SMU_DPM_TABLE_MIN(dpm_table);
+ max_clk = SMU_DPM_TABLE_MAX(dpm_table);
if (min)
*min = min_clk;
if (max)
@@ -1090,7 +1089,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_13_0_dpm_table *dpm_table = NULL;
+ struct smu_dpm_table *dpm_table = NULL;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t gfxclkmin, gfxclkmax, levels;
@@ -1126,6 +1125,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
smu_v13_0_6_pm_policy_init(smu);
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
/* In the case of gfxclk, only fine-grained dpm is honored.
* Get min/max values from FW.
@@ -1134,20 +1134,15 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
&gfxclkmin, &gfxclkmax);
if (ret)
return ret;
-
dpm_table->count = 2;
dpm_table->dpm_levels[0].value = gfxclkmin;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->dpm_levels[1].value = gfxclkmax;
dpm_table->dpm_levels[1].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[1].value;
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
@@ -1160,15 +1155,12 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
return ret;
}
dpm_table->count = levels;
+ dpm_table->clk_type = dpm_map[j].clk_type;
for (i = 0; i < dpm_table->count; ++i) {
dpm_table->dpm_levels[i].value =
dpm_map[j].freq_table[i];
dpm_table->dpm_levels[i].enabled = true;
-
}
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
-
}
return 0;
@@ -1205,28 +1197,25 @@ static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->gfxclk_pstate.curr.min = SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
- pstate_table->uclk_pstate.curr.min = mem_table->min;
- pstate_table->uclk_pstate.curr.max = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->uclk_pstate.curr.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.curr.max = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
- pstate_table->socclk_pstate.curr.min = soc_table->min;
- pstate_table->socclk_pstate.curr.max = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
+ pstate_table->socclk_pstate.curr.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.curr.max = SMU_DPM_TABLE_MAX(soc_table);
if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
@@ -1249,9 +1238,10 @@ static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
return 0;
}
-static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
- struct pp_clock_levels_with_latency *clocks,
- struct smu_13_0_dpm_table *dpm_table)
+static int
+smu_v13_0_6_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct smu_dpm_table *dpm_table)
{
int i, count;
@@ -1403,7 +1393,7 @@ static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
}
static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
- struct smu_13_0_dpm_table *single_dpm_table,
+ struct smu_dpm_table *single_dpm_table,
uint32_t curr_clk, const char *clk_name)
{
struct pp_clock_levels_with_latency clocks;
@@ -1460,7 +1450,7 @@ static int smu_v13_0_6_emit_clk_levels(struct smu_context *smu,
int now, size = *offset, start_offset = *offset;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t min_clk, max_clk;
@@ -1489,8 +1479,8 @@ static int smu_v13_0_6_emit_clk_levels(struct smu_context *smu,
}
single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
- min_clk = single_dpm_table->min;
- max_clk = single_dpm_table->max;
+ min_clk = SMU_DPM_TABLE_MIN(single_dpm_table);
+ max_clk = SMU_DPM_TABLE_MAX(single_dpm_table);
if (now < SMU_13_0_6_DSCLK_THRESHOLD) {
size += sysfs_emit_at(buf, size, "S: %uMhz *\n",
@@ -1690,7 +1680,7 @@ static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
enum smu_clk_type type, uint32_t mask)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table = NULL;
+ struct smu_dpm_table *single_dpm_table = NULL;
uint32_t soft_min_level, soft_max_level;
int ret = 0;
@@ -2156,10 +2146,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
{
struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *uclk_table =
- &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *uclk_table = &dpm_context->dpm_tables.uclk_table;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
int ret;
@@ -2167,7 +2155,8 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
(level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
}
switch (level) {
@@ -2175,24 +2164,32 @@ static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
return 0;
case AMD_DPM_FORCED_LEVEL_AUTO:
- if ((gfx_table->min != pstate_table->gfxclk_pstate.curr.min) ||
- (gfx_table->max != pstate_table->gfxclk_pstate.curr.max)) {
+ if ((SMU_DPM_TABLE_MIN(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.min) ||
+ (SMU_DPM_TABLE_MAX(gfx_table) !=
+ pstate_table->gfxclk_pstate.curr.max)) {
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
- smu, gfx_table->min, gfx_table->max);
+ smu, SMU_DPM_TABLE_MIN(gfx_table),
+ SMU_DPM_TABLE_MAX(gfx_table));
if (ret)
return ret;
- pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
- pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
+ pstate_table->gfxclk_pstate.curr.min =
+ SMU_DPM_TABLE_MIN(gfx_table);
+ pstate_table->gfxclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(gfx_table);
}
- if (uclk_table->max != pstate_table->uclk_pstate.curr.max) {
+ if (SMU_DPM_TABLE_MAX(uclk_table) !=
+ pstate_table->uclk_pstate.curr.max) {
/* Min UCLK is not expected to be changed */
ret = smu_v13_0_set_soft_freq_limited_range(
- smu, SMU_UCLK, 0, uclk_table->max, false);
+ smu, SMU_UCLK, 0, SMU_DPM_TABLE_MAX(uclk_table),
+ false);
if (ret)
return ret;
- pstate_table->uclk_pstate.curr.max = uclk_table->max;
+ pstate_table->uclk_pstate.curr.max =
+ SMU_DPM_TABLE_MAX(uclk_table);
}
smu_v13_0_reset_custom_level(smu);
@@ -2265,8 +2262,9 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
- if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
- (max > dpm_context->dpm_tables.gfx_table.max)) {
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
+ if (!max || (max < min_clk) || (max > max_clk)) {
dev_warn(
adev->dev,
"Invalid max frequency %d MHz specified for
determinism\n",
@@ -2275,10 +2273,8 @@ static int
smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
}
/* Restore default min/max clocks and enable determinism */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
- max_clk);
+ max_clk);
if (!ret) {
usleep_range(500, 1000);
ret = smu_cmn_send_smc_msg_with_param(
@@ -2320,14 +2316,14 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
"Input parameter number not correct\n");
return -EINVAL;
}
-
+ min_clk = SMU_DPM_TABLE_MIN(&dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.gfx_table);
if (input[0] == 0) {
- if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
+ if (input[1] < min_clk) {
dev_warn(
smu->adev->dev,
"Minimum GFX clk (%ld) MHz specified is
less than the minimum allowed (%d) MHz\n",
- input[1],
- dpm_context->dpm_tables.gfx_table.min);
+ input[1], min_clk);
pstate_table->gfxclk_pstate.custom.min =
pstate_table->gfxclk_pstate.curr.min;
return -EINVAL;
@@ -2335,12 +2331,11 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
pstate_table->gfxclk_pstate.custom.min = input[1];
} else if (input[0] == 1) {
- if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
+ if (input[1] > max_clk) {
dev_warn(
smu->adev->dev,
"Maximum GFX clk (%ld) MHz specified is
greater than the maximum allowed (%d) MHz\n",
- input[1],
- dpm_context->dpm_tables.gfx_table.max);
+ input[1], max_clk);
pstate_table->gfxclk_pstate.custom.max =
pstate_table->gfxclk_pstate.curr.max;
return -EINVAL;
@@ -2364,18 +2359,18 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
"UCLK_LIMITS setting not supported!\n");
return -EOPNOTSUPP;
}
-
+ max_clk =
+ SMU_DPM_TABLE_MAX(&dpm_context->dpm_tables.uclk_table);
if (input[0] == 0) {
dev_info(smu->adev->dev,
"Setting min UCLK level is not supported");
return -EINVAL;
} else if (input[0] == 1) {
- if (input[1] > dpm_context->dpm_tables.uclk_table.max) {
+ if (input[1] > max_clk) {
dev_warn(
smu->adev->dev,
"Maximum UCLK (%ld) MHz specified is
greater than the maximum allowed (%d) MHz\n",
- input[1],
- dpm_context->dpm_tables.uclk_table.max);
+ input[1], max_clk);
pstate_table->uclk_pstate.custom.max =
pstate_table->uclk_pstate.curr.max;
return -EINVAL;
@@ -2392,8 +2387,10 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
} else {
/* Use the default frequencies for manual and determinism mode */
- min_clk = dpm_context->dpm_tables.gfx_table.min;
- max_clk = dpm_context->dpm_tables.gfx_table.max;
+ min_clk = SMU_DPM_TABLE_MIN(
+ &dpm_context->dpm_tables.gfx_table);
+ max_clk = SMU_DPM_TABLE_MAX(
+ &dpm_context->dpm_tables.gfx_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_GFXCLK, min_clk, max_clk, false);
@@ -2401,8 +2398,10 @@ static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
if (ret)
return ret;
- min_clk = dpm_context->dpm_tables.uclk_table.min;
- max_clk = dpm_context->dpm_tables.uclk_table.max;
+ min_clk = SMU_DPM_TABLE_MIN(
+ &dpm_context->dpm_tables.uclk_table);
+ max_clk = SMU_DPM_TABLE_MAX(
+ &dpm_context->dpm_tables.uclk_table);
ret = smu_v13_0_6_set_soft_freq_limited_range(
smu, SMU_UCLK, min_clk, max_clk, false);
if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 4e699d3ab1e3..8f664ef32570 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -578,11 +578,12 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
SkuTable_t *skutable = &driver_ppt->SkuTable;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
int ret = 0;
/* socclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.soc_table;
+ dpm_table->clk_type = SMU_SOCCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_SOCCLK,
@@ -593,12 +594,11 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.socclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* gfxclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.gfx_table;
+ dpm_table->clk_type = SMU_GFXCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_GFXCLK,
@@ -611,18 +611,16 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
skutable->DriverReportedClocks.GameClockAc)) {
dpm_table->dpm_levels[dpm_table->count - 1].value =
skutable->DriverReportedClocks.GameClockAc;
- dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
}
} else {
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.gfxclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* uclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.uclk_table;
+ dpm_table->clk_type = SMU_UCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_UCLK,
@@ -633,12 +631,11 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.uclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* fclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.fclk_table;
+ dpm_table->clk_type = SMU_FCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_FCLK,
@@ -649,12 +646,11 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.fclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* vclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.vclk_table;
+ dpm_table->clk_type = SMU_VCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_VCLK,
@@ -665,12 +661,11 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.vclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dclk_table;
+ dpm_table->clk_type = SMU_DCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCLK,
@@ -681,12 +676,11 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.dclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
/* dcefclk dpm table setup */
dpm_table = &dpm_context->dpm_tables.dcef_table;
+ dpm_table->clk_type = SMU_DCEFCLK;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
ret = smu_v13_0_set_single_dpm_table(smu,
SMU_DCEFCLK,
@@ -697,8 +691,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
dpm_table->count = 1;
dpm_table->dpm_levels[0].value =
smu->smu_table.boot_values.dcefclk / 100;
dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
}
return 0;
@@ -865,7 +857,7 @@ static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *dpm_table;
+ struct smu_dpm_table *dpm_table;
switch (clk_type) {
case SMU_MCLK:
@@ -902,9 +894,9 @@ static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
}
if (min)
- *min = dpm_table->min;
+ *min = SMU_DPM_TABLE_MIN(dpm_table);
if (max)
- *max = dpm_table->max;
+ *max = SMU_DPM_TABLE_MAX(dpm_table);
return 0;
}
@@ -1182,7 +1174,7 @@ static int smu_v13_0_7_emit_clk_levels(struct smu_context *smu,
OverDriveTableExternal_t *od_table =
(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
int i, curr_freq, size = *offset, start_offset = *offset;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
int32_t min_value, max_value;
@@ -1237,7 +1229,7 @@ static int smu_v13_0_7_emit_clk_levels(struct smu_context *smu,
return ret;
}
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/*
* For fine grained dpms, there are only two dpm levels:
* - level 0 -> min clock freq
@@ -1977,7 +1969,7 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
- struct smu_13_0_dpm_table *single_dpm_table;
+ struct smu_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
uint32_t min_freq, max_freq;
int ret = 0;
@@ -2023,7 +2015,7 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
case SMU_VCLK1:
case SMU_DCLK:
case SMU_DCLK1:
- if (single_dpm_table->is_fine_grained) {
+ if (single_dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED) {
/* There is only 2 levels for fine grained DPM */
soft_max_level = (soft_max_level >= 1 ? 1 : 0);
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
@@ -2288,18 +2280,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
{
struct smu_13_0_dpm_context *dpm_context =
smu->smu_dpm.dpm_context;
- struct smu_13_0_dpm_table *gfx_table =
- &dpm_context->dpm_tables.gfx_table;
- struct smu_13_0_dpm_table *mem_table =
- &dpm_context->dpm_tables.uclk_table;
- struct smu_13_0_dpm_table *soc_table =
- &dpm_context->dpm_tables.soc_table;
- struct smu_13_0_dpm_table *vclk_table =
- &dpm_context->dpm_tables.vclk_table;
- struct smu_13_0_dpm_table *dclk_table =
- &dpm_context->dpm_tables.dclk_table;
- struct smu_13_0_dpm_table *fclk_table =
- &dpm_context->dpm_tables.fclk_table;
+ struct smu_dpm_table *gfx_table = &dpm_context->dpm_tables.gfx_table;
+ struct smu_dpm_table *mem_table = &dpm_context->dpm_tables.uclk_table;
+ struct smu_dpm_table *soc_table = &dpm_context->dpm_tables.soc_table;
+ struct smu_dpm_table *vclk_table = &dpm_context->dpm_tables.vclk_table;
+ struct smu_dpm_table *dclk_table = &dpm_context->dpm_tables.dclk_table;
+ struct smu_dpm_table *fclk_table = &dpm_context->dpm_tables.fclk_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
struct smu_table_context *table_context = &smu->smu_table;
@@ -2307,38 +2293,39 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
DriverReportedClocks_t driver_clocks =
pptable->SkuTable.DriverReportedClocks;
- pstate_table->gfxclk_pstate.min = gfx_table->min;
+ pstate_table->gfxclk_pstate.min = SMU_DPM_TABLE_MIN(gfx_table);
if (driver_clocks.GameClockAc &&
- (driver_clocks.GameClockAc < gfx_table->max))
+ (driver_clocks.GameClockAc < SMU_DPM_TABLE_MAX(gfx_table)))
pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
else
- pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ pstate_table->gfxclk_pstate.peak = SMU_DPM_TABLE_MAX(gfx_table);
- pstate_table->uclk_pstate.min = mem_table->min;
- pstate_table->uclk_pstate.peak = mem_table->max;
+ pstate_table->uclk_pstate.min = SMU_DPM_TABLE_MIN(mem_table);
+ pstate_table->uclk_pstate.peak = SMU_DPM_TABLE_MAX(mem_table);
- pstate_table->socclk_pstate.min = soc_table->min;
- pstate_table->socclk_pstate.peak = soc_table->max;
+ pstate_table->socclk_pstate.min = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->socclk_pstate.peak = SMU_DPM_TABLE_MAX(soc_table);
- pstate_table->vclk_pstate.min = vclk_table->min;
- pstate_table->vclk_pstate.peak = vclk_table->max;
+ pstate_table->vclk_pstate.min = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->vclk_pstate.peak = SMU_DPM_TABLE_MAX(vclk_table);
- pstate_table->dclk_pstate.min = dclk_table->min;
- pstate_table->dclk_pstate.peak = dclk_table->max;
+ pstate_table->dclk_pstate.min = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->dclk_pstate.peak = SMU_DPM_TABLE_MAX(dclk_table);
- pstate_table->fclk_pstate.min = fclk_table->min;
- pstate_table->fclk_pstate.peak = fclk_table->max;
+ pstate_table->fclk_pstate.min = SMU_DPM_TABLE_MIN(fclk_table);
+ pstate_table->fclk_pstate.peak = SMU_DPM_TABLE_MAX(fclk_table);
if (driver_clocks.BaseClockAc &&
- driver_clocks.BaseClockAc < gfx_table->max)
+ driver_clocks.BaseClockAc < SMU_DPM_TABLE_MAX(gfx_table))
pstate_table->gfxclk_pstate.standard =
driver_clocks.BaseClockAc;
else
- pstate_table->gfxclk_pstate.standard = gfx_table->max;
- pstate_table->uclk_pstate.standard = mem_table->max;
- pstate_table->socclk_pstate.standard = soc_table->min;
- pstate_table->vclk_pstate.standard = vclk_table->min;
- pstate_table->dclk_pstate.standard = dclk_table->min;
- pstate_table->fclk_pstate.standard = fclk_table->min;
+ pstate_table->gfxclk_pstate.standard =
+ SMU_DPM_TABLE_MAX(gfx_table);
+ pstate_table->uclk_pstate.standard = SMU_DPM_TABLE_MAX(mem_table);
+ pstate_table->socclk_pstate.standard = SMU_DPM_TABLE_MIN(soc_table);
+ pstate_table->vclk_pstate.standard = SMU_DPM_TABLE_MIN(vclk_table);
+ pstate_table->dclk_pstate.standard = SMU_DPM_TABLE_MIN(dclk_table);
+ pstate_table->fclk_pstate.standard = SMU_DPM_TABLE_MIN(fclk_table);
return 0;
}
--
2.49.0