From: Asad Kamal <[email protected]>

Add SMUv13.0.12 PPT interface to fetch metrics data

Signed-off-by: Asad Kamal <[email protected]>
Reviewed-by: Lijo Lazar <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
---
 .../drm/amd/pm/swsmu/inc/smu_v13_0_12_ppt.h   |  35 ++
 drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile   |   2 +-
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c | 468 ++++++++++++++++++
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  |  23 +-
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h  |   2 +
 5 files changed, 526 insertions(+), 4 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_12_ppt.h
 create mode 100644 drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c

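Note on the Q10 conversions used throughout the new smu_v13_0_12_ppt.c: the
metrics table reports most values in unsigned Q10 fixed point, and the
SMUQ10_TO_UINT/SMUQ10_FRAC/SMUQ10_ROUND macros defined there convert them to
plain integers with round-to-nearest. A minimal, self-contained sketch of that
conversion (the sample value is illustrative, not taken from hardware):

    #include <stdint.h>
    #include <stdio.h>

    /* Same Q10 helpers as in smu_v13_0_12_ppt.c: integer part in the upper
     * bits, fraction in the low 10 bits; round up when the fraction >= 0.5.
     */
    #define SMUQ10_TO_UINT(x) ((x) >> 10)
    #define SMUQ10_FRAC(x) ((x) & 0x3ff)
    #define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

    int main(void)
    {
            /* Hypothetical reading: 1700.75 MHz encoded as Q10 (1700 * 1024 + 768) */
            uint32_t raw = (1700u << 10) | 768u;

            printf("truncated: %u MHz, rounded: %u MHz\n",
                   SMUQ10_TO_UINT(raw), SMUQ10_ROUND(raw)); /* prints 1700, 1701 */
            return 0;
    }
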
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_12_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_12_ppt.h
new file mode 100644
index 0000000000000..2855adde40f5e
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_12_ppt.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V13_0_12_PPT_H__
+#define __SMU_V13_0_12_PPT_H__
+
+int smu_v13_0_12_tables_init(struct smu_context *smu);
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+                                     MetricsMember_t member,
+                                     uint32_t *value);
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table);
+extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index 7f3493b6c53c3..e8d9b284869aa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -24,7 +24,7 @@
 # It provides the smu management services for the driver.
 
 SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_0_ppt.o smu_v13_0_4_ppt.o \
-           smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_6_ppt.o
+           smu_v13_0_5_ppt.o smu_v13_0_7_ppt.o smu_v13_0_12_ppt.o smu_v13_0_6_ppt.o
 
 AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
 
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
new file mode 100644
index 0000000000000..b8a2cdaf09169
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v13_0_12_pmfw.h"
+#include "smu_v13_0_6_ppt.h"
+#include "smu13_driver_if_v13_0_6.h"
+#include "smu_v13_0.h"
+#include "smu_v13_0_12_ppt.h"
+#include "amdgpu_xgmi.h"
+#include <linux/pci.h>
+#include "smu_cmn.h"
+
+#undef MP1_Public
+#undef smnMP1_FIRMWARE_FLAGS
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
+#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature)                   \
+       [smu_feature] = { 1, (smu_13_0_12_feature) }
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE                                                        \
+       (FEATURE_MASK(FEATURE_DATA_CALCULATION) |                              \
+        FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) |   \
+        FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) |   \
+        FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) |     \
+        FEATURE_MASK(FEATURE_DPM_VCN))
+
+const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = {
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT,          FEATURE_DATA_CALCULATION),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT,                 FEATURE_DPM_GFXCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT,                   FEATURE_DPM_UCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT,                 FEATURE_DPM_SOCCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT,                   FEATURE_DPM_FCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT,                   FEATURE_DPM_LCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT,                   FEATURE_DPM_VCN),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT,                   FEATURE_DPM_VCN),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT,                   FEATURE_DPM_XGMI),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT,                  FEATURE_DS_GFXCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT,                  FEATURE_DS_SOCCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT,                    FEATURE_DS_LCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT,                    FEATURE_DS_FCLK),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT,                    FEATURE_DPM_VCN),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_PPT_BIT,                        FEATURE_PPT),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_TDC_BIT,                        FEATURE_TDC),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT,                  FEATURE_APCC_DFLL),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_MP1_CG_BIT,                     FEATURE_SMU_CG),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_FW_CTF_BIT,                     FEATURE_FW_CTF),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_THERMAL_BIT,                    FEATURE_THERMAL),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_SOC_PCC_BIT,                    FEATURE_SOC_PCC),
+       SMU_13_0_12_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,      FEATURE_XGMI_PER_LINK_PWR_DOWN),
+};
+
+struct PPTable_t {
+       uint32_t MaxSocketPowerLimit;
+       uint32_t MaxGfxclkFrequency;
+       uint32_t MinGfxclkFrequency;
+       uint32_t FclkFrequencyTable[4];
+       uint32_t UclkFrequencyTable[4];
+       uint32_t SocclkFrequencyTable[4];
+       uint32_t VclkFrequencyTable[4];
+       uint32_t DclkFrequencyTable[4];
+       uint32_t LclkFrequencyTable[4];
+       uint32_t MaxLclkDpmRange;
+       uint32_t MinLclkDpmRange;
+       uint64_t PublicSerialNumber_AID;
+       bool Init;
+};
+
+#define SMUQ10_TO_UINT(x) ((x) >> 10)
+#define SMUQ10_FRAC(x) ((x) & 0x3ff)
+#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+
+int smu_v13_0_12_tables_init(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+       struct amdgpu_device *adev = smu->adev;
+
+       if (!(adev->flags & AMD_IS_APU))
+               SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
+                              PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+                      sizeof(MetricsTable_t),
+                      PAGE_SIZE,
+                      AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+       SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+                      PAGE_SIZE,
+                      AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+       smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+       if (!smu_table->metrics_table)
+               return -ENOMEM;
+       smu_table->metrics_time = 0;
+
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_6);
+       smu_table->gpu_metrics_table =
+               kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+       if (!smu_table->gpu_metrics_table) {
+               kfree(smu_table->metrics_table);
+               return -ENOMEM;
+       }
+
+       smu_table->driver_pptable =
+               kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+       if (!smu_table->driver_pptable) {
+               kfree(smu_table->metrics_table);
+               kfree(smu_table->gpu_metrics_table);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+       struct PPTable_t *pptable =
+               (struct PPTable_t *)smu_table->driver_pptable;
+       int ret, i, retry = 100;
+       uint32_t table_version;
+
+       /* Store one-time values in driver PPTable */
+       if (!pptable->Init) {
+               while (--retry) {
+                       ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
+                       if (ret)
+                               return ret;
+
+                       /* Ensure that metrics have been updated */
+                       if (metrics->AccumulationCounter)
+                               break;
+
+                       usleep_range(1000, 1100);
+               }
+
+               if (!retry)
+                       return -ETIME;
+
+               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
+                                          &table_version);
+               if (ret)
+                       return ret;
+               smu_table->tables[SMU_TABLE_SMU_METRICS].version =
+                       table_version;
+
+               pptable->MaxSocketPowerLimit =
+                       SMUQ10_ROUND(metrics->MaxSocketPowerLimit);
+               pptable->MaxGfxclkFrequency =
+                       SMUQ10_ROUND(metrics->MaxGfxclkFrequency);
+               pptable->MinGfxclkFrequency =
+                       SMUQ10_ROUND(metrics->MinGfxclkFrequency);
+
+               for (i = 0; i < 4; ++i) {
+                       pptable->FclkFrequencyTable[i] =
+                               SMUQ10_ROUND(metrics->FclkFrequencyTable[i]);
+                       pptable->UclkFrequencyTable[i] =
+                               SMUQ10_ROUND(metrics->UclkFrequencyTable[i]);
+                       pptable->SocclkFrequencyTable[i] =
+                               SMUQ10_ROUND(metrics->SocclkFrequencyTable[i]);
+                       pptable->VclkFrequencyTable[i] =
+                               SMUQ10_ROUND(metrics->VclkFrequencyTable[i]);
+                       pptable->DclkFrequencyTable[i] =
+                               SMUQ10_ROUND(metrics->DclkFrequencyTable[i]);
+                       pptable->LclkFrequencyTable[i] =
+                               SMUQ10_ROUND(metrics->LclkFrequencyTable[i]);
+               }
+
+               /* use AID0 serial number by default */
+               pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+
+               pptable->Init = true;
+       }
+
+       return 0;
+}
+
+int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
+                                     MetricsMember_t member,
+                                     uint32_t *value)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+       int xcc_id;
+
+       ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
+       if (ret)
+               return ret;
+
+       /* For clocks with multiple instances, only report the first one */
+       switch (member) {
+       case METRICS_CURR_GFXCLK:
+       case METRICS_AVERAGE_GFXCLK:
+               xcc_id = GET_INST(GC, 0);
+               *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+               break;
+       case METRICS_CURR_SOCCLK:
+       case METRICS_AVERAGE_SOCCLK:
+               *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+               break;
+       case METRICS_CURR_UCLK:
+       case METRICS_AVERAGE_UCLK:
+               *value = SMUQ10_ROUND(metrics->UclkFrequency);
+               break;
+       case METRICS_CURR_VCLK:
+               *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+               break;
+       case METRICS_CURR_DCLK:
+               *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+               break;
+       case METRICS_CURR_FCLK:
+               *value = SMUQ10_ROUND(metrics->FclkFrequency);
+               break;
+       case METRICS_AVERAGE_GFXACTIVITY:
+               *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+               break;
+       case METRICS_AVERAGE_MEMACTIVITY:
+               *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+               break;
+       case METRICS_CURR_SOCKETPOWER:
+               *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+               break;
+       case METRICS_TEMPERATURE_HOTSPOT:
+               *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+                        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       case METRICS_TEMPERATURE_MEM:
+               *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+                        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       /* This is the max of all VRs and not just SOC VR.
+        * No need to define another data type for the same.
+        */
+       case METRICS_TEMPERATURE_VRSOC:
+               *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+                        SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+               break;
+       default:
+               *value = UINT_MAX;
+               break;
+       }
+
+       return ret;
+}
+
+static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
+                                        uint64_t *feature_mask)
+{
+       int ret;
+
+       ret = smu_cmn_get_enabled_mask(smu, feature_mask);
+
+       if (ret == -EIO) {
+               *feature_mask = 0;
+               ret = 0;
+       }
+
+       return ret;
+}
+
+bool smu_v13_0_12_is_dpm_running(struct smu_context *smu)
+{
+       int ret;
+       uint64_t feature_enabled;
+
+       ret = smu_v13_0_12_get_enabled_mask(smu, &feature_enabled);
+
+       if (ret)
+               return false;
+
+       return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct gpu_metrics_v1_6 *gpu_metrics =
+               (struct gpu_metrics_v1_6 *)smu_table->gpu_metrics_table;
+       int ret = 0, xcc_id, inst, i, j, k, idx;
+       struct amdgpu_device *adev = smu->adev;
+       MetricsTable_t *metrics;
+       struct amdgpu_xcp *xcp;
+       u32 inst_mask;
+
+       metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+       ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+       if (ret) {
+               kfree(metrics);
+               return ret;
+       }
+
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 6);
+
+       gpu_metrics->temperature_hotspot =
+               SMUQ10_ROUND(metrics->MaxSocketTemperature);
+       /* Individual HBM stack temperature is not reported */
+       gpu_metrics->temperature_mem =
+               SMUQ10_ROUND(metrics->MaxHbmTemperature);
+       /* Reports max temperature of all voltage rails */
+       gpu_metrics->temperature_vrsoc =
+               SMUQ10_ROUND(metrics->MaxVrTemperature);
+
+       gpu_metrics->average_gfx_activity =
+               SMUQ10_ROUND(metrics->SocketGfxBusy);
+       gpu_metrics->average_umc_activity =
+               SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+
+       gpu_metrics->curr_socket_power =
+               SMUQ10_ROUND(metrics->SocketPower);
+       /* Energy counter reported in 15.259uJ (2^-16) units */
+       gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+
+       for (i = 0; i < MAX_GFX_CLKS; i++) {
+               xcc_id = GET_INST(GC, i);
+               if (xcc_id >= 0)
+                       gpu_metrics->current_gfxclk[i] =
+                               SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+
+               if (i < MAX_CLKS) {
+                       gpu_metrics->current_socclk[i] =
+                               SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+                       inst = GET_INST(VCN, i);
+                       if (inst >= 0) {
+                               gpu_metrics->current_vclk0[i] =
+                                       SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+                               gpu_metrics->current_dclk0[i] =
+                                       SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+                       }
+               }
+       }
+
+       gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+
+       /* Total accumulated cycle counter */
+       gpu_metrics->accumulation_counter = metrics->AccumulationCounter;
+
+       /* Accumulated throttler residencies */
+       gpu_metrics->prochot_residency_acc = metrics->ProchotResidencyAcc;
+       gpu_metrics->ppt_residency_acc = metrics->PptResidencyAcc;
+       gpu_metrics->socket_thm_residency_acc = metrics->SocketThmResidencyAcc;
+       gpu_metrics->vr_thm_residency_acc = metrics->VrThmResidencyAcc;
+       gpu_metrics->hbm_thm_residency_acc = metrics->HbmThmResidencyAcc;
+
+       /* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
+       gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+
+       if (!(adev->flags & AMD_IS_APU)) {
+               gpu_metrics->pcie_link_width = metrics->PCIeLinkWidth;
+               gpu_metrics->pcie_link_speed =
+                               pcie_gen_to_speed(metrics->PCIeLinkSpeed);
+
+               gpu_metrics->pcie_bandwidth_acc =
+                               SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+               gpu_metrics->pcie_bandwidth_inst =
+                               SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+               gpu_metrics->pcie_l0_to_recov_count_acc =
+                               metrics->PCIeL0ToRecoveryCountAcc;
+               gpu_metrics->pcie_replay_count_acc =
+                               metrics->PCIenReplayAAcc;
+               gpu_metrics->pcie_replay_rover_count_acc =
+                               metrics->PCIenReplayARolloverCountAcc;
+               gpu_metrics->pcie_nak_sent_count_acc =
+                               metrics->PCIeNAKSentCountAcc;
+               gpu_metrics->pcie_nak_rcvd_count_acc =
+                               metrics->PCIeNAKReceivedCountAcc;
+               gpu_metrics->pcie_lc_perf_other_end_recovery =
+                               metrics->PCIeOtherEndRecoveryAcc;
+
+       }
+
+       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+       gpu_metrics->gfx_activity_acc =
+               SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+       gpu_metrics->mem_activity_acc =
+               SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+
+       for (i = 0; i < NUM_XGMI_LINKS; i++) {
+               gpu_metrics->xgmi_read_data_acc[i] =
+                       SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+               gpu_metrics->xgmi_write_data_acc[i] =
+                       SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+       }
+
+       gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
+
+       for_each_xcp(adev->xcp_mgr, xcp, i) {
+               amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+               idx = 0;
+               for_each_inst(k, inst_mask) {
+                       /* Both JPEG and VCN have the same instances */
+                       inst = GET_INST(VCN, k);
+
+                       for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+                               gpu_metrics->xcp_stats[i].jpeg_busy
+                                       [(idx * adev->jpeg.num_jpeg_rings) + j] =
+                                       SMUQ10_ROUND(metrics->JpegBusy
+                                                       [(inst * adev->jpeg.num_jpeg_rings) + j]);
+                       }
+                       gpu_metrics->xcp_stats[i].vcn_busy[idx] =
+                              SMUQ10_ROUND(metrics->VcnBusy[inst]);
+                       idx++;
+
+               }
+
+               amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+               idx = 0;
+               for_each_inst(k, inst_mask) {
+                       inst = GET_INST(GC, k);
+                       gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
+                               SMUQ10_ROUND(metrics->GfxBusy[inst]);
+                       gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
+                               SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+                       idx++;
+               }
+       }
+
+       gpu_metrics->xgmi_link_width = SMUQ10_ROUND(metrics->XgmiWidth);
+       gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(metrics->XgmiBitrate);
+
+       gpu_metrics->firmware_timestamp = metrics->Timestamp;
+
+       *table = (void *)gpu_metrics;
+       kfree(metrics);
+
+       return sizeof(*gpu_metrics);
+}
+
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index f358a8abe91e5..6f25e657f1a29 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -36,6 +36,7 @@
 #include "atom.h"
 #include "power_state.h"
 #include "smu_v13_0.h"
+#include "smu_v13_0_12_ppt.h"
 #include "smu_v13_0_6_ppt.h"
 #include "nbio/nbio_7_4_offset.h"
 #include "nbio/nbio_7_4_sh_mask.h"
@@ -522,7 +523,10 @@ static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
 {
        int ret = 0;
 
-       ret = smu_v13_0_6_tables_init(smu);
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+               ret = smu_v13_0_12_tables_init(smu);
+       else
+               ret = smu_v13_0_6_tables_init(smu);
        if (ret)
                return ret;
 
@@ -544,7 +548,7 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
        return 0;
 }
 
-static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
+int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
                                         void *metrics_table, bool bypass_cache)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
@@ -620,6 +624,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
        int ret, i, retry = 100;
        uint32_t table_version;
 
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+               return smu_v13_0_12_setup_driver_pptable(smu);
+
        /* Store one-time values in driver PPTable */
        if (!pptable->Init) {
                while (--retry) {
@@ -995,6 +1002,9 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
        int ret = 0;
        int xcc_id;
 
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+               return smu_v13_0_12_get_smu_metrics_data(smu, member, value);
+
        ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
        if (ret)
                return ret;
@@ -2054,6 +2064,9 @@ static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
        int ret;
        uint64_t feature_enabled;
 
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+               return smu_v13_0_12_is_dpm_running(smu);
+
        ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
 
        if (ret)
@@ -2347,6 +2360,9 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
        u16 link_width_level;
        u32 inst_mask;
 
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+               return smu_v13_0_12_get_gpu_metrics(smu, table);
+
+       metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
        ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
        if (ret) {
@@ -3452,7 +3468,8 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
        smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
        smu->message_map = smu_v13_0_6_message_map;
        smu->clock_map = smu_v13_0_6_clk_map;
-       smu->feature_map = smu_v13_0_6_feature_mask_map;
+       smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
+               smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
        smu->table_map = smu_v13_0_6_table_map;
        smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
        smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index f0fa42a645c05..21a7e066c629c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -28,5 +28,7 @@
 #define SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL 0x2
 
 extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
+extern int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
+                                        void *metrics_table, bool bypass_cache);
 
 #endif
-- 
2.47.1
