Fill and publish GPU metrics in v1.9 format for SMUv13.0.6 SOCs

Define the SMU v13.0.6 GPU metrics layout as an X-macro field list,
declare the metrics class from it, and publish the data in v1.9 format
through the metrics table cache. The cached table replaces the
gpu_metrics_v1_8 snapshot, and per-XCP statistics are now reported as
flat per-instance arrays.

Signed-off-by: Lijo Lazar <lijo.la...@amd.com>
---
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 113 +++++++++--------
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h | 118 ++++++++++++++++++
 2 files changed, 177 insertions(+), 54 deletions(-)
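Reviewer note (below the ---, so it stays out of the commit message):
SMU_13_0_6_METRICS_FIELDS() introduced in the header is an X-macro list
consumed by DECLARE_SMU_METRICS_CLASS() from smu_cmn.h, which this patch
does not define. The snippet below is only a rough, hypothetical sketch
of the pattern; the SMU_MATTR()/SMU_MUNIT()/SMU_MTYPE()/SMU_SCALAR()/
SMU_ARRAY() definitions and the _sketch struct are stand-ins, not the
real macros, and kernel types (u16/u32/u64, struct metrics_table_header)
are assumed from linux/types.h and kgd_pp_interface.h:

    /*
     * Hypothetical expansion sketch -- the real helper macros live in
     * smu_cmn.h and may differ. It shows how a single field list can
     * drive the struct layout (and, presumably, per-field attribute
     * and unit metadata as well).
     */
    #define SMU_MATTR(x)	SMU_METRIC_ATTR_##x	/* attr id, dropped here */
    #define SMU_MUNIT(x)	SMU_METRIC_UNIT_##x	/* unit id, dropped here */
    #define SMU_MTYPE(x)	SMU_MTYPE_##x
    #define SMU_MTYPE_U16	u16
    #define SMU_MTYPE_U32	u32
    #define SMU_MTYPE_U64	u64

    /* Emit one struct member per list entry */
    #define SMU_SCALAR(attr, unit, type, name)		type name;
    #define SMU_ARRAY(attr, unit, type, name, num)	type name[num];

    struct smu_v13_0_6_gpu_metrics_sketch {
    	struct metrics_table_header header;	/* format/content revision */
    	SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY)
    };

With an expansion along these lines, the one-time
smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9) call in
smu_v13_0_6_tables_init() would stamp format revision 1 / content
revision 9 into the cached table's header, which is presumably why the
per-call smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8) is dropped
from smu_v13_0_6_get_gpu_metrics().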
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index ebee659f8a1c..654612623341 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -539,6 +539,7 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct smu_table *tables = smu_table->tables;
 	void *gpu_metrics_table __free(kfree) = NULL;
+	struct smu_v13_0_6_gpu_metrics *gpu_metrics;
 	void *driver_pptable __free(kfree) = NULL;
 	void *metrics_table __free(kfree) = NULL;
 	struct amdgpu_device *adev = smu->adev;
@@ -578,11 +579,22 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
 	if (!driver_pptable)
 		return -ENOMEM;
 
+	ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS,
+				   sizeof(struct smu_v13_0_6_gpu_metrics), 1);
+	if (ret)
+		return ret;
+
+	gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+			       *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
+
+	smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9);
 	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) {
 		ret = smu_v13_0_12_tables_init(smu);
-		if (ret)
+		if (ret) {
+			smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
 			return ret;
+		}
 	}
 
 	smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table);
@@ -721,6 +733,7 @@ static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
 {
 	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
 		smu_v13_0_12_tables_fini(smu);
+	smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
 	return smu_v13_0_fini_smc_tables(smu);
 }
 
@@ -2736,18 +2749,16 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
 static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
 {
 	struct smu_table_context *smu_table = &smu->smu_table;
-	struct gpu_metrics_v1_8 *gpu_metrics =
-		(struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
+	struct smu_table *tables = smu_table->tables;
+	struct smu_v13_0_6_gpu_metrics *gpu_metrics;
 	int version = smu_v13_0_6_get_metrics_version(smu);
 	MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
-	int ret = 0, xcc_id, inst, i, j, k, idx;
 	struct amdgpu_device *adev = smu->adev;
+	int ret = 0, xcc_id, inst, i, j;
 	MetricsTableV1_t *metrics_v1;
 	MetricsTableV2_t *metrics_v2;
-	struct amdgpu_xcp *xcp;
 	u16 link_width_level;
 	u8 num_jpeg_rings;
-	u32 inst_mask;
 	bool per_inst;
 
 	metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
@@ -2762,8 +2773,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 
 	metrics_v1 = (MetricsTableV1_t *)metrics_v0;
 	metrics_v2 = (MetricsTableV2_t *)metrics_v0;
-
-	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
+	gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+			       *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
 
 	gpu_metrics->temperature_hotspot =
 		SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
@@ -2885,55 +2896,49 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 			gpu_metrics->xgmi_link_status[j] = ret;
 	}
 
-	gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
-
 	per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
 
 	num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
-	for_each_xcp(adev->xcp_mgr, xcp, i) {
-		amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
-		idx = 0;
-		for_each_inst(k, inst_mask) {
-			/* Both JPEG and VCN has same instances */
-			inst = GET_INST(VCN, k);
-
-			for (j = 0; j < num_jpeg_rings; ++j) {
-				gpu_metrics->xcp_stats[i].jpeg_busy
-					[(idx * num_jpeg_rings) + j] =
-					SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version)
-						     [(inst * num_jpeg_rings) + j]);
-			}
-			gpu_metrics->xcp_stats[i].vcn_busy[idx] =
-				SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
-			idx++;
-
-		}
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		inst = GET_INST(JPEG, i);
+		for (j = 0; j < num_jpeg_rings; ++j)
+			gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] =
+				SMUQ10_ROUND(GET_METRIC_FIELD(
+					JpegBusy,
+					version)[(inst * num_jpeg_rings) + j]);
+	}
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		inst = GET_INST(VCN, i);
+		gpu_metrics->vcn_busy[i] =
+			SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+	}
 
-		if (per_inst) {
-			amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
-			idx = 0;
-			for_each_inst(k, inst_mask) {
-				inst = GET_INST(GC, k);
-				gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
-					SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
-				gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
-					SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
-									  version)[inst]);
-				if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
-					gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
-						SMUQ10_ROUND
-						(metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
-					gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
-						SMUQ10_ROUND
-						(metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
-					gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
-						SMUQ10_ROUND
-						(metrics_v0->GfxclkLowUtilizationAcc[inst]);
-					gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
-						SMUQ10_ROUND
-						(metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
-				}
-				idx++;
+	if (per_inst) {
+		for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+			inst = GET_INST(GC, i);
+			gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(
+				GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+			gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(
+				GET_GPU_METRIC_FIELD(GfxBusyAcc,
+						     version)[inst]);
+			if (smu_v13_0_6_cap_supported(
+				    smu, SMU_CAP(HST_LIMIT_METRICS))) {
+				gpu_metrics->gfx_below_host_limit_ppt_acc
+					[i] = SMUQ10_ROUND(
+					metrics_v0->GfxclkBelowHostLimitPptAcc
+						[inst]);
+				gpu_metrics->gfx_below_host_limit_thm_acc
+					[i] = SMUQ10_ROUND(
+					metrics_v0->GfxclkBelowHostLimitThmAcc
+						[inst]);
+				gpu_metrics->gfx_low_utilization_acc
+					[i] = SMUQ10_ROUND(
+					metrics_v0
+						->GfxclkLowUtilizationAcc[inst]);
+				gpu_metrics->gfx_below_host_limit_total_acc
+					[i] = SMUQ10_ROUND(
+					metrics_v0->GfxclkBelowHostLimitTotalAcc
+						[inst]);
 			}
 		}
 	}
@@ -2943,7 +2948,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 
 	gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
 
-	*table = (void *)gpu_metrics;
+	*table = tables[SMU_TABLE_SMU_METRICS].cache.buffer;
 
 	return sizeof(*gpu_metrics);
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index aae9a546a67e..0e336cd5e8cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -73,6 +73,13 @@ enum smu_v13_0_6_caps {
 	SMU_CAP(ALL),
 };
 
+#define SMU_13_0_6_NUM_XGMI_LINKS 8
+#define SMU_13_0_6_MAX_GFX_CLKS 8
+#define SMU_13_0_6_MAX_CLKS 4
+#define SMU_13_0_6_MAX_XCC 8
+#define SMU_13_0_6_MAX_VCN 4
+#define SMU_13_0_6_MAX_JPEG 40
+
 extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
 extern void smu_v13_0_6_set_temp_funcs(struct smu_context *smu);
 bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
@@ -95,4 +102,115 @@ void smu_v13_0_12_tables_fini(struct smu_context *smu);
 extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
 extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
 extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
+
+#if defined(SWSMU_CODE_LAYER_L2)
+#include "smu_cmn.h"
+
+/* SMU v13.0.6 GPU metrics */
+#define SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY)                       \
+	SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1),         \
+		   SMU_MTYPE(U16), temperature_hotspot)                        \
+	SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1),             \
+		   SMU_MTYPE(U16), temperature_mem)                            \
+	SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1),           \
+		   SMU_MTYPE(U16), temperature_vrsoc)                          \
+	SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1),          \
+		   SMU_MTYPE(U16), curr_socket_power)                          \
+	SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT),       \
+		   SMU_MTYPE(U16), average_gfx_activity)                       \
+	SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT),       \
+		   SMU_MTYPE(U16), average_umc_activity)                       \
+	SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1),             \
+		   SMU_MTYPE(U64), mem_max_bandwidth)                          \
+	SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE),            \
+		   SMU_MTYPE(U64), energy_accumulator)                         \
+	SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1),        \
+		   SMU_MTYPE(U64), system_clock_counter)                       \
+	SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE),          \
+		   SMU_MTYPE(U32), accumulation_counter)                       \
+	SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE),         \
+		   SMU_MTYPE(U32), prochot_residency_acc)                      \
+	SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE),             \
+		   SMU_MTYPE(U32), ppt_residency_acc)                          \
+	SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE),      \
+		   SMU_MTYPE(U32), socket_thm_residency_acc)                   \
+	SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE),          \
+		   SMU_MTYPE(U32), vr_thm_residency_acc)                       \
+	SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE),         \
+		   SMU_MTYPE(U32), hbm_thm_residency_acc)                      \
+	SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE),            \
+		   SMU_MTYPE(U32), gfxclk_lock_status)                         \
+	SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE),               \
+		   SMU_MTYPE(U16), pcie_link_width)                            \
+	SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2),            \
+		   SMU_MTYPE(U16), pcie_link_speed)                            \
+	SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE),               \
+		   SMU_MTYPE(U16), xgmi_link_width)                            \
+	SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1),            \
+		   SMU_MTYPE(U16), xgmi_link_speed)                            \
+	SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(PERCENT),           \
+		   SMU_MTYPE(U32), gfx_activity_acc)                           \
+	SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(PERCENT),           \
+		   SMU_MTYPE(U32), mem_activity_acc)                           \
+	SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(PERCENT),         \
+		   SMU_MTYPE(U64), pcie_bandwidth_acc)                         \
+	SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1),           \
+		   SMU_MTYPE(U64), pcie_bandwidth_inst)                        \
+	SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE),    \
+		   SMU_MTYPE(U64), pcie_l0_to_recov_count_acc)                 \
+	SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE),         \
+		   SMU_MTYPE(U64), pcie_replay_count_acc)                      \
+	SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE),   \
+		   SMU_MTYPE(U64), pcie_replay_rover_count_acc)                \
+	SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE),       \
+		   SMU_MTYPE(U32), pcie_nak_sent_count_acc)                    \
+	SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE),       \
+		   SMU_MTYPE(U32), pcie_nak_rcvd_count_acc)                    \
+	SMU_ARRAY(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1),           \
+		  SMU_MTYPE(U64), xgmi_read_data_acc,                          \
+		  SMU_13_0_6_NUM_XGMI_LINKS)                                   \
+	SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1),          \
+		  SMU_MTYPE(U64), xgmi_write_data_acc,                         \
+		  SMU_13_0_6_NUM_XGMI_LINKS)                                   \
+	SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE),               \
+		  SMU_MTYPE(U16), xgmi_link_status, SMU_13_0_6_NUM_XGMI_LINKS) \
+	SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2),          \
+		   SMU_MTYPE(U64), firmware_timestamp)                         \
+	SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1),              \
+		  SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_GFX_CLKS)     \
+	SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1),              \
+		  SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS)         \
+	SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1),               \
+		  SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS)          \
+	SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1),               \
+		  SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS)          \
+	SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1),               \
+		   SMU_MTYPE(U16), current_uclk)                               \
+	SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY),                \
+		   SMU_MUNIT(NONE), SMU_MTYPE(U32),                            \
+		   pcie_lc_perf_other_end_recovery)                            \
+	SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT),               \
+		  SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC)           \
+	SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16),   \
+		  jpeg_busy, SMU_13_0_6_MAX_JPEG)                              \
+	SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16),    \
+		  vcn_busy, SMU_13_0_6_MAX_VCN)                                \
+	SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64),\
+		  gfx_busy_acc, SMU_13_0_6_MAX_XCC)                            \
+	SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE),   \
+		  SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc,                \
+		  SMU_13_0_6_MAX_XCC)                                          \
+	SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE),   \
+		  SMU_MTYPE(U64), gfx_below_host_limit_thm_acc,                \
+		  SMU_13_0_6_MAX_XCC)                                          \
+	SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE),        \
+		  SMU_MTYPE(U64), gfx_low_utilization_acc, SMU_13_0_6_MAX_XCC) \
+	SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+		  SMU_MTYPE(U64), gfx_below_host_limit_total_acc,              \
+		  SMU_13_0_6_MAX_XCC)
+
+DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
+
+#endif /* SWSMU_CODE_LAYER_L2 */
+
 #endif
-- 
2.49.0