add support to query vram info from firmware
v2: change APU vram type, add multi-aid check
v3: separate vram info query function into 3 parts and
call them in a helper func when requirements
are met.
Signed-off-by: Gangliang Xie <[email protected]>
---
.../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 459
++++++++++--------
.../gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 4 +
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 22 +
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2 +
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +-
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2 +-
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c | 2 +-
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 58 ++-
8 files changed, 308 insertions(+), 243 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 7f4751e5caaf..cd9aa5b45e94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -373,249 +373,280 @@ int
amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
return -ENODEV;
}
-int
-amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type,
int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index, i = 0;
+ int index;
u16 data_offset, size;
union igp_info *igp_info;
- union vram_info *vram_info;
- union umc_info *umc_info;
- union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
- u8 mem_vendor;
u32 mem_channel_number;
u32 mem_channel_width;
- u32 module_id;
- if (adev->flags & AMD_IS_APU)
- index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+					    integratedsysteminfo);
- else {
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(12, 0, 0):
- case IP_VERSION(12, 0, 1):
- index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v11.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number *
mem_channel_width;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number = igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v21.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number *
mem_channel_width;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ case 3:
+ mem_channel_number = igp_info->v23.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v23.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number *
mem_channel_width;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
- index =
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_info);
+ return -EINVAL;
}
+ } else {
+ return -EINVAL;
}
+ return 0;
+}
+
+int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+					    umc_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (frev) {
- case 1:
- switch (crev) {
- case 11:
- case 12:
- mem_channel_number =
igp_info->v11.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v11.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width = mem_channel_number *
mem_channel_width;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- default:
- return -EINVAL;
- }
- break;
- case 2:
- switch (crev) {
- case 1:
- case 2:
- mem_channel_number =
igp_info->v21.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v21.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width = mem_channel_number *
mem_channel_width;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- case 3:
- mem_channel_number =
igp_info->v23.umachannelnumber;
- if (!mem_channel_number)
- mem_channel_number = 1;
- mem_type = igp_info->v23.memorytype;
- if (mem_type == LpDdr5MemType)
- mem_channel_width = 32;
- else
- mem_channel_width = 64;
- if (vram_width)
- *vram_width = mem_channel_number *
mem_channel_width;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- break;
- default:
- return -EINVAL;
- }
+	umc_info = (union umc_info *)(mode_info->atom_context->bios +
+				      data_offset);
+
+ if (frev == 4) {
+ switch (crev) {
+ case 0:
+			mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
+			mem_type = le32_to_cpu(umc_info->v40.vram_type);
+			mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
+			mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 <<
mem_channel_width);
break;
default:
return -EINVAL;
}
} else {
- switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
- case IP_VERSION(12, 0, 0):
- case IP_VERSION(12, 0, 1):
- umc_info = (union umc_info *)(mode_info-
atom_context->bios + data_offset);
-
- if (frev == 4) {
- switch (crev) {
- case 0:
- mem_channel_number = le32_to_cpu(umc_info-
v40.channel_num);
- mem_type =
le32_to_cpu(umc_info->v40.vram_type);
- mem_channel_width = le32_to_cpu(umc_info-
v40.channel_width);
- mem_vendor = RREG32(adev-
bios_scratch_reg_offset + 4) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- if (vram_width)
- *vram_width = mem_channel_number * (1 <<
mem_channel_width);
- break;
- default:
- return -EINVAL;
- }
- } else
- return -EINVAL;
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index, i = 0;
+ u16 data_offset, size;
+ union vram_info *vram_info;
+ union vram_module *vram_module;
+ u8 frev, crev;
+ u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
+
+	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+					    vram_info);
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ vram_info = (union vram_info *)
+ (mode_info->atom_context->bios + data_offset);
+
+		module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
+ if (frev == 3) {
+ switch (crev) {
+ /* v30 */
+ case 0:
+			vram_module = (union vram_module *)vram_info->v30.vram_module;
+			mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ mem_type = vram_info->v30.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_info->v30.channel_num;
+ mem_channel_width = vram_info->v30.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * 16;
break;
default:
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios + data_offset);
-
- module_id = (RREG32(adev->bios_scratch_reg_offset +
4) & 0x00ff0000) >> 16;
- if (frev == 3) {
- switch (crev) {
- /* v30 */
- case 0:
- vram_module = (union vram_module
*)vram_info-
v30.vram_module;
- mem_vendor = (vram_module-
v30.dram_vendor_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- mem_type = vram_info->v30.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number =
vram_info->v30.channel_num;
- mem_channel_width = vram_info-
v30.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * 16;
- break;
- default:
- return -EINVAL;
- }
- } else if (frev == 2) {
- switch (crev) {
- /* v23 */
- case 3:
- if (module_id >
vram_info->v23.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module
*)vram_info-
v23.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module-
v9.vram_module_size);
- i++;
- }
- mem_type = vram_module->v9.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module-
v9.channel_num;
- mem_channel_width = vram_module-
v9.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 <<
mem_channel_width);
- mem_vendor = (vram_module->v9.vender_rev_id)
& 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- /* v24 */
- case 4:
- if (module_id >
vram_info->v24.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module
*)vram_info-
v24.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module-
v10.vram_module_size);
- i++;
- }
- mem_type = vram_module->v10.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module-
v10.channel_num;
- mem_channel_width = vram_module-
v10.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 <<
mem_channel_width);
- mem_vendor =
(vram_module->v10.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- /* v25 */
- case 5:
- if (module_id >
vram_info->v25.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module
*)vram_info-
v25.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module-
v11.vram_module_size);
- i++;
- }
- mem_type = vram_module->v11.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module-
v11.channel_num;
- mem_channel_width = vram_module-
v11.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 <<
mem_channel_width);
- mem_vendor =
(vram_module->v11.vender_rev_id) & 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- /* v26 */
- case 6:
- if (module_id >
vram_info->v26.vram_module_num)
- module_id = 0;
- vram_module = (union vram_module
*)vram_info-
v26.vram_module;
- while (i < module_id) {
- vram_module = (union vram_module *)
- ((u8 *)vram_module + vram_module-
v9.vram_module_size);
- i++;
- }
- mem_type = vram_module->v9.memory_type;
- if (vram_type)
- *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
- mem_channel_number = vram_module-
v9.channel_num;
- mem_channel_width = vram_module-
v9.channel_width;
- if (vram_width)
- *vram_width = mem_channel_number * (1 <<
mem_channel_width);
- mem_vendor = (vram_module->v9.vender_rev_id)
& 0xF;
- if (vram_vendor)
- *vram_vendor = mem_vendor;
- break;
- default:
- return -EINVAL;
- }
- } else {
- /* invalid frev */
- return -EINVAL;
+ return -EINVAL;
+ }
+ } else if (frev == 2) {
+ switch (crev) {
+ /* v23 */
+ case 3:
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+			vram_module = (union vram_module *)vram_info->v23.vram_module;
+			while (i < module_id) {
+				vram_module = (union vram_module *)
+					((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
}
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 <<
mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v24 */
+ case 4:
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+			vram_module = (union vram_module *)vram_info->v24.vram_module;
+			while (i < module_id) {
+				vram_module = (union vram_module *)
+					((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 <<
mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v25 */
+ case 5:
+ if (module_id > vram_info->v25.vram_module_num)
+ module_id = 0;
+			vram_module = (union vram_module *)vram_info->v25.vram_module;
+			while (i < module_id) {
+				vram_module = (union vram_module *)
+					((u8 *)vram_module + vram_module->v11.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v11.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v11.channel_num;
+ mem_channel_width = vram_module->v11.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 <<
mem_channel_width);
+ mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v26 */
+ case 6:
+ if (module_id > vram_info->v26.vram_module_num)
+ module_id = 0;
+			vram_module = (union vram_module *)vram_info->v26.vram_module;
+			while (i < module_id) {
+				vram_module = (union vram_module *)
+					((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type =
convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 <<
mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ default:
+ return -EINVAL;
}
+ } else {
+ /* invalid frev */
+ return -EINVAL;
}
+
+ } else {
+ return -EINVAL;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 67c8d105729b..0760e4510513 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -30,6 +30,10 @@ uint32_t
amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device
*ade
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct
amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device
*adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device
*adev);
+int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev,
+	int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev,
+	int *vram_width, int *vram_type, int *vram_vendor);
int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
int *vram_width, int *vram_type, int *vram_vendor);
int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index f165d4e401e8..ecb42b304ccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -34,6 +34,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_reset.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_atomfirmware.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
@@ -1748,3 +1749,24 @@ int amdgpu_gmc_init_mem_ranges(struct
amdgpu_device *adev)
return 0;
}
+
+int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev,
+	int *vram_width, int *vram_type, int *vram_vendor)
+{
+ if (adev->flags & AMD_IS_APU)
+ return amdgpu_atomfirmware_get_integrated_system_info(adev,
+ vram_width, vram_type, vram_vendor);
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(9, 5, 0):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 4, 3):