RE: [PATCH] drm/amd/powerplay: update driver if file for sienna_cichlid

2020-06-02 Thread Feng, Kenneth
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Kenneth Feng 


Best Regards
Kenneth

-Original Message-
From: Gao, Likun  
Sent: Wednesday, June 3, 2020 12:36 PM
To: amd-gfx@lists.freedesktop.org
Cc: Feng, Kenneth ; Quan, Evan ; Gao, 
Likun 
Subject: [PATCH] drm/amd/powerplay: update driver if file for sienna_cichlid

From: Likun Gao 

Update sienna_cichlid driver if header file to match pptable changes.

Signed-off-by: Likun Gao 
Change-Id: Ie0652935d512124c03f16ae75c44e134567ef5da
---
 .../inc/smu11_driver_if_sienna_cichlid.h| 17 ++---
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h   |  2 +-
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
index bdffba1f0086..5322f6da3071 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if  // any structure is 
changed in this file -#define SMU11_DRIVER_IF_VERSION 0x30
+#define SMU11_DRIVER_IF_VERSION 0x31
 
 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 4
 
@@ -914,12 +914,14 @@ typedef struct {
   uint16_t GfxActivityLpfTau;
   uint16_t UclkActivityLpfTau;
   uint16_t SocketPowerLpfTau;  
+  uint16_t VcnClkAverageLpfTau;
+  uint16_t padding16; 
 } DriverSmuConfig_t;
 
 typedef struct {
   DriverSmuConfig_t DriverSmuConfig;
 
-  uint32_t Spare[8];  
+  uint32_t Spare[7];  
   // Padding - ignore
   uint32_t MmHubPadding[8]; // SMU internal use
 } DriverSmuConfigExternal_t;
@@ -984,11 +986,20 @@ typedef struct {
   uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
   uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
   uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
+
+  //PMFW-4362
+  uint32_t EnergyAccumulator;
+  uint16_t AverageVclk0Frequency  ;
+  uint16_t AverageDclk0Frequency  ;
+  uint16_t AverageVclk1Frequency  ;
+  uint16_t AverageDclk1Frequency  ;
+  uint16_t VcnActivityPercentage ; //place holder, David N. to provide 
+ full sequence  uint16_t padding16_2;
 } SmuMetrics_t;
 
 typedef struct {
   SmuMetrics_t SmuMetrics;
-  uint32_t Spare[5];
+  uint32_t Spare[1];
 
   // Padding - ignore
   uint32_t MmHubPadding[8]; // SMU internal use  
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 4ad3f07891fe..282eb45e7b86 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
 #define SMU11_DRIVER_IF_VERSION_NV10 0x36  #define 
SMU11_DRIVER_IF_VERSION_NV12 0x33  #define SMU11_DRIVER_IF_VERSION_NV14 0x36 
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x30
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x31
 
 /* MP Apertures */
 #define MP0_Public 0x0380
--
2.25.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/powerplay: use work queue to perform throttling logging

2020-06-02 Thread Evan Quan
As IO operations (access to SMU internals) and possible sleep are
involved in throttling logging, a workqueue can handle them well.
Otherwise we may hit a "scheduling while atomic" error.

Change-Id: I454d593e965e54b13fdf04c112abb0a022204278
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 10 ++
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  2 ++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c  |  2 +-
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 342fd75b0806..e25a3b1ce7ac 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1053,6 +1053,14 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
return 0;
 }
 
+static void smu_throttling_logging_work_fn(struct work_struct *work)
+{
+   struct smu_context *smu = container_of(work, struct smu_context,
+  throttling_logging_work);
+
+   smu_log_thermal_throttling(smu);
+}
+
 static int smu_sw_init(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1074,6 +1082,7 @@ static int smu_sw_init(void *handle)
spin_lock_init(>metrics_lock);
spin_lock_init(>message_lock);
 
+   INIT_WORK(>throttling_logging_work, 
smu_throttling_logging_work_fn);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -1295,6 +1304,7 @@ static int smu_internal_hw_cleanup(struct smu_context 
*smu)
 
smu_i2c_eeprom_fini(smu, >pm.smu_i2c);
 
+   cancel_work_sync(>throttling_logging_work);
ret = smu_disable_thermal_alert(smu);
if (ret) {
pr_warn("Fail to stop thermal control!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 6f44ffb6eb51..b970b4d663b0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -411,6 +411,8 @@ struct smu_context
 
bool uploading_custom_pp_table;
bool dc_controlled_by_gpio;
+
+   struct work_struct throttling_logging_work;
 };
 
 struct i2c_adapter;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index adde9eb7b283..69b1160d8ba2 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1597,7 +1597,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device 
*adev,
return 0;
 
if (__ratelimit(>throttling_logging_rs))
-   smu_log_thermal_throttling(smu);
+   
schedule_work(>throttling_logging_work);
 
break;
}
-- 
2.27.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/powerplay: update driver if file for sienna_cichlid

2020-06-02 Thread Likun Gao
From: Likun Gao 

Update sienna_cichlid driver if header file to match pptable changes.

Signed-off-by: Likun Gao 
Change-Id: Ie0652935d512124c03f16ae75c44e134567ef5da
---
 .../inc/smu11_driver_if_sienna_cichlid.h| 17 ++---
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h   |  2 +-
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
index bdffba1f0086..5322f6da3071 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x30
+#define SMU11_DRIVER_IF_VERSION 0x31
 
 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 4
 
@@ -914,12 +914,14 @@ typedef struct {
   uint16_t GfxActivityLpfTau;
   uint16_t UclkActivityLpfTau;
   uint16_t SocketPowerLpfTau;  
+  uint16_t VcnClkAverageLpfTau;
+  uint16_t padding16; 
 } DriverSmuConfig_t;
 
 typedef struct {
   DriverSmuConfig_t DriverSmuConfig;
 
-  uint32_t Spare[8];  
+  uint32_t Spare[7];  
   // Padding - ignore
   uint32_t MmHubPadding[8]; // SMU internal use
 } DriverSmuConfigExternal_t;
@@ -984,11 +986,20 @@ typedef struct {
   uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
   uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
   uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
+
+  //PMFW-4362
+  uint32_t EnergyAccumulator;
+  uint16_t AverageVclk0Frequency  ;
+  uint16_t AverageDclk0Frequency  ;  
+  uint16_t AverageVclk1Frequency  ;
+  uint16_t AverageDclk1Frequency  ;  
+  uint16_t VcnActivityPercentage ; //place holder, David N. to provide full 
sequence
+  uint16_t padding16_2;
 } SmuMetrics_t;
 
 typedef struct {
   SmuMetrics_t SmuMetrics;
-  uint32_t Spare[5];
+  uint32_t Spare[1];
 
   // Padding - ignore
   uint32_t MmHubPadding[8]; // SMU internal use  
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 4ad3f07891fe..282eb45e7b86 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -31,7 +31,7 @@
 #define SMU11_DRIVER_IF_VERSION_NV10 0x36
 #define SMU11_DRIVER_IF_VERSION_NV12 0x33
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x30
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x31
 
 /* MP Apertures */
 #define MP0_Public 0x0380
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov

2020-06-02 Thread Deng, Emily
[AMD Official Use Only - Internal Distribution Only]

Thanks Frank, already sent out the modified patch, please help review again.

Best wishes
Emily Deng



>-Original Message-
>From: Min, Frank 
>Sent: Tuesday, June 2, 2020 8:34 PM
>To: Deng, Emily ; amd-gfx@lists.freedesktop.org
>Subject: 回复: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov
>
>[AMD Official Use Only - Internal Distribution Only]
>
>Hi Emily,
>How about to move it into smu_hw_init()?
>
>Best Regards,
>Frank
>
>-邮件原件-
>发件人: Deng, Emily 
>发送时间: 2020年6月2日 20:08
>收件人: Deng, Emily ; amd-
>g...@lists.freedesktop.org
>抄送: Min, Frank 
>主题: RE: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov
>
>[AMD Official Use Only - Internal Distribution Only]
>
>>-Original Message-
>>From: Emily Deng 
>>Sent: Tuesday, June 2, 2020 7:54 PM
>>To: amd-gfx@lists.freedesktop.org
>>Cc: Deng, Emily 
>>Subject: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov
>>
>>Change-Id: Ic010440ef625f6f29e91f267a6f284f9b6554e1f
>>Signed-off-by: Emily Deng 
>>---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
>> 1 file changed, 3 insertions(+)
>>
>>diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>index b6331712..fcbd875 100644
>>--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>@@ -2004,6 +2004,9 @@ static int amdgpu_device_ip_init(struct
>>amdgpu_device *adev)  if (amdgpu_sriov_vf(adev))
>>amdgpu_virt_init_data_exchange(adev);
>>
>>+if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
>>+adev->smu.pm_enabled = 0;
>>+
>> r = amdgpu_ib_pool_init(adev);
>> if (r) {
>> dev_err(adev->dev, "IB initialization failed (%d).\n", r);
>>--
>>2.7.4
>
>

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: temporarily read bounding box from gpu_info fw for navi12

2020-06-02 Thread Xu, Feifei
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Feifei Xu 

-Original Message-
From: Tianci Yin 
Sent: Wednesday, June 3, 2020 10:08 AM
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander ; Xu, Feifei 
; Yuan, Xiaojie ; Li, Pauline 
; Yin, Tianci (Rico) 
Subject: [PATCH] drm/amdgpu: temporarily read bounding box from gpu_info fw for 
navi12

From: "Tianci.Yin" 

The bounding box is still needed by Navi12, so temporarily read it from gpu_info 
firmware. Should be dropped when DAL no longer needs it.

Change-Id: Ifc330ec860f9b0665134a81df2fc80ca91c41a33
Reviewed-by: Alex Deucher 
Reviewed-by: Xiaojie Yuan 
Signed-off-by: Tianci.Yin 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16 +++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 15de344438d2..1df28b7bf22e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1537,7 +1537,14 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)

 if (adev->discovery_bin) {
 amdgpu_discovery_get_gfx_info(adev);
-return 0;
+
+/*
+ * FIXME: The bounding box is still needed by Navi12, so
+ * temporarily read it from gpu_info firmware. Should be droped
+ * when DAL no longer needs it.
+ */
+if (adev->asic_type != CHIP_NAVI12)
+return 0;
 }

 switch (adev->asic_type) {
@@ -1627,6 +1634,12 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

+/*
+ * Should be droped when DAL no longer needs it.
+ */
+if (adev->asic_type == CHIP_NAVI12)
+goto parse_soc_bounding_box;
+
 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1655,6 +1668,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
 }

+parse_soc_bounding_box:
 /*
  * soc bounding box info is not integrated in disocovery table,
  * we always need to parse it from gpu info firmware if needed.
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: temporarily read bounding box from gpu_info fw for navi12

2020-06-02 Thread Tianci Yin
From: "Tianci.Yin" 

The bounding box is still needed by Navi12, so temporarily read it from gpu_info
firmware. Should be dropped when DAL no longer needs it.

Change-Id: Ifc330ec860f9b0665134a81df2fc80ca91c41a33
Reviewed-by: Alex Deucher 
Reviewed-by: Xiaojie Yuan 
Signed-off-by: Tianci.Yin 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16 +++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 15de344438d2..1df28b7bf22e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1537,7 +1537,14 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
 
if (adev->discovery_bin) {
amdgpu_discovery_get_gfx_info(adev);
-   return 0;
+
+   /*
+* FIXME: The bounding box is still needed by Navi12, so
+* temporarily read it from gpu_info firmware. Should be droped
+* when DAL no longer needs it.
+*/
+   if (adev->asic_type != CHIP_NAVI12)
+   return 0;
}
 
switch (adev->asic_type) {
@@ -1627,6 +1634,12 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 
*)(adev->firmware.gpu_info_fw->data +

le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
+   /*
+* Should be droped when DAL no longer needs it.
+*/
+   if (adev->asic_type == CHIP_NAVI12)
+   goto parse_soc_bounding_box;
+
adev->gfx.config.max_shader_engines = 
le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = 
le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = 
le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1655,6 +1668,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct 
amdgpu_device *adev)
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
}
 
+parse_soc_bounding_box:
/*
 * soc bounding box info is not integrated in disocovery table,
 * we always need to parse it from gpu info firmware if needed.
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] sound/pci/hda: add sienna_cichlid audio asic id for sienna_cichlid up

2020-06-02 Thread Alex Deucher
From: Hersen Wu 

dp/hdmi ati hda is not shown in audio settings

Signed-off-by: Hersen Wu 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 
---
 sound/pci/hda/hda_intel.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 92a042e34d3e..4188bc4bd4e2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2643,6 +2643,9 @@ static const struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x1002, 0xab38),
  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
  AZX_DCAPS_PM_RUNTIME },
+   { PCI_DEVICE(0x1002, 0xab28),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
/* VIA VT8251/VT8237A */
{ PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
/* VIA GFX VT7122/VX900 */
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display: Revalidate bandwidth before committing DC updates

2020-06-02 Thread Nicholas Kazlauskas
[Why]
Whenever we switch between tiled formats without also switching pixel
formats or doing anything else that recreates the DC plane state we
can run into underflow or hangs since we're not updating the
DML parameters before committing to the hardware.

[How]
If the update type is FULL then call validate_bandwidth again to update
the DML parameters before committing the state.

This is basically just a workaround and protective measure against
update types being added to DC where we could run into this issue in
the future.

We can only fully validate the state in advance before applying it to
the hardware if we recreate all the plane and stream states since
we can't modify what's currently in use.

The next step is to update DM to ensure that we're creating the plane
and stream states for whatever could potentially be a full update in
DC to pre-emptively recreate the state for DC global validation.

The workaround can stay until this has been fixed in DM.

Cc: Hersen Wu 
Cc: Harry Wentland 
Cc: Leo Li 
Signed-off-by: Nicholas Kazlauskas 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04c3d9f7e323..00a4f679759f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2523,6 +2523,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
copy_stream_update_to_stream(dc, context, stream, stream_update);
 
+   if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+   DC_ERROR("Mode validation failed for stream update!\n");
+   dc_release_state(context);
+   return;
+   }
+
commit_planes_for_stream(
dc,
srf_updates,
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 00/15] forward MSIx vector enable error code in pci_alloc_irq_vectors_affinity

2020-06-02 Thread Bjorn Helgaas
On Tue, Jun 02, 2020 at 11:16:17AM +0200, Piotr Stankiewicz wrote:
> The primary objective of this patch series is to change the behaviour
> of pci_alloc_irq_vectors_affinity such that it forwards the MSI-X enable
> error code when appropriate. In the process, though, it was pointed out
> that there are multiple places in the kernel which check/ask for message
> signalled interrupts (MSI or MSI-X), which spawned the first patch adding
> PCI_IRQ_MSI_TYPES. Finally the rest of the chain converts all users to
> take advantage of PCI_IRQ_MSI_TYPES or PCI_IRQ_ALL_TYPES, as
> appropriate.
> 
> Piotr Stankiewicz (15):
>   PCI: add shorthand define for message signalled interrupt types
>   PCI/MSI: forward MSIx vector enable error code in
> pci_alloc_irq_vectors_affinity
>   PCI: use PCI_IRQ_MSI_TYPES where appropriate
>   ahci: use PCI_IRQ_MSI_TYPES where appropriate
>   crypto: inside-secure - use PCI_IRQ_MSI_TYPES where appropriate
>   dmaengine: dw-edma: use PCI_IRQ_MSI_TYPES  where appropriate
>   drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate
>   IB/qib: Use PCI_IRQ_MSI_TYPES where appropriate
>   media: ddbridge: use PCI_IRQ_MSI_TYPES where appropriate
>   vmw_vmci: use PCI_IRQ_ALL_TYPES where appropriate
>   mmc: sdhci: use PCI_IRQ_MSI_TYPES where appropriate
>   amd-xgbe: use PCI_IRQ_MSI_TYPES where appropriate
>   aquantia: atlantic: use PCI_IRQ_ALL_TYPES where appropriate
>   net: hns3: use PCI_IRQ_MSI_TYPES where appropriate
>   scsi: use PCI_IRQ_MSI_TYPES and PCI_IRQ_ALL_TYPES where appropriate
> 
>  Documentation/PCI/msi-howto.rst   | 5 +++--
>  drivers/ata/ahci.c| 2 +-
>  drivers/crypto/inside-secure/safexcel.c   | 2 +-
>  drivers/dma/dw-edma/dw-edma-pcie.c| 2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c   | 8 
>  drivers/infiniband/hw/qib/qib_pcie.c  | 2 +-
>  drivers/media/pci/ddbridge/ddbridge-main.c| 2 +-
>  drivers/misc/vmw_vmci/vmci_guest.c| 3 +--
>  drivers/mmc/host/sdhci-pci-gli.c  | 3 +--
>  drivers/mmc/host/sdhci-pci-o2micro.c  | 3 +--
>  drivers/net/ethernet/amd/xgbe/xgbe-pci.c  | 2 +-
>  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c  | 4 +---
>  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c   | 3 +--
>  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 +-
>  drivers/pci/msi.c | 4 ++--
>  drivers/pci/pcie/portdrv_core.c   | 4 ++--
>  drivers/pci/switch/switchtec.c| 3 +--
>  drivers/scsi/ipr.c| 2 +-
>  drivers/scsi/vmw_pvscsi.c | 2 +-
>  include/linux/pci.h   | 4 ++--
>  20 files changed, 28 insertions(+), 34 deletions(-)

I think I'm OK with this, and since they all depend on the first PCI
patch, it will probably be easiest to merge them all through the PCI
tree.  I'm happy to do that, but can you please:

  - Update the subject lines so they start with a capital letter to
match the historical convention.

  - Use "MSI-X" instead of "MSIx" so it matches the spec and other
usage in the kernel.

  - Add "()" after function names, e.g.,
"pci_alloc_irq_vectors_affinity()" instead of
"pci_alloc_irq_vectors_affinity".

  - Reorder them so the actual fix (02/15) is first and the cleanups
later.

  - Post them all to linux-pci (I only saw the drivers/pci patches).

  - If possible, post them with all the patches as replies to the
cover letter.  These all appear to be unrelated messages, which
makes it a bit of a hassle to collect them all up.
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/display: use blanked rather than plane state for sync groups

2020-06-02 Thread Kazlauskas, Nicholas

On 2020-06-02 5:25 p.m., Alex Deucher wrote:

We may end up with no planes set yet, depending on the ordering, but we
should have the proper blanking state which is either handled by either
DPG or TG depending on the hardware generation.  Check both to determine
the proper blanked state.

Bug: https://gitlab.freedesktop.org/drm/amd/issues/781
Fixes: 5fc0cbfad45648 ("drm/amd/display: determine if a pipe is synced by plane 
state")
Cc: nicholas.kazlaus...@amd.com
Signed-off-by: Alex Deucher 


This looks good to me now from a conceptual level. I guess we'll find 
out later if it breaks anything.


Reviewed-by: Nicholas Kazlauskas 

Regards,
Nicholas Kazlauskas


---
  drivers/gpu/drm/amd/display/dc/core/dc.c | 24 
  1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04c3d9f7e323..7fdb6149047d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1017,9 +1017,17 @@ static void program_timing_sync(
}
}
  
-		/* set first pipe with plane as master */

+   /* set first unblanked pipe as master */
for (j = 0; j < group_size; j++) {
-   if (pipe_set[j]->plane_state) {
+   bool is_blanked;
+
+   if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+   is_blanked =
+   
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+   else
+   is_blanked =
+   
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+   if (!is_blanked) {
if (j == 0)
break;
  
@@ -1040,9 +1048,17 @@ static void program_timing_sync(

status->timing_sync_info.master = false;
  
  		}

-   /* remove any other pipes with plane as they have already been 
synced */
+   /* remove any other unblanked pipes as they have already been 
synced */
for (j = j + 1; j < group_size; j++) {
-   if (pipe_set[j]->plane_state) {
+   bool is_blanked;
+
+   if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+   is_blanked =
+   
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+   else
+   is_blanked =
+   
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+   if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/display: use blanked rather than plane state for sync groups

2020-06-02 Thread Alex Deucher
We may end up with no planes set yet, depending on the ordering, but we
should have the proper blanking state which is either handled by either
DPG or TG depending on the hardware generation.  Check both to determine
the proper blanked state.

Bug: https://gitlab.freedesktop.org/drm/amd/issues/781
Fixes: 5fc0cbfad45648 ("drm/amd/display: determine if a pipe is synced by plane 
state")
Cc: nicholas.kazlaus...@amd.com
Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 24 
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04c3d9f7e323..7fdb6149047d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1017,9 +1017,17 @@ static void program_timing_sync(
}
}
 
-   /* set first pipe with plane as master */
+   /* set first unblanked pipe as master */
for (j = 0; j < group_size; j++) {
-   if (pipe_set[j]->plane_state) {
+   bool is_blanked;
+
+   if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+   is_blanked =
+   
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+   else
+   is_blanked =
+   
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+   if (!is_blanked) {
if (j == 0)
break;
 
@@ -1040,9 +1048,17 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
 
}
-   /* remove any other pipes with plane as they have already been 
synced */
+   /* remove any other unblanked pipes as they have already been 
synced */
for (j = j + 1; j < group_size; j++) {
-   if (pipe_set[j]->plane_state) {
+   bool is_blanked;
+
+   if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+   is_blanked =
+   
pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+   else
+   is_blanked =
+   
pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+   if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 8/9] drm/amd/powerplay: drop unnecessary dynamic buffer allocation

2020-06-02 Thread Luben Tuikov
On 2020-06-01 3:30 a.m., Evan Quan wrote:
> Since the structure comes with only several bytes.
> 

This is not a good commit message as it doesn't describe
what is being done. It evokes the "Yes? Then what?" questions
from a reader.

Perhaps a better one would be:

Allocate the struct amdgpu_irq_src on the stack,
since it is only several bytes in size.

Regards,
Luben


> Change-Id: Ie9df0db543fdd4cf5b963a286ef40dee03c436bf
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c |  3 ---
>  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  2 +-
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 15 +++
>  3 files changed, 4 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index e55c6458b212..b353ac1b0f07 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1121,9 +1121,6 @@ static int smu_sw_fini(void *handle)
>   struct smu_context *smu = >smu;
>   int ret;
>  
> - kfree(smu->irq_source);
> - smu->irq_source = NULL;
> -
>   ret = smu_smc_table_sw_fini(smu);
>   if (ret) {
>   pr_err("Failed to sw fini smc table!\n");
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 4aa63dc79124..7fed2556213f 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -356,7 +356,7 @@ struct smu_baco_context
>  struct smu_context
>  {
>   struct amdgpu_device*adev;
> - struct amdgpu_irq_src   *irq_source;
> + struct amdgpu_irq_src   irq_source;
>  
>   const struct pptable_funcs  *ppt_funcs;
>   struct mutexmutex;
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
> b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> index 891781a5c0d4..e2b1c619151f 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> @@ -1167,7 +1167,7 @@ int smu_v11_0_enable_thermal_alert(struct smu_context 
> *smu)
>   if (ret)
>   return ret;
>  
> - ret = amdgpu_irq_get(adev, smu->irq_source, 0);
> + ret = amdgpu_irq_get(adev, >irq_source, 0);
>   if (ret)
>   return ret;
>  
> @@ -1191,7 +1191,7 @@ int smu_v11_0_enable_thermal_alert(struct smu_context 
> *smu)
>  
>  int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
>  {
> - return amdgpu_irq_put(smu->adev, smu->irq_source, 0);
> + return amdgpu_irq_put(smu->adev, >irq_source, 0);
>  }
>  
>  static uint16_t convert_to_vddc(uint8_t vid)
> @@ -1607,18 +1607,9 @@ static const struct amdgpu_irq_src_funcs 
> smu_v11_0_irq_funcs =
>  int smu_v11_0_register_irq_handler(struct smu_context *smu)
>  {
>   struct amdgpu_device *adev = smu->adev;
> - struct amdgpu_irq_src *irq_src = smu->irq_source;
> + struct amdgpu_irq_src *irq_src = >irq_source;
>   int ret = 0;
>  
> - /* already register */
> - if (irq_src)
> - return 0;
> -
> - irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
> - if (!irq_src)
> - return -ENOMEM;
> - smu->irq_source = irq_src;
> -
>   irq_src->num_types = 1;
>   irq_src->funcs = _v11_0_irq_funcs;
>  
> 

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: restrict bo mapping within gpu address limits

2020-06-02 Thread Bhardwaj, Rajneesh


On 6/2/2020 3:51 PM, Christian König wrote:

Hi Rajneesh,

I think we have reviewed the patch multiple times now, you can push it 
to the amd-staging-drm-next branch.


Thanks Christian. Just wanted to make sure its sent once on the public 
list. I'll push it to the branch now.





Regards,
Christian.

Am 02.06.20 um 20:27 schrieb Rajneesh Bhardwaj:

Have strict check on bo mapping since on some systems, such as A+A or
hybrid, the cpu might support 5 level paging or can address memory above
48 bits but gpu might be limited by hardware to just use 48 bits. In
general, this applies to all asics where this limitation can be checked
against their max_pfn range. This restricts the range to map bo within
practical limits of cpu and gpu for shared virtual memory access.

Reviewed-by: Oak Zeng 
Reviewed-by: Christian König 
Reviewed-by: Hawking Zhang 
Acked-by: Alex Deucher 
Signed-off-by: Rajneesh Bhardwaj 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 --
  1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 7417754e9141..71e005cf2952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2208,7 +2208,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
  /* make sure object fit at this offset */
  eaddr = saddr + size - 1;
  if (saddr >= eaddr ||
-    (bo && offset + size > amdgpu_bo_size(bo)))
+    (bo && offset + size > amdgpu_bo_size(bo)) ||
+    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
  return -EINVAL;
    saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2273,7 +2274,8 @@ int amdgpu_vm_bo_replace_map(struct 
amdgpu_device *adev,

  /* make sure object fit at this offset */
  eaddr = saddr + size - 1;
  if (saddr >= eaddr ||
-    (bo && offset + size > amdgpu_bo_size(bo)))
+    (bo && offset + size > amdgpu_bo_size(bo)) ||
+    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
  return -EINVAL;
    /* Allocate all the needed memory */



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: restrict bo mapping within gpu address limits

2020-06-02 Thread Christian König

Hi Rajneesh,

I think we have reviewed the patch multiple times now, you can push it 
to the amd-staging-drm-next branch.


Regards,
Christian.

Am 02.06.20 um 20:27 schrieb Rajneesh Bhardwaj:

Have strict check on bo mapping since on some systems, such as A+A or
hybrid, the cpu might support 5 level paging or can address memory above
48 bits but gpu might be limited by hardware to just use 48 bits. In
general, this applies to all asics where this limitation can be checked
against their max_pfn range. This restricts the range to map bo within
practical limits of cpu and gpu for shared virtual memory access.

Reviewed-by: Oak Zeng 
Reviewed-by: Christian König 
Reviewed-by: Hawking Zhang 
Acked-by: Alex Deucher 
Signed-off-by: Rajneesh Bhardwaj 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 --
  1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7417754e9141..71e005cf2952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2208,7 +2208,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
-   (bo && offset + size > amdgpu_bo_size(bo)))
+   (bo && offset + size > amdgpu_bo_size(bo)) ||
+   (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
  
  	saddr /= AMDGPU_GPU_PAGE_SIZE;

@@ -2273,7 +2274,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
-   (bo && offset + size > amdgpu_bo_size(bo)))
+   (bo && offset + size > amdgpu_bo_size(bo)) ||
+   (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
  
  	/* Allocate all the needed memory */


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] Revert "drm/[radeon|amdgpu]: Replace one-element array and use struct_size() helper"

2020-06-02 Thread Alex Deucher
This reverts commit 4dea25853a6c0c16e373665153bd9eb6edc6319e.

This changes structs used by the hardware and breaks dpm on some cards.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/si_dpm.c   | 5 +++--
 drivers/gpu/drm/amd/amdgpu/sislands_smc.h | 2 +-
 drivers/gpu/drm/radeon/si_dpm.c   | 5 +++--
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c 
b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 0fc56c5bac08..c00ba4b23c9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -5715,9 +5715,10 @@ static int si_upload_sw_state(struct amdgpu_device *adev,
int ret;
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, driverState);
+   u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+   ((new_state->performance_level_count - 1) *
+sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
SISLANDS_SMC_SWSTATE *smc_state = _pi->smc_statetable.driverState;
-   size_t state_size = struct_size(smc_state, levels,
-   new_state->performance_level_count);
 
memset(smc_state, 0, state_size);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h 
b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
index a089dbf8f7a9..d2930eceaf3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
+++ b/drivers/gpu/drm/amd/amdgpu/sislands_smc.h
@@ -186,7 +186,7 @@ struct SISLANDS_SMC_SWSTATE
 uint8_t levelCount;
 uint8_t padding2;
 uint8_t padding3;
-SISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[];
+SISLANDS_SMC_HW_PERFORMANCE_LEVEL   levels[1];
 };
 
 typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index bab01ca864c6..a167e1c36d24 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5253,9 +5253,10 @@ static int si_upload_sw_state(struct radeon_device *rdev,
int ret;
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, driverState);
+   u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
+   ((new_state->performance_level_count - 1) *
+sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
SISLANDS_SMC_SWSTATE *smc_state = _pi->smc_statetable.driverState;
-   size_t state_size = struct_size(smc_state, levels,
-   new_state->performance_level_count);
 
memset(smc_state, 0, state_size);
 
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 061/207] drm/amdgpu/mes10.1: implement the ring functions of mes specific

2020-06-02 Thread Alex Deucher
On Tue, Jun 2, 2020 at 6:00 AM Christian König
 wrote:
>
> Am 01.06.20 um 20:00 schrieb Alex Deucher:
> > From: Jack Xiao 
> >
> > Implement mes ring functions and set up them.
> >
> > Signed-off-by: Jack Xiao 
> > Acked-by: Alex Deucher 
> > Reviewed-by: Hawking Zhang 
> > Signed-off-by: Alex Deucher 
> > ---
> >   drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 43 ++
> >   1 file changed, 43 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c 
> > b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
> > index 4f7e345673ca..80f6812d8ecf 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
> > @@ -33,6 +33,47 @@ MODULE_FIRMWARE("amdgpu/navi10_mes.bin");
> >
> >   #define MES_EOP_SIZE   2048
> >
> > +static void mes_v10_1_ring_set_wptr(struct amdgpu_ring *ring)
> > +{
> > + struct amdgpu_device *adev = ring->adev;
> > +
> > + if (ring->use_doorbell) {
> > + atomic64_set((atomic64_t*)>wb.wb[ring->wptr_offs],
> > +  ring->wptr);
>
> This atomic64_t type case still looks fishy to me. IIRC we agreed to not
> use them, don't we?

We use them for all the other ring code.  If we change it, we should
probably change it everywhere.  I don't think we ever agreed on a
replacement.

>
> > + WDOORBELL64(ring->doorbell_index, ring->wptr);
> > + } else {
> > + BUG();
>
> Do we really need the BUG() here and below?
>

We shouldn't ever actually hit these cases since the rings don't work
without doorbells.  Maybe it would be better to add a WARN_ON() if
someone tries to set ring->use_doorbell to false for MES.

Alex

> Christian.
>
> > + }
> > +}
> > +
> > +static u64 mes_v10_1_ring_get_rptr(struct amdgpu_ring *ring)
> > +{
> > + return ring->adev->wb.wb[ring->rptr_offs];
> > +}
> > +
> > +static u64 mes_v10_1_ring_get_wptr(struct amdgpu_ring *ring)
> > +{
> > + u64 wptr;
> > +
> > + if (ring->use_doorbell)
> > + wptr = atomic64_read((atomic64_t *)
> > +  >adev->wb.wb[ring->wptr_offs]);
> > + else
> > + BUG();
> > + return wptr;
> > +}
> > +
> > +static const struct amdgpu_ring_funcs mes_v10_1_ring_funcs = {
> > + .type = AMDGPU_RING_TYPE_MES,
> > + .align_mask = 1,
> > + .nop = 0,
> > + .support_64bit_ptrs = true,
> > + .get_rptr = mes_v10_1_ring_get_rptr,
> > + .get_wptr = mes_v10_1_ring_get_wptr,
> > + .set_wptr = mes_v10_1_ring_set_wptr,
> > + .insert_nop = amdgpu_ring_insert_nop,
> > +};
> > +
> >   static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes,
> > struct mes_add_queue_input *input)
> >   {
> > @@ -315,6 +356,8 @@ static int mes_v10_1_ring_init(struct amdgpu_device 
> > *adev)
> >
> >   ring = >mes.ring;
> >
> > + ring->funcs = _v10_1_ring_funcs;
> > +
> >   ring->me = 3;
> >   ring->pipe = 0;
> >   ring->queue = 0;
>
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/fence: use the no_scheduler flag

2020-06-02 Thread Alex Deucher
Rather than checking the ring type manually.  We already set
this for MES and KIQ (and a few other special cases).

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 0ce82686c983..8537f4704348 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -469,9 +469,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
if (!ring->fence_drv.fences)
return -ENOMEM;
 
-   /* No need to setup the GPU scheduler for KIQ and MES ring */
-   if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ &&
-   ring->funcs->type != AMDGPU_RING_TYPE_MES) {
+   /* No need to setup the GPU scheduler for rings that don't need it */
+   if (!ring->no_scheduler) {
switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_GFX:
timeout = adev->gfx_timeout;
@@ -543,7 +542,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
if (ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
   ring->fence_drv.irq_type);
-   drm_sched_fini(>sched);
+   if (!ring->no_scheduler)
+   drm_sched_fini(>sched);
del_timer_sync(>fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
-- 
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 077/207] drm/amdgpu: no need to set up GPU scheduler for mes ring

2020-06-02 Thread Alex Deucher
On Tue, Jun 2, 2020 at 5:30 AM Christian König  wrote:
>
> Am 01.06.20 um 20:00 schrieb Alex Deucher:
> > From: Jack Xiao 
> >
> > As the mes ring directly submits to hardware,
> > there's no need to set up a GPU scheduler for the mes ring.
> >
> > Signed-off-by: Jack Xiao 
> > Acked-by: Alex Deucher 
> > Reviewed-by: Hawking Zhang 
> > Reviewed-by: Christian König 
> > Signed-off-by: Alex Deucher 
> > ---
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 5 +++--
> >   1 file changed, 3 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > index 3414e119f0cb..8712a2e1b869 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> > @@ -469,8 +469,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring 
> > *ring,
> >   if (!ring->fence_drv.fences)
> >   return -ENOMEM;
> >
> > - /* No need to setup the GPU scheduler for KIQ ring */
> > - if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
> > + /* No need to setup the GPU scheduler for KIQ and MES ring */
> > + if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ ||
> > + ring->funcs->type != AMDGPU_RING_TYPE_MES) {
>
> BTW: Making this a flag in the ring->funcs struct would probably be
> cleaner than checking the two types here.
>
> But not a must have right now.

Already added a flag for this, just missed this case.

Alex

>
> Christian.
>
> >   switch (ring->funcs->type) {
> >   case AMDGPU_RING_TYPE_GFX:
> >   timeout = adev->gfx_timeout;
>
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/2] drm/amdgpu: Add unique_id and serial_number for Arcturus v3

2020-06-02 Thread Alex Deucher
On Tue, Jun 2, 2020 at 12:22 PM Kent Russell  wrote:
>
> Add support for unique_id and serial_number, as these are now
> the same value, and will be for future ASICs as well.
>
> v2: Explicitly create unique_id only for VG10/20/ARC
> v3: Change set_unique_id to get_unique_id for clarity
>
> Signed-off-by: Kent Russell 
> Change-Id: I3b036a38b19cd84025399b0706b2dad9b7aff713

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c|  4 ++-
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|  2 ++
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 32 +++
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
>  drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 ++
>  5 files changed, 40 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index b0dff9ecfb88..b2cdc8a1268f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -1940,7 +1940,9 @@ static int default_attr_update(struct amdgpu_device 
> *adev, struct amdgpu_device_
> if (adev->flags & AMD_IS_APU)
> *states = ATTR_STATE_UNSUPPORTED;
> } else if (DEVICE_ATTR_IS(unique_id)) {
> -   if (!adev->unique_id)
> +   if (asic_type != CHIP_VEGA10 &&
> +   asic_type != CHIP_VEGA20 &&
> +   asic_type != CHIP_ARCTURUS)
> *states = ATTR_STATE_UNSUPPORTED;
> } else if (DEVICE_ATTR_IS(pp_features)) {
> if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 5294aa7cdde1..847502faca90 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -793,6 +793,8 @@ static int smu_late_init(void *handle)
> if (!smu->pm_enabled)
> return 0;
>
> +   smu_get_unique_id(smu);
> +
> smu_handle_task(>smu,
> smu->smu_dpm.dpm_level,
> AMD_PP_TASK_COMPLETE_INIT,
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index d66ac7457574..df7b408319f7 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2262,6 +2262,37 @@ static void arcturus_i2c_eeprom_control_fini(struct 
> i2c_adapter *control)
> i2c_del_adapter(control);
>  }
>
> +static void arcturus_get_unique_id(struct smu_context *smu)
> +{
> +   struct amdgpu_device *adev = smu->adev;
> +   uint32_t top32, bottom32, smu_version, size;
> +   char sn[16];
> +   uint64_t id;
> +
> +   if (smu_get_smc_version(smu, NULL, _version)) {
> +   pr_warn("Failed to get smu version, cannot get unique_id or 
> serial_number\n");
> +   return;
> +   }
> +
> +   /* PPSMC_MSG_ReadSerial* is supported by 54.23.0 and onwards */
> +   if (smu_version < 0x361700) {
> +   pr_warn("ReadSerial is only supported by PMFW 54.23.0 and 
> onwards\n");
> +   return;
> +   }
> +
> +   /* Get the SN to turn into a Unique ID */
> +   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, );
> +   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, );
> +
> +   id = ((uint64_t)bottom32 << 32) | top32;
> +   adev->unique_id = id;
> +   /* For Arcturus-and-later, unique_id == serial_number, so convert it 
> to a
> +* 16-digit HEX string for convenience and backwards-compatibility
> +*/
> +   size = sprintf(sn, "%llx", id);
> +   memcpy(adev->serial, , size);
> +}
> +
>  static bool arcturus_is_baco_supported(struct smu_context *smu)
>  {
> struct amdgpu_device *adev = smu->adev;
> @@ -2416,6 +2447,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
> .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
> .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
> .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
> +   .get_unique_id = arcturus_get_unique_id,
> .init_microcode = smu_v11_0_init_microcode,
> .load_microcode = smu_v11_0_load_microcode,
> .init_smc_tables = smu_v11_0_init_smc_tables,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 5bb1ac821aeb..13fc5773ba45 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -495,6 +495,7 @@ struct pptable_funcs {
> int (*update_pcie_parameters)(struct smu_context *smu, uint32_t 
> pcie_gen_cap, uint32_t pcie_width_cap);
> int (*i2c_eeprom_init)(struct i2c_adapter *control);
> void (*i2c_eeprom_fini)(struct i2c_adapter *control);
> +   void 

[PATCH] drm/amdgpu: restrict bo mapping within gpu address limits

2020-06-02 Thread Rajneesh Bhardwaj
Have strict check on bo mapping since on some systems, such as A+A or
hybrid, the cpu might support 5 level paging or can address memory above
48 bits but gpu might be limited by hardware to just use 48 bits. In
general, this applies to all asics where this limitation can be checked
against their max_pfn range. This restricts the range to map bo within
practical limits of cpu and gpu for shared virtual memory access.

Reviewed-by: Oak Zeng 
Reviewed-by: Christian König 
Reviewed-by: Hawking Zhang 
Acked-by: Alex Deucher 
Signed-off-by: Rajneesh Bhardwaj 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7417754e9141..71e005cf2952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2208,7 +2208,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
-   (bo && offset + size > amdgpu_bo_size(bo)))
+   (bo && offset + size > amdgpu_bo_size(bo)) ||
+   (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
 
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2273,7 +2274,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
-   (bo && offset + size > amdgpu_bo_size(bo)))
+   (bo && offset + size > amdgpu_bo_size(bo)) ||
+   (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
 
/* Allocate all the needed memory */
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/2] drm/amdgpu: Add ReadSerial defines for Arcturus

2020-06-02 Thread Kent Russell
Add the ReadSerial definitions for Arcturus to the arcturus_ppsmc.h
header for use with unique_id

Unrevert: Supported in SMU 54.23, update values to match SMU spec

Signed-off-by: Kent Russell 
Reviewed-by: Alex Deucher 
Change-Id: I9a70368ea65b898b3c26f0d57dc088f21dab9c53
---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c   | 2 ++
 drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 302b7e9cb5ba..d66ac7457574 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -129,6 +129,8 @@ static struct smu_11_0_msg_mapping 
arcturus_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetMemoryChannelEnable,  
PPSMC_MSG_SetMemoryChannelEnable,  0),
MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 
0),
MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 
0),
+   MSG_MAP(ReadSerialNumTop32,  
PPSMC_MSG_ReadSerialNumTop32,  1),
+   MSG_MAP(ReadSerialNumBottom32,   
PPSMC_MSG_ReadSerialNumBottom32,   1),
 };
 
 static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h 
b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index e07478b6ac04..79afb132164e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -117,6 +117,9 @@
 #define PPSMC_MSG_GmiPwrDnControl0x3D
 #define PPSMC_Message_Count  0x3E
 
+#define PPSMC_MSG_ReadSerialNumTop320x40
+#define PPSMC_MSG_ReadSerialNumBottom32 0x41
+
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
 #pragma pack(pop)
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amdgpu: Add unique_id and serial_number for Arcturus v3

2020-06-02 Thread Kent Russell
Add support for unique_id and serial_number, as these are now
the same value, and will be for future ASICs as well.

v2: Explicitly create unique_id only for VG10/20/ARC
v3: Change set_unique_id to get_unique_id for clarity

Signed-off-by: Kent Russell 
Change-Id: I3b036a38b19cd84025399b0706b2dad9b7aff713
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c|  4 ++-
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|  2 ++
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 32 +++
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 ++
 5 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b0dff9ecfb88..b2cdc8a1268f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1940,7 +1940,9 @@ static int default_attr_update(struct amdgpu_device 
*adev, struct amdgpu_device_
if (adev->flags & AMD_IS_APU)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(unique_id)) {
-   if (!adev->unique_id)
+   if (asic_type != CHIP_VEGA10 &&
+   asic_type != CHIP_VEGA20 &&
+   asic_type != CHIP_ARCTURUS)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_features)) {
if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 5294aa7cdde1..847502faca90 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -793,6 +793,8 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
 
+   smu_get_unique_id(smu);
+
smu_handle_task(>smu,
smu->smu_dpm.dpm_level,
AMD_PP_TASK_COMPLETE_INIT,
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index d66ac7457574..df7b408319f7 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2262,6 +2262,37 @@ static void arcturus_i2c_eeprom_control_fini(struct 
i2c_adapter *control)
i2c_del_adapter(control);
 }
 
+static void arcturus_get_unique_id(struct smu_context *smu)
+{
+   struct amdgpu_device *adev = smu->adev;
+   uint32_t top32, bottom32, smu_version, size;
+   char sn[16];
+   uint64_t id;
+
+   if (smu_get_smc_version(smu, NULL, _version)) {
+   pr_warn("Failed to get smu version, cannot get unique_id or 
serial_number\n");
+   return;
+   }
+
+   /* PPSMC_MSG_ReadSerial* is supported by 54.23.0 and onwards */
+   if (smu_version < 0x361700) {
+   pr_warn("ReadSerial is only supported by PMFW 54.23.0 and 
onwards\n");
+   return;
+   }
+
+   /* Get the SN to turn into a Unique ID */
+   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, );
+   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, );
+
+   id = ((uint64_t)bottom32 << 32) | top32;
+   adev->unique_id = id;
+   /* For Arcturus-and-later, unique_id == serial_number, so convert it to 
a
+* 16-digit HEX string for convenience and backwards-compatibility
+*/
+   size = sprintf(sn, "%llx", id);
+   memcpy(adev->serial, , size);
+}
+
 static bool arcturus_is_baco_supported(struct smu_context *smu)
 {
struct amdgpu_device *adev = smu->adev;
@@ -2416,6 +2447,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
.i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
.i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+   .get_unique_id = arcturus_get_unique_id,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.init_smc_tables = smu_v11_0_init_smc_tables,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 5bb1ac821aeb..13fc5773ba45 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -495,6 +495,7 @@ struct pptable_funcs {
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t 
pcie_gen_cap, uint32_t pcie_width_cap);
int (*i2c_eeprom_init)(struct i2c_adapter *control);
void (*i2c_eeprom_fini)(struct i2c_adapter *control);
+   void (*get_unique_id)(struct smu_context *smu);
int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks 
*clock_table);
int (*init_microcode)(struct smu_context *smu);
int (*load_microcode)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h 

RE: [PATCH 2/2] drm/amdgpu: Add unique_id and serial_number for Arcturus v2

2020-06-02 Thread Russell, Kent
[AMD Public Use]



> -Original Message-
> From: Alex Deucher 
> Sent: Tuesday, June 2, 2020 10:45 AM
> To: Russell, Kent 
> Cc: amd-gfx list 
> Subject: Re: [PATCH 2/2] drm/amdgpu: Add unique_id and serial_number for
> Arcturus v2
> 
> On Tue, Jun 2, 2020 at 8:53 AM Kent Russell  wrote:
> >
> > Add support for unique_id and serial_number, as these are now the same
> > value, and will be for future ASICs as well.
> >
> > v2: Explicitly create unique_id only for VG10/20/ARC
> >
> > Signed-off-by: Kent Russell 
> > Change-Id: I3b036a38b19cd84025399b0706b2dad9b7aff713
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c|  4 ++-
> >  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|  2 ++
> >  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 32
> +++
> >  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
> >  drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 ++
> >  5 files changed, 40 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > index b0dff9ecfb88..b2cdc8a1268f 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > @@ -1940,7 +1940,9 @@ static int default_attr_update(struct
> amdgpu_device *adev, struct amdgpu_device_
> > if (adev->flags & AMD_IS_APU)
> > *states = ATTR_STATE_UNSUPPORTED;
> > } else if (DEVICE_ATTR_IS(unique_id)) {
> > -   if (!adev->unique_id)
> > +   if (asic_type != CHIP_VEGA10 &&
> > +   asic_type != CHIP_VEGA20 &&
> > +   asic_type != CHIP_ARCTURUS)
> > *states = ATTR_STATE_UNSUPPORTED;
> > } else if (DEVICE_ATTR_IS(pp_features)) {
> > if (adev->flags & AMD_IS_APU || asic_type <
> > CHIP_VEGA10) diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > index 5294aa7cdde1..7946fd8444a3 100644
> > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> > @@ -793,6 +793,8 @@ static int smu_late_init(void *handle)
> > if (!smu->pm_enabled)
> > return 0;
> >
> > +   smu_set_unique_id(smu);
> > +
> > smu_handle_task(&adev->smu,
> > smu->smu_dpm.dpm_level,
> > AMD_PP_TASK_COMPLETE_INIT, diff --git
> > a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > index d66ac7457574..855e609650d9 100644
> > --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> > @@ -2262,6 +2262,37 @@ static void
> arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
> > i2c_del_adapter(control);
> >  }
> >
> > +static void arcturus_set_unique_id(struct smu_context *smu) {
> > +   struct amdgpu_device *adev = smu->adev;
> > +   uint32_t top32, bottom32, smu_version, size;
> > +   char sn[16];
> > +   uint64_t id;
> > +
> > +   if (smu_get_smc_version(smu, NULL, &smu_version)) {
> > +   pr_warn("Failed to get smu version, cannot get unique_id or serial_number\n");
> > +   return;
> > +   }
> > +
> > +   /* PPSMC_MSG_ReadSerial* is supported by 54.23.0 and onwards */
> > +   if (smu_version < 0x361700) {
> > +   pr_warn("ReadSerial is only supported by PMFW 54.23.0 and onwards\n");
> > +   return;
> > +   }
> > +
> > +   /* Get the SN to turn into a Unique ID */
> > +   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
> > +   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
> > +
> > +   id = ((uint64_t)bottom32 << 32) | top32;
> > +   adev->unique_id = id;
> > +   /* For Arcturus-and-later, unique_id == serial_number, so convert it to a
> > +* 16-digit HEX string for convenience and backwards-compatibility
> > +*/
> > +   size = sprintf(sn, "%llx", id);
> > +   memcpy(adev->serial, &sn, size); }
> > +
> >  static bool arcturus_is_baco_supported(struct smu_context *smu)  {
> > struct amdgpu_device *adev = smu->adev; @@ -2416,6 +2447,7 @@
> > static const struct pptable_funcs arcturus_ppt_funcs = {
> > .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
> > .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
> > .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
> > +   .set_unique_id = arcturus_set_unique_id,
> > .init_microcode = smu_v11_0_init_microcode,
> > .load_microcode = smu_v11_0_load_microcode,
> > .init_smc_tables = smu_v11_0_init_smc_tables, diff --git
> > a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > index 5bb1ac821aeb..bfa5211de079 100644
> > --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> > +++ 

Re: [PATCH 9/9] drm/amd/powerplay: add firmware cleanup on sw_fini

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:31 AM Evan Quan  wrote:
>
> To avoid possible memory leak.
>
> Change-Id: I4740eac7fc2c6e934ec8f503e5a98057f0902f4a
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 ++
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c   | 1 +
>  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 +
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h  | 2 ++
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 1 +
>  drivers/gpu/drm/amd/powerplay/smu_internal.h   | 2 ++
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 9 +
>  7 files changed, 18 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index b353ac1b0f07..197fef6f59a8 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1127,6 +1127,8 @@ static int smu_sw_fini(void *handle)
> return ret;
> }
>
> +   smu_fini_microcode(smu);
> +
> return 0;
>  }
>
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index c5c23126ec2d..db2d86e3953b 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2435,6 +2435,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
> .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
> .init_microcode = smu_v11_0_init_microcode,
> .load_microcode = smu_v11_0_load_microcode,
> +   .fini_microcode = smu_v11_0_fini_microcode,
> .init_smc_tables = smu_v11_0_init_smc_tables,
> .fini_smc_tables = smu_v11_0_fini_smc_tables,
> .init_power = smu_v11_0_init_power,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 7fed2556213f..718aecde88c0 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -495,6 +495,7 @@ struct pptable_funcs {
> int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks 
> *clock_table);
> int (*init_microcode)(struct smu_context *smu);
> int (*load_microcode)(struct smu_context *smu);
> +   void (*fini_microcode)(struct smu_context *smu);
> int (*init_smc_tables)(struct smu_context *smu);
> int (*fini_smc_tables)(struct smu_context *smu);
> int (*init_power)(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
> b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> index 8d317e05f65b..4da5f5e87c81 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> @@ -145,6 +145,8 @@ enum smu_v11_0_baco_seq {
>
>  int smu_v11_0_init_microcode(struct smu_context *smu);
>
> +void smu_v11_0_fini_microcode(struct smu_context *smu);
> +
>  int smu_v11_0_load_microcode(struct smu_context *smu);
>
>  int smu_v11_0_init_smc_tables(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> index caa4355b601e..ebbbe38dfb63 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> @@ -2311,6 +2311,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
> .update_pcie_parameters = navi10_update_pcie_parameters,
> .init_microcode = smu_v11_0_init_microcode,
> .load_microcode = smu_v11_0_load_microcode,
> +   .fini_microcode = smu_v11_0_fini_microcode,
> .init_smc_tables = smu_v11_0_init_smc_tables,
> .fini_smc_tables = smu_v11_0_fini_smc_tables,
> .init_power = smu_v11_0_init_power,
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h 
> b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> index 0c7d5f0b1cd1..dbdb870011d3 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
> +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> @@ -27,6 +27,8 @@
>
>  #define smu_init_microcode(smu) \
> ((smu)->ppt_funcs->init_microcode ? 
> (smu)->ppt_funcs->init_microcode((smu)) : 0)
> +#define smu_fini_microcode(smu) \
> +   ((smu)->ppt_funcs->fini_microcode ? 
> (smu)->ppt_funcs->fini_microcode((smu)) : 0)
>  #define smu_init_smc_tables(smu) \
> ((smu)->ppt_funcs->init_smc_tables ? 
> (smu)->ppt_funcs->init_smc_tables((smu)) : 0)
>  #define smu_fini_smc_tables(smu) \
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
> b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> index e2b1c619151f..10ae4575ccb2 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> @@ -195,6 +195,15 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
> return err;
>  }
>
> +void smu_v11_0_fini_microcode(struct smu_context *smu)
> +{
> +   struct amdgpu_device *adev = smu->adev;
> +

Re: [PATCH 8/9] drm/amd/powerplay: drop unnecessary dynamic buffer allocation

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:31 AM Evan Quan  wrote:
>
> Since the structure comes with only several bytes.
>
> Change-Id: Ie9df0db543fdd4cf5b963a286ef40dee03c436bf
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c |  3 ---
>  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  2 +-
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 15 +++
>  3 files changed, 4 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index e55c6458b212..b353ac1b0f07 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1121,9 +1121,6 @@ static int smu_sw_fini(void *handle)
> struct smu_context *smu = &adev->smu;
> int ret;
>
> -   kfree(smu->irq_source);
> -   smu->irq_source = NULL;
> -
> ret = smu_smc_table_sw_fini(smu);
> if (ret) {
> pr_err("Failed to sw fini smc table!\n");
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 4aa63dc79124..7fed2556213f 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -356,7 +356,7 @@ struct smu_baco_context
>  struct smu_context
>  {
> struct amdgpu_device*adev;
> -   struct amdgpu_irq_src   *irq_source;
> +   struct amdgpu_irq_src   irq_source;
>
> const struct pptable_funcs  *ppt_funcs;
> struct mutexmutex;
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c 
> b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> index 891781a5c0d4..e2b1c619151f 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
> @@ -1167,7 +1167,7 @@ int smu_v11_0_enable_thermal_alert(struct smu_context 
> *smu)
> if (ret)
> return ret;
>
> -   ret = amdgpu_irq_get(adev, smu->irq_source, 0);
> +   ret = amdgpu_irq_get(adev, &smu->irq_source, 0);
> if (ret)
> return ret;
>
> @@ -1191,7 +1191,7 @@ int smu_v11_0_enable_thermal_alert(struct smu_context 
> *smu)
>
>  int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
>  {
> -   return amdgpu_irq_put(smu->adev, smu->irq_source, 0);
> +   return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
>  }
>
>  static uint16_t convert_to_vddc(uint8_t vid)
> @@ -1607,18 +1607,9 @@ static const struct amdgpu_irq_src_funcs 
> smu_v11_0_irq_funcs =
>  int smu_v11_0_register_irq_handler(struct smu_context *smu)
>  {
> struct amdgpu_device *adev = smu->adev;
> -   struct amdgpu_irq_src *irq_src = smu->irq_source;
> +   struct amdgpu_irq_src *irq_src = &smu->irq_source;
> int ret = 0;
>
> -   /* already register */
> -   if (irq_src)
> -   return 0;
> -
> -   irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
> -   if (!irq_src)
> -   return -ENOMEM;
> -   smu->irq_source = irq_src;
> -
> irq_src->num_types = 1;
> irq_src->funcs = &smu_v11_0_irq_funcs;
>
> --
> 2.26.2
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 7/9] drm/amd/powerplay: clean up the SMU hw setup operations

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:31 AM Evan Quan  wrote:
>
> Postpone some operations which are not must for hw setup to
> late_init. Thus, code sharing is possible between hw_init/fini and
> suspend/resume. Also this makes code more clean and readable.
>
> Change-Id: Id3996fd9e2dbf2ff59d8a6032cc5f6730db1295c
> Signed-off-by: Evan Quan 

I'm having trouble parsing all of the changes in this patch.  I get
the general idea, but Is there any way to break this up into more
logical patches or provide a more detailed description?

Alex

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 327 ++---
>  1 file changed, 157 insertions(+), 170 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 9b81b6519a96..e55c6458b212 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -789,10 +789,36 @@ static int smu_late_init(void *handle)
>  {
> struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> struct smu_context *smu = >smu;
> +   int ret = 0;
>
> if (!smu->pm_enabled)
> return 0;
>
> +   ret = smu_set_default_od_settings(smu);
> +   if (ret)
> +   return ret;
> +
> +   /*
> +* Set initialized values (get from vbios) to dpm tables context such 
> as
> +* gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for 
> each
> +* type of clks.
> +*/
> +   ret = smu_populate_smc_tables(smu);
> +   if (ret)
> +   return ret;
> +
> +   ret = smu_init_max_sustainable_clocks(smu);
> +   if (ret)
> +   return ret;
> +
> +   ret = smu_populate_umd_state_clk(smu);
> +   if (ret)
> +   return ret;
> +
> +   ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
> +   if (ret)
> +   return ret;
> +
> smu_handle_task(&adev->smu,
> smu->smu_dpm.dpm_level,
> AMD_PP_TASK_COMPLETE_INIT,
> @@ -1107,8 +1133,7 @@ static int smu_sw_fini(void *handle)
> return 0;
>  }
>
> -static int smu_smc_table_hw_init(struct smu_context *smu,
> -bool initialize)
> +static int smu_internal_hw_setup(struct smu_context *smu)
>  {
> struct amdgpu_device *adev = smu->adev;
> int ret;
> @@ -1122,26 +1147,22 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> if (ret)
> return ret;
>
> -   if (initialize) {
> -   /* get boot_values from vbios to set revision, gfxclk, and 
> etc. */
> -   ret = smu_get_vbios_bootup_values(smu);
> -   if (ret)
> -   return ret;
> -
> -   ret = smu_setup_pptable(smu);
> -   if (ret)
> -   return ret;
> +   ret = smu_set_driver_table_location(smu);
> +   if (ret)
> +   return ret;
>
> -   /*
> -* Send msg GetDriverIfVersion to check if the return value 
> is equal
> -* with DRIVER_IF_VERSION of smc header.
> -*/
> -   ret = smu_check_fw_version(smu);
> -   if (ret)
> -   return ret;
> -   }
> +   /*
> +* Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for 
> tools.
> +*/
> +   ret = smu_set_tool_table_location(smu);
> +   if (ret)
> +   return ret;
>
> -   ret = smu_set_driver_table_location(smu);
> +   /*
> +* Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
> +* pool location.
> +*/
> +   ret = smu_notify_memory_pool_location(smu);
> if (ret)
> return ret;
>
> @@ -1158,6 +1179,11 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> ret = smu_run_btc(smu);
> if (ret)
> return ret;
> +
> +   ret = smu_feature_init_dpm(smu);
> +   if (ret)
> +   return ret;
> +
> ret = smu_feature_set_allowed_mask(smu);
> if (ret)
> return ret;
> @@ -1166,12 +1192,19 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> if (ret)
> return ret;
>
> +   if (!smu_is_dpm_running(smu))
> +   pr_info("dpm has been disabled\n");
> +
> ret = smu_disable_umc_cdr_12gbps_workaround(smu);
> if (ret) {
> pr_err("Workaround failed to disable UMC CDR feature on 
> 12Gbps SKU!\n");
> return ret;
> }
>
> +   ret = smu_override_pcie_parameters(smu);
> +   if (ret)
> +   return ret;
> +
> /*
>  * For Navi1X, manually switch it to AC mode as PMFW
>  * may boot it with DC mode.
> @@ -1184,6 +1217,14 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> 

Re: [PATCH 6/9] drm/amd/powerplay: clean up the overdrive settings

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:31 AM Evan Quan  wrote:
>
> Eliminate the buffer allocation and drop the unnecessary
> overdrive table uploading.
>
> Change-Id: I8ba5383a330e6d5355cea219147500c1b4a43f47
> Signed-off-by: Evan Quan 

Acked-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|  2 +-
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  2 +-
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 -
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 74 +--
>  drivers/gpu/drm/amd/powerplay/smu_internal.h  |  4 +-
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 20 -
>  6 files changed, 38 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index b079ac6325d0..9b81b6519a96 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1215,7 +1215,7 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> if (ret)
> return ret;
>
> -   ret = smu_set_default_od_settings(smu, initialize);
> +   ret = smu_set_default_od_settings(smu);
> if (ret)
> return ret;
>
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 14f4a850b553..4aa63dc79124 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -480,7 +480,7 @@ struct pptable_funcs {
>  uint32_t *value);
> int (*get_thermal_temperature_range)(struct smu_context *smu, struct 
> smu_temperature_range *range);
> int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t 
> *clocks_in_khz, uint32_t *num_states);
> -   int (*set_default_od_settings)(struct smu_context *smu, bool 
> initialize);
> +   int (*set_default_od_settings)(struct smu_context *smu);
> int (*set_performance_level)(struct smu_context *smu, enum 
> amd_dpm_forced_level level);
> int (*display_disable_memory_clock_switch)(struct smu_context *smu, 
> bool disable_memory_clock_switch);
> void (*dump_pptable)(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
> b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> index 51868dc33238..8d317e05f65b 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> @@ -258,8 +258,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct 
> smu_context *smu, enum smu_clk_
>
>  int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
>
> -int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool 
> initialize, size_t overdrive_table_size);
> -
>  uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
>
>  int smu_v11_0_set_performance_level(struct smu_context *smu,
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> index db38fb10524d..caa4355b601e 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> @@ -1969,55 +1969,49 @@ static bool navi10_is_baco_supported(struct 
> smu_context *smu)
> return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false;
>  }
>
> -static int navi10_set_default_od_settings(struct smu_context *smu, bool 
> initialize) {
> -   OverDriveTable_t *od_table, *boot_od_table;
> +static int navi10_set_default_od_settings(struct smu_context *smu)
> +{
> +   OverDriveTable_t *od_table =
> +   (OverDriveTable_t *)smu->smu_table.overdrive_table;
> +   OverDriveTable_t *boot_od_table =
> +   (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
> int ret = 0;
>
> -   ret = smu_v11_0_set_default_od_settings(smu, initialize, 
> sizeof(OverDriveTable_t));
> -   if (ret)
> +   ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, 
> false);
> +   if (ret) {
> +   pr_err("Failed to get overdrive table!\n");
> return ret;
> +   }
>
> -   od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
> -   boot_od_table = (OverDriveTable_t 
> *)smu->smu_table.boot_overdrive_table;
> -   if (initialize) {
> -   if (od_table) {
> -   if (!od_table->GfxclkVolt1) {
> -   ret = 
> navi10_overdrive_get_gfx_clk_base_voltage(smu,
> - 
>   _table->GfxclkVolt1,
> - 
>   od_table->GfxclkFreq1);
> -   if (ret)
> -   od_table->GfxclkVolt1 = 0;
> -   if (boot_od_table)
> -   boot_od_table->GfxclkVolt1 = 

Re: [PATCH 5/9] drm/amd/powerplay: clean up the APIs for pptable setup

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:31 AM Evan Quan  wrote:
>
> Combine and simplify the logics for setup pptable.
>
> Change-Id: I062f15eab586050593afd960432c4c70fbdd5d41
> Signed-off-by: Evan Quan 

Acked-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c| 17 
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 66 -
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  5 -
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  4 -
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 92 ++-
>  drivers/gpu/drm/amd/powerplay/smu_internal.h  | 10 --
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 21 -
>  7 files changed, 89 insertions(+), 126 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 9bafa6b3e123..b079ac6325d0 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1132,23 +1132,6 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> if (ret)
> return ret;
>
> -   /*
> -* check if the format_revision in vbios is up to pptable 
> header
> -* version, and the structure size is not 0.
> -*/
> -   ret = smu_check_pptable(smu);
> -   if (ret)
> -   return ret;
> -
> -   /*
> -* Parse pptable format and fill PPTable_t smc_pptable to
> -* smu_table_context structure. And read the smc_dpm_table 
> from vbios,
> -* then fill it into smc_pptable.
> -*/
> -   ret = smu_parse_pptable(smu);
> -   if (ret)
> -   return ret;
> -
> /*
>  * Send msg GetDriverIfVersion to check if the return value 
> is equal
>  * with DRIVER_IF_VERSION of smc header.
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index 902c8cfa4a3b..c5c23126ec2d 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -487,33 +487,33 @@ static int arcturus_set_default_dpm_table(struct 
> smu_context *smu)
>
>  static int arcturus_check_powerplay_table(struct smu_context *smu)
>  {
> > +   struct smu_table_context *table_context = &smu->smu_table;
> > +   struct smu_11_0_powerplay_table *powerplay_table =
> > +   table_context->power_play_table;
> > +   struct smu_baco_context *smu_baco = &smu->smu_baco;
> > +
> > +   mutex_lock(&smu_baco->mutex);
> > +   if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
> > +   powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
> > +   smu_baco->platform_support = true;
> > +   mutex_unlock(&smu_baco->mutex);
> +
> +   table_context->thermal_controller_type =
> +   powerplay_table->thermal_controller_type;
> +
> return 0;
>  }
>
>  static int arcturus_store_powerplay_table(struct smu_context *smu)
>  {
> -   struct smu_11_0_powerplay_table *powerplay_table = NULL;
> struct smu_table_context *table_context = &smu->smu_table;
> -   struct smu_baco_context *smu_baco = &smu->smu_baco;
> -   int ret = 0;
> -
> -   if (!table_context->power_play_table)
> -   return -EINVAL;
> -
> -   powerplay_table = table_context->power_play_table;
> +   struct smu_11_0_powerplay_table *powerplay_table =
> +   table_context->power_play_table;
>
> memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
>sizeof(PPTable_t));
>
> -   table_context->thermal_controller_type = 
> powerplay_table->thermal_controller_type;
> -
> -   mutex_lock(&smu_baco->mutex);
> -   if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
> -   powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO)
> -   smu_baco->platform_support = true;
> -   mutex_unlock(&smu_baco->mutex);
> -
> -   return ret;
> +   return 0;
>  }
>
>  static int arcturus_append_powerplay_table(struct smu_context *smu)
> @@ -544,6 +544,29 @@ static int arcturus_append_powerplay_table(struct 
> smu_context *smu)
> return 0;
>  }
>
> +static int arcturus_setup_pptable(struct smu_context *smu)
> +{
> +   int ret = 0;
> +
> +   ret = smu_v11_0_setup_pptable(smu);
> +   if (ret)
> +   return ret;
> +
> +   ret = arcturus_store_powerplay_table(smu);
> +   if (ret)
> +   return ret;
> +
> +   ret = arcturus_append_powerplay_table(smu);
> +   if (ret)
> +   return ret;
> +
> +   ret = arcturus_check_powerplay_table(smu);
> +   if (ret)
> +   return ret;
> +
> +   return ret;
> +}
> +
>  static int arcturus_run_btc(struct smu_context *smu)
>  {
> int ret = 0;
> @@ -2383,10 

Re: [PATCH 4/9] drm/amd/powerplay: clean up the APIs for bootup clocks

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:30 AM Evan Quan  wrote:
>
> Combine and simplify the logics for retrieving bootup
> clocks.
>
> Change-Id: Ifca28c454f3769dece0cc705ba054ff34db0ab60
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|   4 -
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  |   1 -
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|   1 -
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |   2 -
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c|   1 -
>  drivers/gpu/drm/amd/powerplay/smu_internal.h  |   2 -
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 141 +++---
>  7 files changed, 51 insertions(+), 101 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 70c7b3fdee79..9bafa6b3e123 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1132,10 +1132,6 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> if (ret)
> return ret;
>
> -   ret = smu_get_clk_info_from_vbios(smu);
> -   if (ret)
> -   return ret;
> -
> /*
>  * check if the format_revision in vbios is up to pptable 
> header
>  * version, and the structure size is not 0.
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index e856ad36ab01..902c8cfa4a3b 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2423,7 +2423,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
> .check_fw_status = smu_v11_0_check_fw_status,
> .setup_pptable = smu_v11_0_setup_pptable,
> .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
> -   .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
> .check_pptable = smu_v11_0_check_pptable,
> .parse_pptable = smu_v11_0_parse_pptable,
> .populate_smc_tables = smu_v11_0_populate_smc_pptable,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 5bb1ac821aeb..223678e329a5 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -505,7 +505,6 @@ struct pptable_funcs {
> int (*check_fw_status)(struct smu_context *smu);
> int (*setup_pptable)(struct smu_context *smu);
> int (*get_vbios_bootup_values)(struct smu_context *smu);
> -   int (*get_clk_info_from_vbios)(struct smu_context *smu);
> int (*check_pptable)(struct smu_context *smu);
> int (*parse_pptable)(struct smu_context *smu);
> int (*populate_smc_tables)(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
> b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> index 71f829ab306e..5b785816aa64 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> @@ -161,8 +161,6 @@ int smu_v11_0_setup_pptable(struct smu_context *smu);
>
>  int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
>
> -int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu);
> -
>  int smu_v11_0_check_pptable(struct smu_context *smu);
>
>  int smu_v11_0_parse_pptable(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> index 652728f18271..bea6a96b5afb 100644
> --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
> @@ -2320,7 +2320,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
> .check_fw_status = smu_v11_0_check_fw_status,
> .setup_pptable = smu_v11_0_setup_pptable,
> .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
> -   .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
> .check_pptable = smu_v11_0_check_pptable,
> .parse_pptable = smu_v11_0_parse_pptable,
> .populate_smc_tables = smu_v11_0_populate_smc_pptable,
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h 
> b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> index 6c59eeef2590..a31df7f4e91a 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
> +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> @@ -47,8 +47,6 @@
>
>  #define smu_get_vbios_bootup_values(smu) \
> ((smu)->ppt_funcs->get_vbios_bootup_values ? 
> (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
> -#define smu_get_clk_info_from_vbios(smu) \
> -   ((smu)->ppt_funcs->get_clk_info_from_vbios ? 
> (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0)
>  #define smu_check_pptable(smu) \
> ((smu)->ppt_funcs->check_pptable ? 
> (smu)->ppt_funcs->check_pptable((smu)) : 0)
>  #define 

Re: [PATCH 3/9] drm/amd/powerplay: centralize all buffer allocation in sw_init phase

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:30 AM Evan Quan  wrote:
>
> To fit common design. And this can simplify the buffer deallocation.
>
> Change-Id: Iee682e76aadb5f34861d69d5794ced44f0a78789
> Signed-off-by: Evan Quan 

Took me a little while to sort out the functional changes from the
non-functional moves.  Might be clearer to split those up.  Either
way:
Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 330 ++---
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 105 ---
>  2 files changed, 223 insertions(+), 212 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index b4f108cb52fa..70c7b3fdee79 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -817,6 +817,147 @@ int smu_get_atom_data_table(struct smu_context *smu, 
> uint32_t table,
> return 0;
>  }
>
> +static int smu_init_fb_allocations(struct smu_context *smu)
> +{
> +   struct amdgpu_device *adev = smu->adev;
> +   struct smu_table_context *smu_table = >smu_table;
> +   struct smu_table *tables = smu_table->tables;
> +   struct smu_table *driver_table = &(smu_table->driver_table);
> +   uint32_t max_table_size = 0;
> +   int ret, i;
> +
> +   /* VRAM allocation for tool table */
> +   if (tables[SMU_TABLE_PMSTATUSLOG].size) {
> +   ret = amdgpu_bo_create_kernel(adev,
> + 
> tables[SMU_TABLE_PMSTATUSLOG].size,
> + 
> tables[SMU_TABLE_PMSTATUSLOG].align,
> + 
> tables[SMU_TABLE_PMSTATUSLOG].domain,
> +                                 &tables[SMU_TABLE_PMSTATUSLOG].bo,
> +                                 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> +                                 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> +   if (ret) {
> +   pr_err("VRAM allocation for tool table failed!\n");
> +   return ret;
> +   }
> +   }
> +
> +   /* VRAM allocation for driver table */
> +   for (i = 0; i < SMU_TABLE_COUNT; i++) {
> +   if (tables[i].size == 0)
> +   continue;
> +
> +   if (i == SMU_TABLE_PMSTATUSLOG)
> +   continue;
> +
> +   if (max_table_size < tables[i].size)
> +   max_table_size = tables[i].size;
> +   }
> +
> +   driver_table->size = max_table_size;
> +   driver_table->align = PAGE_SIZE;
> +   driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
> +
> +   ret = amdgpu_bo_create_kernel(adev,
> + driver_table->size,
> + driver_table->align,
> + driver_table->domain,
> +                                 &driver_table->bo,
> +                                 &driver_table->mc_address,
> +                                 &driver_table->cpu_addr);
> +   if (ret) {
> +   pr_err("VRAM allocation for driver table failed!\n");
> +   if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> +                   amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> +                                         &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> +                                         &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> +   }
> +
> +   return ret;
> +}
> +
> +static int smu_fini_fb_allocations(struct smu_context *smu)
> +{
> +   struct smu_table_context *smu_table = &smu->smu_table;
> +   struct smu_table *tables = smu_table->tables;
> +   struct smu_table *driver_table = &(smu_table->driver_table);
> +
> +   if (!tables)
> +   return 0;
> +
> +   if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
> +           amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
> +                                 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
> +                                 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
> +
> +   amdgpu_bo_free_kernel(&driver_table->bo,
> +                         &driver_table->mc_address,
> +                         &driver_table->cpu_addr);
> +
> +   return 0;
> +}
> +
> +/**
> + * smu_alloc_memory_pool - allocate memory pool in the system memory
> + *
> + * @smu: amdgpu_device pointer
> + *
> + * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr
> + * and DramLogSetDramAddr can notify it changed.
> + *
> + * Returns 0 on success, error on failure.
> + */
> +static int smu_alloc_memory_pool(struct smu_context *smu)
> +{
> +   struct amdgpu_device *adev = smu->adev;
> +   struct smu_table_context *smu_table = &smu->smu_table;
> +   struct smu_table *memory_pool = &smu_table->memory_pool;
> +   uint64_t pool_size = smu->pool_size;
> +   int ret = 0;
> +
> +   if 

Re: [PATCH 2/9] drm/amd/powerplay: some cosmetic fixes

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:30 AM Evan Quan  wrote:
>
> To make code more clean and readable by moving ASIC
> specific code to its own file, more code sharing and
> dropping unused code.

There seem to be multiple things going on here.  It's kind of hard to
follow all of the changes.  Maybe split this patch up?  One additional
comment below.

Alex

>
> Change-Id: I6b299f9e98c7678b48281cbed9beb17b644bb4cc
> Signed-off-by: Evan Quan 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 213 -
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c |  19 ++
>  2 files changed, 102 insertions(+), 130 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 4998ea942760..b4f108cb52fa 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -817,22 +817,10 @@ int smu_get_atom_data_table(struct smu_context *smu, 
> uint32_t table,
> return 0;
>  }
>
> -static int smu_initialize_pptable(struct smu_context *smu)
> -{
> -   /* TODO */
> -   return 0;
> -}
> -
>  static int smu_smc_table_sw_init(struct smu_context *smu)
>  {
> int ret;
>
> -   ret = smu_initialize_pptable(smu);
> -   if (ret) {
> -   pr_err("Failed to init smu_initialize_pptable!\n");
> -   return ret;
> -   }
> -
> /**
>  * Create smu_table structure, and init smc tables such as
>  * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
> @@ -860,6 +848,12 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
>  {
> int ret;
>
> +   ret = smu_fini_power(smu);
> +   if (ret) {
> +   pr_err("Failed to init smu_fini_power!\n");
> +   return ret;
> +   }
> +
> ret = smu_fini_smc_tables(smu);
> if (ret) {
> pr_err("Failed to smu_fini_smc_tables!\n");
> @@ -950,12 +944,6 @@ static int smu_sw_fini(void *handle)
> return ret;
> }
>
> -   ret = smu_fini_power(smu);
> -   if (ret) {
> -   pr_err("Failed to init smu_fini_power!\n");
> -   return ret;
> -   }
> -
> return 0;
>  }
>
> @@ -1125,36 +1113,22 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> if (ret)
> return ret;
>
> -   if (adev->asic_type == CHIP_NAVI10) {
> -   if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 
> 0xc2 ||
> - adev->pdev->revision == 
> 0xc3 ||
> - adev->pdev->revision == 
> 0xca ||
> - adev->pdev->revision == 
> 0xcb)) ||
> -   (adev->pdev->device == 0x66af && (adev->pdev->revision == 
> 0xf3 ||
> - adev->pdev->revision == 
> 0xf4 ||
> - adev->pdev->revision == 
> 0xf5 ||
> - adev->pdev->revision == 
> 0xf6))) {
> -   ret = smu_disable_umc_cdr_12gbps_workaround(smu);
> -   if (ret) {
> -   pr_err("Workaround failed to disable UMC CDR 
> feature on 12Gbps SKU!\n");
> -   return ret;
> -   }
> -   }
> +   ret = smu_disable_umc_cdr_12gbps_workaround(smu);
> +   if (ret) {
> +   pr_err("Workaround failed to disable UMC CDR feature on 
> 12Gbps SKU!\n");
> +   return ret;
> }
>
> -   if (smu->ppt_funcs->set_power_source) {
> -   /*
> -* For Navi1X, manually switch it to AC mode as PMFW
> -* may boot it with DC mode.
> -*/
> -   if (adev->pm.ac_power)
> -   ret = smu_set_power_source(smu, SMU_POWER_SOURCE_AC);
> -   else
> -   ret = smu_set_power_source(smu, SMU_POWER_SOURCE_DC);
> -   if (ret) {
> -   pr_err("Failed to switch to %s mode!\n", 
> adev->pm.ac_power ? "AC" : "DC");
> -   return ret;
> -   }
> +   /*
> +* For Navi1X, manually switch it to AC mode as PMFW
> +* may boot it with DC mode.
> +*/
> +   ret = smu_set_power_source(smu,
> +  adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
> +  SMU_POWER_SOURCE_DC);
> +   if (ret) {
> +   pr_err("Failed to switch to %s mode!\n", adev->pm.ac_power ? 
> "AC" : "DC");
> +   return ret;
> }
>
> ret = smu_notify_display_change(smu);
> @@ -1362,9 +1336,65 @@ static int smu_hw_init(void *handle)
> return ret;
>  }
>
> -static int smu_stop_dpms(struct smu_context *smu)
> +static int 

Re: [PATCH 1/9] drm/amd/powerplay: drop unnecessary CHIP_ARCTURUS guard

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 3:30 AM Evan Quan  wrote:
>
> These APIs internally guard they will not break ARCTURUS.
>
> Change-Id: Ib6775c1c8c5211ea45db6c3fb604a8279411ab37
> Signed-off-by: Evan Quan 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c   | 38 +---
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c |  8 ++---
>  2 files changed, 20 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 5294aa7cdde1..4998ea942760 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -1049,11 +1049,9 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> return 0;
> }
>
> -   if (adev->asic_type != CHIP_ARCTURUS) {
> -   ret = smu_init_display_count(smu, 0);
> -   if (ret)
> -   return ret;
> -   }
> +   ret = smu_init_display_count(smu, 0);
> +   if (ret)
> +   return ret;
>
> if (initialize) {
> /* get boot_values from vbios to set revision, gfxclk, and 
> etc. */
> @@ -1159,19 +1157,17 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> }
> }
>
> -   if (adev->asic_type != CHIP_ARCTURUS) {
> -   ret = smu_notify_display_change(smu);
> -   if (ret)
> -   return ret;
> +   ret = smu_notify_display_change(smu);
> +   if (ret)
> +   return ret;
>
> -   /*
> -* Set min deep sleep dce fclk with bootup value from vbios 
> via
> -* SetMinDeepSleepDcefclk MSG.
> -*/
> -   ret = smu_set_min_dcef_deep_sleep(smu);
> -   if (ret)
> -   return ret;
> -   }
> +   /*
> +* Set min deep sleep dce fclk with bootup value from vbios via
> +* SetMinDeepSleepDcefclk MSG.
> +*/
> +   ret = smu_set_min_dcef_deep_sleep(smu);
> +   if (ret)
> +   return ret;
>
> /*
>  * Set initialized values (get from vbios) to dpm tables context such 
> as
> @@ -1188,11 +1184,9 @@ static int smu_smc_table_hw_init(struct smu_context 
> *smu,
> return ret;
> }
>
> -   if (adev->asic_type != CHIP_ARCTURUS) {
> -   ret = smu_override_pcie_parameters(smu);
> -   if (ret)
> -   return ret;
> -   }
> +   ret = smu_override_pcie_parameters(smu);
> +   if (ret)
> +   return ret;
>
> ret = smu_set_default_od_settings(smu, initialize);
> if (ret)
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index 302b7e9cb5ba..e856ad36ab01 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2429,16 +2429,16 @@ static const struct pptable_funcs arcturus_ppt_funcs 
> = {
> .populate_smc_tables = smu_v11_0_populate_smc_pptable,
> .check_fw_version = smu_v11_0_check_fw_version,
> .write_pptable = smu_v11_0_write_pptable,
> -   .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
> +   .set_min_dcef_deep_sleep = NULL,
> .set_driver_table_location = smu_v11_0_set_driver_table_location,
> .set_tool_table_location = smu_v11_0_set_tool_table_location,
> .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
> .system_features_control = smu_v11_0_system_features_control,
> .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
> -   .init_display_count = smu_v11_0_init_display_count,
> +   .init_display_count = NULL,
> .set_allowed_mask = smu_v11_0_set_allowed_mask,
> .get_enabled_mask = smu_v11_0_get_enabled_mask,
> -   .notify_display_change = smu_v11_0_notify_display_change,
> +   .notify_display_change = NULL,
> .set_power_limit = smu_v11_0_set_power_limit,
> .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
> .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
> @@ -2462,7 +2462,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
> .baco_exit = smu_v11_0_baco_exit,
> .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
> .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
> -   .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
> +   .override_pcie_parameters = NULL,
> .get_pptable_power_limit = arcturus_get_pptable_power_limit,
> .set_df_cstate = arcturus_set_df_cstate,
> .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
> --
> 2.26.2
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> 

Re: [PATCH 2/2] drm/amdgpu: Add unique_id and serial_number for Arcturus v2

2020-06-02 Thread Alex Deucher
On Tue, Jun 2, 2020 at 8:53 AM Kent Russell  wrote:
>
> Add support for unique_id and serial_number, as these are now
> the same value, and will be for future ASICs as well.
>
> v2: Explicitly create unique_id only for VG10/20/ARC
>
> Signed-off-by: Kent Russell 
> Change-Id: I3b036a38b19cd84025399b0706b2dad9b7aff713
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c|  4 ++-
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|  2 ++
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 32 +++
>  .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
>  drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 ++
>  5 files changed, 40 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index b0dff9ecfb88..b2cdc8a1268f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -1940,7 +1940,9 @@ static int default_attr_update(struct amdgpu_device 
> *adev, struct amdgpu_device_
> if (adev->flags & AMD_IS_APU)
> *states = ATTR_STATE_UNSUPPORTED;
> } else if (DEVICE_ATTR_IS(unique_id)) {
> -   if (!adev->unique_id)
> +   if (asic_type != CHIP_VEGA10 &&
> +   asic_type != CHIP_VEGA20 &&
> +   asic_type != CHIP_ARCTURUS)
> *states = ATTR_STATE_UNSUPPORTED;
> } else if (DEVICE_ATTR_IS(pp_features)) {
> if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 5294aa7cdde1..7946fd8444a3 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -793,6 +793,8 @@ static int smu_late_init(void *handle)
> if (!smu->pm_enabled)
> return 0;
>
> +   smu_set_unique_id(smu);
> +
> smu_handle_task(&adev->smu,
> smu->smu_dpm.dpm_level,
> AMD_PP_TASK_COMPLETE_INIT,
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index d66ac7457574..855e609650d9 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2262,6 +2262,37 @@ static void arcturus_i2c_eeprom_control_fini(struct 
> i2c_adapter *control)
> i2c_del_adapter(control);
>  }
>
> +static void arcturus_set_unique_id(struct smu_context *smu)
> +{
> +   struct amdgpu_device *adev = smu->adev;
> +   uint32_t top32, bottom32, smu_version, size;
> +   char sn[16];
> +   uint64_t id;
> +
> +   if (smu_get_smc_version(smu, NULL, &smu_version)) {
> +   pr_warn("Failed to get smu version, cannot get unique_id or 
> serial_number\n");
> +   return;
> +   }
> +
> +   /* PPSMC_MSG_ReadSerial* is supported by 54.23.0 and onwards */
> +   if (smu_version < 0x361700) {
> +   pr_warn("ReadSerial is only supported by PMFW 54.23.0 and 
> onwards\n");
> +   return;
> +   }
> +
> +   /* Get the SN to turn into a Unique ID */
> +   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, &top32);
> +   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, &bottom32);
> +
> +   id = ((uint64_t)bottom32 << 32) | top32;
> +   adev->unique_id = id;
> +   /* For Arcturus-and-later, unique_id == serial_number, so convert it 
> to a
> +* 16-digit HEX string for convenience and backwards-compatibility
> +*/
> +   size = sprintf(sn, "%llx", id);
> +   memcpy(adev->serial, &sn, size);
> +}
> +
>  static bool arcturus_is_baco_supported(struct smu_context *smu)
>  {
> struct amdgpu_device *adev = smu->adev;
> @@ -2416,6 +2447,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
> .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
> .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
> .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
> +   .set_unique_id = arcturus_set_unique_id,
> .init_microcode = smu_v11_0_init_microcode,
> .load_microcode = smu_v11_0_load_microcode,
> .init_smc_tables = smu_v11_0_init_smc_tables,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index 5bb1ac821aeb..bfa5211de079 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -495,6 +495,7 @@ struct pptable_funcs {
> int (*update_pcie_parameters)(struct smu_context *smu, uint32_t 
> pcie_gen_cap, uint32_t pcie_width_cap);
> int (*i2c_eeprom_init)(struct i2c_adapter *control);
> void (*i2c_eeprom_fini)(struct i2c_adapter *control);
> +   void (*set_unique_id)(struct smu_context *smu);

As I mentioned in my previous email, I think these functions 

Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Alex Deucher
On Tue, Jun 2, 2020 at 10:35 AM Andy Shevchenko
 wrote:
>
> On Tue, Jun 2, 2020 at 5:21 PM Alex Deucher  wrote:
> > On Tue, Jun 2, 2020 at 10:00 AM Andy Shevchenko
> >  wrote:
> > > On Tue, Jun 2, 2020 at 4:38 PM Ruhl, Michael J  
> > > wrote:
> > > > >From: dri-devel  On Behalf Of
> > > > >Piotr Stankiewicz
>
> > > > >   int nvec = pci_msix_vec_count(adev->pdev);
> > > > >   unsigned int flags;
> > > > >
> > > > >-  if (nvec <= 0) {
> > > > >+  if (nvec > 0)
> > > > >+  flags = PCI_IRQ_MSI_TYPES;
> > > > >+  else
> > > > >   flags = PCI_IRQ_MSI;
> > > > >-  } else {
> > > > >-  flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
> > > > >-  }
> > > >
> > > > Minor nit:
> > > >
> > > > Is it really necessary to set do this check?  Can flags just
> > > > be set?
> > > >
> > > > I.e.:
> > > > flags = PCI_IRQ_MSI_TYPES;
> > > >
> > > > pci_alloc_irq_vector() tries stuff in order.  If MSIX is not available,
> > > > it will try MSI.
> > >
> > > That's also what I proposed earlier. But I suggested as well to wait
> > > for AMD people to confirm that neither pci_msix_vec_count() nor flags
> > > is needed and we can directly supply MSI_TYPES to the below call.
> > >
> >
> > I think it was leftover from debugging and just to be careful.  We had
> > some issues when we originally enabled MSI-X on certain boards.  The
> > fix was to just allocate a single vector (since that is all we use
> > anyway) and we were using the wrong irq (pdev->irq vs
> > pci_irq_vector(pdev, 0)).
>
> Do you agree that simple
>
>   nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, PCI_IRQ_MSI_TYPES);
>
> will work and we can remove that leftover?

Yes, I believe so.  Tom, can you give this a quick spin on raven just
in case if you get a chance?  Something like this:

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0cc4c67f95f7..c59111b57cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -248,16 +248,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;

if (amdgpu_msi_ok(adev)) {
-   int nvec = pci_msix_vec_count(adev->pdev);
-   unsigned int flags;
+   int nvec;

-   if (nvec <= 0) {
-   flags = PCI_IRQ_MSI;
-   } else {
-   flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
-   }
/* we only need one vector */
-   nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+   nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nvec > 0) {
adev->irq.msi_enabled = true;
dev_dbg(adev->dev, "using MSI/MSI-X.\n");


Thanks,

Alex
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Andy Shevchenko
On Tue, Jun 2, 2020 at 5:21 PM Alex Deucher  wrote:
> On Tue, Jun 2, 2020 at 10:00 AM Andy Shevchenko
>  wrote:
> > On Tue, Jun 2, 2020 at 4:38 PM Ruhl, Michael J  
> > wrote:
> > > >From: dri-devel  On Behalf Of
> > > >Piotr Stankiewicz

> > > >   int nvec = pci_msix_vec_count(adev->pdev);
> > > >   unsigned int flags;
> > > >
> > > >-  if (nvec <= 0) {
> > > >+  if (nvec > 0)
> > > >+  flags = PCI_IRQ_MSI_TYPES;
> > > >+  else
> > > >   flags = PCI_IRQ_MSI;
> > > >-  } else {
> > > >-  flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
> > > >-  }
> > >
> > > Minor nit:
> > >
> > > Is it really necessary to set do this check?  Can flags just
> > > be set?
> > >
> > > I.e.:
> > > flags = PCI_IRQ_MSI_TYPES;
> > >
> > > pci_alloc_irq_vector() tries stuff in order.  If MSIX is not available,
> > > it will try MSI.
> >
> > That's also what I proposed earlier. But I suggested as well to wait
> > for AMD people to confirm that neither pci_msix_vec_count() nor flags
> > is needed and we can directly supply MSI_TYPES to the below call.
> >
>
> I think it was leftover from debugging and just to be careful.  We had
> some issues when we originally enabled MSI-X on certain boards.  The
> fix was to just allocate a single vector (since that is all we use
> anyway) and we were using the wrong irq (pdev->irq vs
> pci_irq_vector(pdev, 0)).

Do you agree that simple

  nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, PCI_IRQ_MSI_TYPES);

will work and we can remove that leftover?

-- 
With Best Regards,
Andy Shevchenko
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: enable renoir discovery for gc info retrieved

2020-06-02 Thread Alex Deucher
On Tue, Jun 2, 2020 at 9:56 AM Liang, Prike  wrote:
>
> [AMD Official Use Only - Internal Distribution Only]
>
> Ah not aware the enable patch before. But the already enable patch seems 
> can't fallback to legacy gpuinfo FW load method when not support discovery 
> and also may miss destroy the discovery_bin object when driver shut down.
>

I think it's already handled.  See these patches:
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=amd-staging-drm-next=4d09621cc55bcec9ec0aa038c8ffcffd2017837f
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=amd-staging-drm-next=6206aa0f74e7d22ca43975bd8f2979cdfd128b40

Alex

> Thanks,
> Prike
> > -Original Message-
> > From: Alex Deucher 
> > Sent: Tuesday, June 2, 2020 9:35 PM
> > To: Liang, Prike 
> > Cc: amd-gfx@lists.freedesktop.org; Deucher, Alexander
> > ; Huang, Ray 
> > Subject: Re: [PATCH] drm/amdgpu: enable renoir discovery for gc info
> > retrieved
> >
> > On Mon, Jun 1, 2020 at 10:14 PM Liang, Prike  wrote:
> > >
> > > [AMD Official Use Only - Internal Distribution Only]
> > >
> > > Ping...
> >
> > Already enabled:
> > https://nam11.safelinks.protection.outlook.com/?url=https:%2F%2Fcgit.free
> > desktop.org%2F~agd5f%2Flinux%2Fcommit%2F%3Fh%3Damd-staging-drm-
> > next%26id%3De467ab869f5783cf93d4cf24c6ac647cc29d1fb5data=02%
> > 7C01%7CPrike.Liang%40amd.com%7C5bcc45116bb042163cec08d806f9bd58
> > %7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637267016987033
> > 430sdata=KJ0xmSPXwlZ4LEfhAYoFzAwaWyx3laoLAsQccMM0pcs%3D&
> > amp;reserved=0
> >
> > Alex
> >
> > >
> > > Thanks,
> > > > -Original Message-
> > > > From: Liang, Prike 
> > > > Sent: Friday, May 29, 2020 11:28 AM
> > > > To: amd-gfx@lists.freedesktop.org
> > > > Cc: Deucher, Alexander ; Huang, Ray
> > > > ; Liang, Prike 
> > > > Subject: [PATCH] drm/amdgpu: enable renoir discovery for gc info
> > > > retrieved
> > > >
> > > > Use ip discovery GC table instead of gpu info firmware for exporting
> > > > gpu info to inquire interface.As Renoir discovery has same version
> > > > with Navi1x therefore just enable it same way as Navi1x.
> > > >
> > > > Signed-off-by: Prike.Liang 
> > > > ---
> > > >  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23
> > > > ---
> > > >  1 file changed, 20 insertions(+), 3 deletions(-)
> > > >
> > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > > index 2f0e8da..bff740ccd 100644
> > > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > > @@ -1528,7 +1528,7 @@ static int
> > > > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)  {
> > > > const char *chip_name;  char fw_name[30]; -int err;
> > > > +int err, r;
> > > >  const struct gpu_info_firmware_header_v1_0 *hdr;
> > > >
> > > >  adev->firmware.gpu_info_fw = NULL;
> > > > @@ -1578,6 +1578,23 @@ static int
> > > > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
> > > > chip_name = "arcturus";  break;  case CHIP_RENOIR:
> > > > +if (amdgpu_discovery) {
> > > > +/**
> > > > + * For RENOIR series seems needn't reinitialize the reg base
> > > > again as it already set during
> > > > + * early init,if any concern here will need export
> > > > amdgpu_discovery_init() for this case.
> > > > + */
> > > > +r = amdgpu_discovery_reg_base_init(adev);
> > > > +if (r) {
> > > > +DRM_WARN("failed to get ip discovery table,
> > > > "
> > > > +"fallback to get gpu info in legacy
> > > > method\n");
> > > > +goto legacy_gpuinfo;
> > > > +}
> > > > +
> > > > +amdgpu_discovery_get_gfx_info(adev);
> > > > +
> > > > +return 0;
> > > > +}
> > > > +legacy_gpuinfo:
> > > >  chip_name = "renoir";
> > > >  break;
> > > >  case CHIP_NAVI10:
> > > > @@ -1617,7 +1634,7 @@ static int
> > > > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)  (const
> > > > struct gpu_info_firmware_v1_0 *)(adev-
> > > > >firmware.gpu_info_fw->data +
> > > >
> > > > le32_to_cpu(hdr->header.ucode_array_offset_bytes));
> > > >
> > > > -if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
> > > > +if (amdgpu_discovery && adev->asic_type >= CHIP_RENOIR
> > > > && !r) {
> > > >  amdgpu_discovery_get_gfx_info(adev);
> > > >  goto parse_soc_bounding_box;
> > > >  }
> > > > @@ -3364,7 +3381,7 @@ void amdgpu_device_fini(struct
> > amdgpu_device
> > > > *adev)
> > > >  sysfs_remove_files(>dev->kobj, amdgpu_dev_attributes);  if
> > > > (IS_ENABLED(CONFIG_PERF_EVENTS))  amdgpu_pmu_fini(adev); -if
> > > > (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
> > > > +if (amdgpu_discovery && adev->asic_type >= CHIP_RENOIR)
> > > >  amdgpu_discovery_fini(adev);
> > > >  }
> > > >
> > > > --
> > > > 2.7.4
> > >
> > > ___
> > > amd-gfx mailing list
> > > amd-gfx@lists.freedesktop.org
> > > https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flist
> > > s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-
> > gfxdata=02%7C01%7CPr
> > >

Re: [PATCH] drm/amd/powerplay: use existed smu_dpm_set* interfaces to implement powergate functions

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 10:08 AM Huang Rui  wrote:
>
> Abstract powergate_vcn/jpeg functions, using smu_dpm_set* to implement it.
>
> Signed-off-by: Huang Rui 
> Reviewed-by: Kevin Wang 

Acked-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 16 
>  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  2 --
>  drivers/gpu/drm/amd/powerplay/renoir_ppt.c |  2 --
>  drivers/gpu/drm/amd/powerplay/smu_internal.h   |  7 +++
>  drivers/gpu/drm/amd/powerplay/smu_v12_0.c  | 22 --
>  5 files changed, 19 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 3bb6ab4..846d099 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -2736,3 +2736,19 @@ uint32_t smu_get_pptable_power_limit(struct 
> smu_context *smu)
>
> return ret;
>  }
> +
> +int smu_powergate_vcn(struct smu_context *smu, bool gate)
> +{
> +   if (!smu->is_apu)
> +   return 0;
> +
> +   return smu_dpm_set_uvd_enable(smu, !gate);
> +}
> +
> +int smu_powergate_jpeg(struct smu_context *smu, bool gate)
> +{
> +   if (!smu->is_apu)
> +   return 0;
> +
> +   return smu_dpm_set_jpeg_enable(smu, !gate);
> +}
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index ae2c318..e68d554 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -508,8 +508,6 @@ struct pptable_funcs {
> int (*populate_smc_tables)(struct smu_context *smu);
> int (*check_fw_version)(struct smu_context *smu);
> int (*powergate_sdma)(struct smu_context *smu, bool gate);
> -   int (*powergate_vcn)(struct smu_context *smu, bool gate);
> -   int (*powergate_jpeg)(struct smu_context *smu, bool gate);
> int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
> int (*write_pptable)(struct smu_context *smu);
> int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
> diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
> b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> index 95eb445..6241d2e 100644
> --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
> @@ -930,8 +930,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
> .check_fw_status = smu_v12_0_check_fw_status,
> .check_fw_version = smu_v12_0_check_fw_version,
> .powergate_sdma = smu_v12_0_powergate_sdma,
> -   .powergate_vcn = smu_v12_0_powergate_vcn,
> -   .powergate_jpeg = smu_v12_0_powergate_jpeg,
> .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
> .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
> .gfx_off_control = smu_v12_0_gfx_off_control,
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h 
> b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> index 40c35bc..2889eb2 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
> +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
> @@ -40,10 +40,6 @@
> ((smu)->ppt_funcs->setup_pptable ? 
> (smu)->ppt_funcs->setup_pptable((smu)) : 0)
>  #define smu_powergate_sdma(smu, gate) \
> ((smu)->ppt_funcs->powergate_sdma ? 
> (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
> -#define smu_powergate_vcn(smu, gate) \
> -   ((smu)->ppt_funcs->powergate_vcn ? 
> (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
> -#define smu_powergate_jpeg(smu, gate) \
> -   ((smu)->ppt_funcs->powergate_jpeg ? 
> (smu)->ppt_funcs->powergate_jpeg((smu), (gate)) : 0)
>
>  #define smu_get_vbios_bootup_values(smu) \
> ((smu)->ppt_funcs->get_vbios_bootup_values ? 
> (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
> @@ -214,4 +210,7 @@ static inline int smu_send_smc_msg(struct smu_context 
> *smu, enum smu_message_typ
>  #define smu_set_power_source(smu, power_src) \
> ((smu)->ppt_funcs->set_power_source ? 
> (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
>
> +int smu_powergate_vcn(struct smu_context *smu, bool gate);
> +int smu_powergate_jpeg(struct smu_context *smu, bool gate);
> +
>  #endif
> diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c 
> b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> index 4fc68d4..7c3da532 100644
> --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
> @@ -182,28 +182,6 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, 
> bool gate)
> return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
>  }
>
> -int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
> -{
> -   if (!smu->is_apu)
> -   return 0;
> -
> -   if (gate)
> -   return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
> -   else
> -   return smu_send_smc_msg(smu, 

Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Alex Deucher
On Tue, Jun 2, 2020 at 10:00 AM Andy Shevchenko
 wrote:
>
> On Tue, Jun 2, 2020 at 4:38 PM Ruhl, Michael J  
> wrote:
> > >-Original Message-
> > >From: dri-devel  On Behalf Of
> > >Piotr Stankiewicz
> > >Sent: Tuesday, June 2, 2020 5:21 AM
> > >To: Alex Deucher ; Christian König
> > >; David Zhou ; David
> > >Airlie ; Daniel Vetter 
> > >Cc: Stankiewicz, Piotr ; dri-
> > >de...@lists.freedesktop.org; amd-gfx@lists.freedesktop.org; linux-
> > >ker...@vger.kernel.org
> > >Subject: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where
> > >appropriate
>
> ...
>
> > >   int nvec = pci_msix_vec_count(adev->pdev);
> > >   unsigned int flags;
> > >
> > >-  if (nvec <= 0) {
> > >+  if (nvec > 0)
> > >+  flags = PCI_IRQ_MSI_TYPES;
> > >+  else
> > >   flags = PCI_IRQ_MSI;
> > >-  } else {
> > >-  flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
> > >-  }
> >
> > Minor nit:
> >
> > Is it really necessary to set do this check?  Can flags just
> > be set?
> >
> > I.e.:
> > flags = PCI_IRQ_MSI_TYPES;
> >
> > pci_alloc_irq_vector() tries stuff in order.  If MSIX is not available,
> > it will try MSI.
>
> That's also what I proposed earlier. But I suggested as well to wait
> for AMD people to confirm that neither pci_msix_vec_count() nor flags
> is needed and we can directly supply MSI_TYPES to the below call.
>

I think it was leftover from debugging and just to be careful.  We had
some issues when we originally enabled MSI-X on certain boards.  The
fix was to just allocate a single vector (since that is all we use
anyway) and we were using the wrong irq (pdev->irq vs
pci_irq_vector(pdev, 0)).  For reference, the original patch to add
MSI-X:

commit bd660f4f61f60392dd02424c3a3d2240dc2f
Author: shaoyunl 
Date:   Tue Oct 1 15:52:31 2019 -0400

drm/amdgpu : enable msix for amdgpu driver

We might used out of the msi resources in some cloud project
which have a lot gpu devices(including PF and VF), msix can
provide enough resources from system level view

Signed-off-by: shaoyunl 
Reviewed-by: Alex Deucher 
Signed-off-by: Alex Deucher 

And the fix:

commit 8a745c7ff2ddb8511ef760b4d9cb4cf56a15fc8d
Author: Alex Deucher 
Date:   Thu Oct 3 10:34:30 2019 -0500

drm/amdgpu: improve MSI-X handling (v3)

Check the number of supported vectors and fall back to MSI if
we get an error or 0 MSI-X vectors.

v2: only allocate one vector.  We can't currently use more than
one anyway.

v3: install the irq on vector 0.

Tested-by: Tom St Denis 
Reviewed-by: Shaoyun liu  
Signed-off-by: Alex Deucher 

Alex
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Andy Shevchenko
On Tue, Jun 2, 2020 at 4:38 PM Ruhl, Michael J  wrote:
> >-Original Message-
> >From: dri-devel  On Behalf Of
> >Piotr Stankiewicz
> >Sent: Tuesday, June 2, 2020 5:21 AM
> >To: Alex Deucher ; Christian König
> >; David Zhou ; David
> >Airlie ; Daniel Vetter 
> >Cc: Stankiewicz, Piotr ; dri-
> >de...@lists.freedesktop.org; amd-gfx@lists.freedesktop.org; linux-
> >ker...@vger.kernel.org
> >Subject: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where
> >appropriate

...

> >   int nvec = pci_msix_vec_count(adev->pdev);
> >   unsigned int flags;
> >
> >-  if (nvec <= 0) {
> >+  if (nvec > 0)
> >+  flags = PCI_IRQ_MSI_TYPES;
> >+  else
> >   flags = PCI_IRQ_MSI;
> >-  } else {
> >-  flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
> >-  }
>
> Minor nit:
>
> Is it really necessary to do this check?  Can flags just
> be set?
>
> I.e.:
> flags = PCI_IRQ_MSI_TYPES;
>
> pci_alloc_irq_vector() tries stuff in order.  If MSIX is not available,
> it will try MSI.

That's also what I proposed earlier. But I suggested as well to wait
for AMD people to confirm that neither pci_msix_vec_count() nor flags
is needed and we can directly supply MSI_TYPES to the below call.

> >   /* we only need one vector */
> >   nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);

-- 
With Best Regards,
Andy Shevchenko
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: enable renoir discovery for gc info retrieved

2020-06-02 Thread Liang, Prike
[AMD Official Use Only - Internal Distribution Only]

Ah, I was not aware of the enabling patch before. However, the already-merged patch
seems unable to fall back to the legacy gpuinfo firmware load method when discovery
is not supported, and it may also miss destroying the discovery_bin object when the
driver shuts down.

Thanks,
Prike
> -Original Message-
> From: Alex Deucher 
> Sent: Tuesday, June 2, 2020 9:35 PM
> To: Liang, Prike 
> Cc: amd-gfx@lists.freedesktop.org; Deucher, Alexander
> ; Huang, Ray 
> Subject: Re: [PATCH] drm/amdgpu: enable renoir discovery for gc info
> retrieved
>
> On Mon, Jun 1, 2020 at 10:14 PM Liang, Prike  wrote:
> >
> > [AMD Official Use Only - Internal Distribution Only]
> >
> > Ping...
>
> Already enabled:
> https://nam11.safelinks.protection.outlook.com/?url=https:%2F%2Fcgit.free
> desktop.org%2F~agd5f%2Flinux%2Fcommit%2F%3Fh%3Damd-staging-drm-
> next%26id%3De467ab869f5783cf93d4cf24c6ac647cc29d1fb5data=02%
> 7C01%7CPrike.Liang%40amd.com%7C5bcc45116bb042163cec08d806f9bd58
> %7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637267016987033
> 430sdata=KJ0xmSPXwlZ4LEfhAYoFzAwaWyx3laoLAsQccMM0pcs%3D&
> amp;reserved=0
>
> Alex
>
> >
> > Thanks,
> > > -Original Message-
> > > From: Liang, Prike 
> > > Sent: Friday, May 29, 2020 11:28 AM
> > > To: amd-gfx@lists.freedesktop.org
> > > Cc: Deucher, Alexander ; Huang, Ray
> > > ; Liang, Prike 
> > > Subject: [PATCH] drm/amdgpu: enable renoir discovery for gc info
> > > retrieved
> > >
> > > Use the IP discovery GC table instead of gpu info firmware for exporting
> > > gpu info to the inquiry interface. As Renoir discovery has the same
> > > version as Navi1x, just enable it the same way as Navi1x.
> > >
> > > Signed-off-by: Prike.Liang 
> > > ---
> > >  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23
> > > ---
> > >  1 file changed, 20 insertions(+), 3 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > index 2f0e8da..bff740ccd 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > > @@ -1528,7 +1528,7 @@ static int
> > > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)  {
> > > const char *chip_name;  char fw_name[30]; -int err;
> > > +int err, r;
> > >  const struct gpu_info_firmware_header_v1_0 *hdr;
> > >
> > >  adev->firmware.gpu_info_fw = NULL;
> > > @@ -1578,6 +1578,23 @@ static int
> > > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
> > > chip_name = "arcturus";  break;  case CHIP_RENOIR:
> > > +if (amdgpu_discovery) {
> > > +/**
> > > + * For RENOIR series seems needn't reinitialize the reg base
> > > again as it already set during
> > > + * early init,if any concern here will need export
> > > amdgpu_discovery_init() for this case.
> > > + */
> > > +r = amdgpu_discovery_reg_base_init(adev);
> > > +if (r) {
> > > +DRM_WARN("failed to get ip discovery table,
> > > "
> > > +"fallback to get gpu info in legacy
> > > method\n");
> > > +goto legacy_gpuinfo;
> > > +}
> > > +
> > > +amdgpu_discovery_get_gfx_info(adev);
> > > +
> > > +return 0;
> > > +}
> > > +legacy_gpuinfo:
> > >  chip_name = "renoir";
> > >  break;
> > >  case CHIP_NAVI10:
> > > @@ -1617,7 +1634,7 @@ static int
> > > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)  (const
> > > struct gpu_info_firmware_v1_0 *)(adev-
> > > >firmware.gpu_info_fw->data +
> > >
> > > le32_to_cpu(hdr->header.ucode_array_offset_bytes));
> > >
> > > -if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
> > > +if (amdgpu_discovery && adev->asic_type >= CHIP_RENOIR
> > > && !r) {
> > >  amdgpu_discovery_get_gfx_info(adev);
> > >  goto parse_soc_bounding_box;
> > >  }
> > > @@ -3364,7 +3381,7 @@ void amdgpu_device_fini(struct
> amdgpu_device
> > > *adev)
> > >  sysfs_remove_files(>dev->kobj, amdgpu_dev_attributes);  if
> > > (IS_ENABLED(CONFIG_PERF_EVENTS))  amdgpu_pmu_fini(adev); -if
> > > (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
> > > +if (amdgpu_discovery && adev->asic_type >= CHIP_RENOIR)
> > >  amdgpu_discovery_fini(adev);
> > >  }
> > >
> > > --
> > > 2.7.4
> >
> > ___
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flist
> > s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-
> gfxdata=02%7C01%7CPr
> >
> ike.Liang%40amd.com%7C5bcc45116bb042163cec08d806f9bd58%7C3dd896
> 1fe4884
> >
> e608e11a82d994e183d%7C0%7C0%7C637267016987033430sdata=R%
> 2F%2BY%2B
> >
> z%2BKHh09WazkQqS%2FbwH%2BeBM97%2Fx5hvqWAjUYEtM%3Dres
> erved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Ruhl, Michael J
>-Original Message-
>From: dri-devel  On Behalf Of
>Piotr Stankiewicz
>Sent: Tuesday, June 2, 2020 5:21 AM
>To: Alex Deucher ; Christian König
>; David Zhou ; David
>Airlie ; Daniel Vetter 
>Cc: Stankiewicz, Piotr ; dri-
>de...@lists.freedesktop.org; amd-gfx@lists.freedesktop.org; linux-
>ker...@vger.kernel.org
>Subject: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where
>appropriate
>
>Seeing as there is shorthand available to use when asking for any type
>of interrupt, or any type of message signalled interrupt, leverage it.
>
>Signed-off-by: Piotr Stankiewicz 
>Reviewed-by: Andy Shevchenko 
>---
> drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 8 
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
>diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>index 5ed4227f304b..6dbe173a9fd4 100644
>--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
>@@ -251,11 +251,11 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
>   int nvec = pci_msix_vec_count(adev->pdev);
>   unsigned int flags;
>
>-  if (nvec <= 0) {
>+  if (nvec > 0)
>+  flags = PCI_IRQ_MSI_TYPES;
>+  else
>   flags = PCI_IRQ_MSI;
>-  } else {
>-  flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
>-  }

Minor nit:

Is it really necessary to do this check?  Can flags just
be set?

I.e.: 
flags = PCI_IRQ_MSI_TYPES;

pci_alloc_irq_vector() tries stuff in order.  If MSIX is not available,
it will try MSI.

M

>+
>   /* we only need one vector */
>   nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
>   if (nvec > 0) {
>--
>2.17.2
>
>___
>dri-devel mailing list
>dri-de...@lists.freedesktop.org
>https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: enable renoir discovery for gc info retrieved

2020-06-02 Thread Alex Deucher
On Mon, Jun 1, 2020 at 10:14 PM Liang, Prike  wrote:
>
> [AMD Official Use Only - Internal Distribution Only]
>
> Ping...

Already enabled:
https://cgit.freedesktop.org/~agd5f/linux/commit/?h=amd-staging-drm-next=e467ab869f5783cf93d4cf24c6ac647cc29d1fb5

Alex

>
> Thanks,
> > -Original Message-
> > From: Liang, Prike 
> > Sent: Friday, May 29, 2020 11:28 AM
> > To: amd-gfx@lists.freedesktop.org
> > Cc: Deucher, Alexander ; Huang, Ray
> > ; Liang, Prike 
> > Subject: [PATCH] drm/amdgpu: enable renoir discovery for gc info retrieved
> >
> > Use the IP discovery GC table instead of gpu info firmware for exporting
> > gpu info to the inquiry interface. As Renoir discovery has the same
> > version as Navi1x, just enable it the same way as Navi1x.
> >
> > Signed-off-by: Prike.Liang 
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23
> > ---
> >  1 file changed, 20 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > index 2f0e8da..bff740ccd 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> > @@ -1528,7 +1528,7 @@ static int
> > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)  {
> >  const char *chip_name;
> >  char fw_name[30];
> > -int err;
> > +int err, r;
> >  const struct gpu_info_firmware_header_v1_0 *hdr;
> >
> >  adev->firmware.gpu_info_fw = NULL;
> > @@ -1578,6 +1578,23 @@ static int
> > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
> >  chip_name = "arcturus";
> >  break;
> >  case CHIP_RENOIR:
> > +if (amdgpu_discovery) {
> > +/**
> > + * For RENOIR series seems needn't reinitialize the reg base
> > again as it already set during
> > + * early init,if any concern here will need export
> > amdgpu_discovery_init() for this case.
> > + */
> > +r = amdgpu_discovery_reg_base_init(adev);
> > +if (r) {
> > +DRM_WARN("failed to get ip discovery table,
> > "
> > +"fallback to get gpu info in legacy
> > method\n");
> > +goto legacy_gpuinfo;
> > +}
> > +
> > +amdgpu_discovery_get_gfx_info(adev);
> > +
> > +return 0;
> > +}
> > +legacy_gpuinfo:
> >  chip_name = "renoir";
> >  break;
> >  case CHIP_NAVI10:
> > @@ -1617,7 +1634,7 @@ static int
> > amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
> >  (const struct gpu_info_firmware_v1_0 *)(adev-
> > >firmware.gpu_info_fw->data +
> >
> > le32_to_cpu(hdr->header.ucode_array_offset_bytes));
> >
> > -if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
> > +if (amdgpu_discovery && adev->asic_type >= CHIP_RENOIR
> > && !r) {
> >  amdgpu_discovery_get_gfx_info(adev);
> >  goto parse_soc_bounding_box;
> >  }
> > @@ -3364,7 +3381,7 @@ void amdgpu_device_fini(struct amdgpu_device
> > *adev)
> >  sysfs_remove_files(>dev->kobj, amdgpu_dev_attributes);
> >  if (IS_ENABLED(CONFIG_PERF_EVENTS))
> >  amdgpu_pmu_fini(adev);
> > -if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
> > +if (amdgpu_discovery && adev->asic_type >= CHIP_RENOIR)
> >  amdgpu_discovery_fini(adev);
> >  }
> >
> > --
> > 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Andy Shevchenko
On Tue, Jun 2, 2020 at 12:24 PM Piotr Stankiewicz
 wrote:
>
> Seeing as there is shorthand available to use when asking for any type
> of interrupt, or any type of message signalled interrupt, leverage it.
>
> Signed-off-by: Piotr Stankiewicz 
> Reviewed-by: Andy Shevchenko 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 8 
>  1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> index 5ed4227f304b..6dbe173a9fd4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> @@ -251,11 +251,11 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
> int nvec = pci_msix_vec_count(adev->pdev);
> unsigned int flags;
>
> -   if (nvec <= 0) {
> +   if (nvec > 0)
> +   flags = PCI_IRQ_MSI_TYPES;
> +   else
> flags = PCI_IRQ_MSI;
> -   } else {
> -   flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
> -   }
> +
> /* we only need one vector */
> nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);

I'm not sure if you have seen my last comment internally about this patch.

I don't understand why we need these pci_msix_vec_count() followed by
conditional at all.
Perhaps we may simple drop all these and supply flag directly?

But OTOH, I don't know the initial motivation, so, the above patch is
non-intrusive and keeps original logic.

> if (nvec > 0) {
> --
> 2.17.2
>


-- 
With Best Regards,
Andy Shevchenko
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Stankiewicz, Piotr
> -Original Message-
> From: Andy Shevchenko 
> Sent: Tuesday, June 2, 2020 11:49 AM
> To: Stankiewicz, Piotr 
> Cc: Alex Deucher ; Christian König
> ; David Zhou ; David
> Airlie ; Daniel Vetter ; amd-
> g...@lists.freedesktop.org; dri-devel ; Linux
> Kernel Mailing List 
> Subject: Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where
> appropriate
> 
> On Tue, Jun 2, 2020 at 12:24 PM Piotr Stankiewicz
>  wrote:
> >
> > Seeing as there is shorthand available to use when asking for any type
> > of interrupt, or any type of message signalled interrupt, leverage it.
> >
> > Signed-off-by: Piotr Stankiewicz 
> > Reviewed-by: Andy Shevchenko 
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 8 
> >  1 file changed, 4 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> > index 5ed4227f304b..6dbe173a9fd4 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
> > @@ -251,11 +251,11 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
> > int nvec = pci_msix_vec_count(adev->pdev);
> > unsigned int flags;
> >
> > -   if (nvec <= 0) {
> > +   if (nvec > 0)
> > +   flags = PCI_IRQ_MSI_TYPES;
> > +   else
> > flags = PCI_IRQ_MSI;
> > -   } else {
> > -   flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
> > -   }
> > +
> > /* we only need one vector */
> > nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
> 
> I'm not sure if you have seen my last comment internally about this patch.
> 
> I don't understand why we need these pci_msix_vec_count() followed by
> conditional at all.
> Perhaps we may simple drop all these and supply flag directly?
> 
> But OTOH, I don't know the initial motivation, so, the above patch is
> non-intrusive and keeps original logic.
> 

Sorry, I must have misunderstood or missed that comment. I am happy
to do a V2 if dropping the conditional is preferable.

> > if (nvec > 0) {
> > --
> > 2.17.2
> >
> 
> 
> --
> With Best Regards,
> Andy Shevchenko

BR,
Piotr Stankiewicz
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 00/15] forward MSIx vector enable error code in pci_alloc_irq_vectors_affinity

2020-06-02 Thread Piotr Stankiewicz
The primary objective of this patch series is to change the behaviour
of pci_alloc_irq_vectors_affinity such that it forwards the MSI-X enable
error code when appropriate. In the process, though, it was pointed out
that there are multiple places in the kernel which check/ask for message
signalled interrupts (MSI or MSI-X), which spawned the first patch adding
PCI_IRQ_MSI_TYPES. Finally the rest of the chain converts all users to
take advantage of PCI_IRQ_MSI_TYPES or PCI_IRQ_ALL_TYPES, as
appropriate.

Piotr Stankiewicz (15):
  PCI: add shorthand define for message signalled interrupt types
  PCI/MSI: forward MSIx vector enable error code in
pci_alloc_irq_vectors_affinity
  PCI: use PCI_IRQ_MSI_TYPES where appropriate
  ahci: use PCI_IRQ_MSI_TYPES where appropriate
  crypto: inside-secure - use PCI_IRQ_MSI_TYPES where appropriate
  dmaengine: dw-edma: use PCI_IRQ_MSI_TYPES  where appropriate
  drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate
  IB/qib: Use PCI_IRQ_MSI_TYPES where appropriate
  media: ddbridge: use PCI_IRQ_MSI_TYPES where appropriate
  vmw_vmci: use PCI_IRQ_ALL_TYPES where appropriate
  mmc: sdhci: use PCI_IRQ_MSI_TYPES where appropriate
  amd-xgbe: use PCI_IRQ_MSI_TYPES where appropriate
  aquantia: atlantic: use PCI_IRQ_ALL_TYPES where appropriate
  net: hns3: use PCI_IRQ_MSI_TYPES where appropriate
  scsi: use PCI_IRQ_MSI_TYPES and PCI_IRQ_ALL_TYPES where appropriate

 Documentation/PCI/msi-howto.rst   | 5 +++--
 drivers/ata/ahci.c| 2 +-
 drivers/crypto/inside-secure/safexcel.c   | 2 +-
 drivers/dma/dw-edma/dw-edma-pcie.c| 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c   | 8 
 drivers/infiniband/hw/qib/qib_pcie.c  | 2 +-
 drivers/media/pci/ddbridge/ddbridge-main.c| 2 +-
 drivers/misc/vmw_vmci/vmci_guest.c| 3 +--
 drivers/mmc/host/sdhci-pci-gli.c  | 3 +--
 drivers/mmc/host/sdhci-pci-o2micro.c  | 3 +--
 drivers/net/ethernet/amd/xgbe/xgbe-pci.c  | 2 +-
 drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c  | 4 +---
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c   | 3 +--
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 +-
 drivers/pci/msi.c | 4 ++--
 drivers/pci/pcie/portdrv_core.c   | 4 ++--
 drivers/pci/switch/switchtec.c| 3 +--
 drivers/scsi/ipr.c| 2 +-
 drivers/scsi/vmw_pvscsi.c | 2 +-
 include/linux/pci.h   | 4 ++--
 20 files changed, 28 insertions(+), 34 deletions(-)

-- 
2.17.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Andy Shevchenko
On Tue, Jun 2, 2020 at 12:58 PM Stankiewicz, Piotr
 wrote:
> > -Original Message-
> > From: Andy Shevchenko 
> > Sent: Tuesday, June 2, 2020 11:49 AM
> > To: Stankiewicz, Piotr 
> > Cc: Alex Deucher ; Christian König
> > ; David Zhou ; David
> > Airlie ; Daniel Vetter ; amd-
> > g...@lists.freedesktop.org; dri-devel ; 
> > Linux
> > Kernel Mailing List 
> > Subject: Re: [PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where
> > appropriate
> > On Tue, Jun 2, 2020 at 12:24 PM Piotr Stankiewicz
> >  wrote:

...

> > > int nvec = pci_msix_vec_count(adev->pdev);
> > > unsigned int flags;
> > >
> > > -   if (nvec <= 0) {
> > > +   if (nvec > 0)
> > > +   flags = PCI_IRQ_MSI_TYPES;
> > > +   else
> > > flags = PCI_IRQ_MSI;
> > > -   } else {
> > > -   flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
> > > -   }
> > > +
> > > /* we only need one vector */
> > > nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
> >
> > I'm not sure if you have seen my last comment internally about this patch.
> >
> > I don't understand why we need these pci_msix_vec_count() followed by
> > conditional at all.
> > Perhaps we may simple drop all these and supply flag directly?
> >
> > But OTOH, I don't know the initial motivation, so, the above patch is
> > non-intrusive and keeps original logic.
> >
>
> Sorry, I must have misunderstood or missed that comment. I am happy
> to do a V2 if dropping the conditional is preferable.

Let's wait for AMD people to confirm either.

-- 
With Best Regards,
Andy Shevchenko
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 07/15] drm/amdgpu: use PCI_IRQ_MSI_TYPES where appropriate

2020-06-02 Thread Piotr Stankiewicz
Seeing as there is shorthand available to use when asking for any type
of interrupt, or any type of message signalled interrupt, leverage it.

Signed-off-by: Piotr Stankiewicz 
Reviewed-by: Andy Shevchenko 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 5ed4227f304b..6dbe173a9fd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -251,11 +251,11 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
int nvec = pci_msix_vec_count(adev->pdev);
unsigned int flags;
 
-   if (nvec <= 0) {
+   if (nvec > 0)
+   flags = PCI_IRQ_MSI_TYPES;
+   else
flags = PCI_IRQ_MSI;
-   } else {
-   flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
-   }
+
/* we only need one vector */
nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
if (nvec > 0) {
-- 
2.17.2

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amdgpu: Add unique_id and serial_number for Arcturus v2

2020-06-02 Thread Kent Russell
Add support for unique_id and serial_number, as these are now
the same value, and will be for future ASICs as well.

v2: Explicitly create unique_id only for VG10/20/ARC

Signed-off-by: Kent Russell 
Change-Id: I3b036a38b19cd84025399b0706b2dad9b7aff713
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c|  4 ++-
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|  2 ++
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 32 +++
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  1 +
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 ++
 5 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b0dff9ecfb88..b2cdc8a1268f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1940,7 +1940,9 @@ static int default_attr_update(struct amdgpu_device 
*adev, struct amdgpu_device_
if (adev->flags & AMD_IS_APU)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(unique_id)) {
-   if (!adev->unique_id)
+   if (asic_type != CHIP_VEGA10 &&
+   asic_type != CHIP_VEGA20 &&
+   asic_type != CHIP_ARCTURUS)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_features)) {
if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 5294aa7cdde1..7946fd8444a3 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -793,6 +793,8 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
 
+   smu_set_unique_id(smu);
+
smu_handle_task(>smu,
smu->smu_dpm.dpm_level,
AMD_PP_TASK_COMPLETE_INIT,
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index d66ac7457574..855e609650d9 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2262,6 +2262,37 @@ static void arcturus_i2c_eeprom_control_fini(struct 
i2c_adapter *control)
i2c_del_adapter(control);
 }
 
+static void arcturus_set_unique_id(struct smu_context *smu)
+{
+   struct amdgpu_device *adev = smu->adev;
+   uint32_t top32, bottom32, smu_version, size;
+   char sn[16];
+   uint64_t id;
+
+   if (smu_get_smc_version(smu, NULL, _version)) {
+   pr_warn("Failed to get smu version, cannot get unique_id or 
serial_number\n");
+   return;
+   }
+
+   /* PPSMC_MSG_ReadSerial* is supported by 54.23.0 and onwards */
+   if (smu_version < 0x361700) {
+   pr_warn("ReadSerial is only supported by PMFW 54.23.0 and 
onwards\n");
+   return;
+   }
+
+   /* Get the SN to turn into a Unique ID */
+   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumTop32, );
+   smu_send_smc_msg(smu, SMU_MSG_ReadSerialNumBottom32, );
+
+   id = ((uint64_t)bottom32 << 32) | top32;
+   adev->unique_id = id;
+   /* For Arcturus-and-later, unique_id == serial_number, so convert it to 
a
+* 16-digit HEX string for convenience and backwards-compatibility
+*/
+   size = sprintf(sn, "%llx", id);
+   memcpy(adev->serial, , size);
+}
+
 static bool arcturus_is_baco_supported(struct smu_context *smu)
 {
struct amdgpu_device *adev = smu->adev;
@@ -2416,6 +2447,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
.i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
.i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+   .set_unique_id = arcturus_set_unique_id,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.init_smc_tables = smu_v11_0_init_smc_tables,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 5bb1ac821aeb..bfa5211de079 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -495,6 +495,7 @@ struct pptable_funcs {
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t 
pcie_gen_cap, uint32_t pcie_width_cap);
int (*i2c_eeprom_init)(struct i2c_adapter *control);
void (*i2c_eeprom_fini)(struct i2c_adapter *control);
+   void (*set_unique_id)(struct smu_context *smu);
int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks 
*clock_table);
int (*init_microcode)(struct smu_context *smu);
int (*load_microcode)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h 
b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 

[PATCH 1/2] drm/amdgpu: Add ReadSerial defines for Arcturus

2020-06-02 Thread Kent Russell
Add the ReadSerial definitions for Arcturus to the arcturus_ppsmc.h
header for use with unique_id

Unrevert: Supported in SMU 54.23, update values to match SMU spec

Signed-off-by: Kent Russell 
Reviewed-by: Alex Deucher 
Change-Id: I9a70368ea65b898b3c26f0d57dc088f21dab9c53
---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c   | 2 ++
 drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 3 +++
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 302b7e9cb5ba..d66ac7457574 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -129,6 +129,8 @@ static struct smu_11_0_msg_mapping 
arcturus_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetMemoryChannelEnable,  
PPSMC_MSG_SetMemoryChannelEnable,  0),
MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl, 
0),
MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl, 
0),
+   MSG_MAP(ReadSerialNumTop32,  
PPSMC_MSG_ReadSerialNumTop32,  1),
+   MSG_MAP(ReadSerialNumBottom32,   
PPSMC_MSG_ReadSerialNumBottom32,   1),
 };
 
 static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h 
b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index e07478b6ac04..79afb132164e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -117,6 +117,9 @@
 #define PPSMC_MSG_GmiPwrDnControl0x3D
 #define PPSMC_Message_Count  0x3E
 
+#define PPSMC_MSG_ReadSerialNumTop320x40
+#define PPSMC_MSG_ReadSerialNumBottom32 0x41
+
 typedef uint32_t PPSMC_Result;
 typedef uint32_t PPSMC_Msg;
 #pragma pack(pop)
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov

2020-06-02 Thread Emily Deng
Signed-off-by: Emily Deng 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 5294aa7..8ed6c90 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1311,8 +1311,10 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = >smu;
 
-   if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+   if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
+   smu->pm_enabled = false;
return 0;
+   }
 
ret = smu_start_smc_engine(smu);
if (ret) {
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


回复: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov

2020-06-02 Thread Min, Frank
[AMD Official Use Only - Internal Distribution Only]

Hi Emily,
How about to move it into smu_hw_init()?

Best Regards,
Frank

-邮件原件-
发件人: Deng, Emily 
发送时间: 2020年6月2日 20:08
收件人: Deng, Emily ; amd-gfx@lists.freedesktop.org
抄送: Min, Frank 
主题: RE: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov

[AMD Official Use Only - Internal Distribution Only]

>-Original Message-
>From: Emily Deng 
>Sent: Tuesday, June 2, 2020 7:54 PM
>To: amd-gfx@lists.freedesktop.org
>Cc: Deng, Emily 
>Subject: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov
>
>Change-Id: Ic010440ef625f6f29e91f267a6f284f9b6554e1f
>Signed-off-by: Emily Deng 
>---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
> 1 file changed, 3 insertions(+)
>
>diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>index b6331712..fcbd875 100644
>--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>@@ -2004,6 +2004,9 @@ static int amdgpu_device_ip_init(struct
>amdgpu_device *adev)  if (amdgpu_sriov_vf(adev))
>amdgpu_virt_init_data_exchange(adev);
>
>+if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
>+adev->smu.pm_enabled = 0;
>+
> r = amdgpu_ib_pool_init(adev);
> if (r) {
> dev_err(adev->dev, "IB initialization failed (%d).\n", r);
>--
>2.7.4


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 112/207] drm/amdgpu: fix SDMA hdp flush engine conflict

2020-06-02 Thread Christian König

Am 01.06.20 um 20:19 schrieb Alex Deucher:

From: Likun Gao 

Each of HDP flush engine should be used by one ring, correct allocate of
hdp flush engine to SDMA ring.
Correct me value of each SDMA ring, as it was cleared when init microcode.

Signed-off-by: Likun Gao 
Signed-off-by: Alex Deucher 


Reviewed-by: Christian König 


---
  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 6 ++
  1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c 
b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 52206050adb9..f072cef28b60 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -391,10 +391,7 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct 
amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
  
-	if (ring->me == 0)

-   ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
-   else
-   ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
+   ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
  
  	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |

  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
@@ -1224,6 +1221,7 @@ static int sdma_v5_2_sw_init(void *handle)
ring = >sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
+   ring->me = i;
  
  		DRM_INFO("use_doorbell being set to: [%s]\n",

ring->use_doorbell?"true":"false");


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov

2020-06-02 Thread Deng, Emily
[AMD Official Use Only - Internal Distribution Only]

>-Original Message-
>From: Emily Deng 
>Sent: Tuesday, June 2, 2020 7:54 PM
>To: amd-gfx@lists.freedesktop.org
>Cc: Deng, Emily 
>Subject: [PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov
>
>Change-Id: Ic010440ef625f6f29e91f267a6f284f9b6554e1f
>Signed-off-by: Emily Deng 
>---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
> 1 file changed, 3 insertions(+)
>
>diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>index b6331712..fcbd875 100644
>--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>@@ -2004,6 +2004,9 @@ static int amdgpu_device_ip_init(struct
>amdgpu_device *adev)
> if (amdgpu_sriov_vf(adev))
> amdgpu_virt_init_data_exchange(adev);
>
>+if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
>+adev->smu.pm_enabled = 0;
>+
> r = amdgpu_ib_pool_init(adev);
> if (r) {
> dev_err(adev->dev, "IB initialization failed (%d).\n", r);
>--
>2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/sriov: Disable pm for multiple vf sriov

2020-06-02 Thread Emily Deng
Change-Id: Ic010440ef625f6f29e91f267a6f284f9b6554e1f
Signed-off-by: Emily Deng 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b6331712..fcbd875 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2004,6 +2004,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device 
*adev)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_init_data_exchange(adev);
 
+   if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+   adev->smu.pm_enabled = 0;
+
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/powerplay: move powerplay table operation out of smu_v11_0.c

2020-06-02 Thread Wang, Kevin(Yang)
[AMD Official Use Only - Internal Distribution Only]

Acked-by: Kevin Wang 

Best Regards,
Kevin


From: Gao, Likun 
Sent: Tuesday, June 2, 2020 5:08 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Feng, Kenneth ; Quan, Evan ; Wang, 
Kevin(Yang) ; Gao, Likun 
Subject: [PATCH] drm/amd/powerplay: move powerplay table operation out of 
smu_v11_0.c

From: Likun Gao 

move smu_v11_0_get_max_power_limit and smu_v11_0_set_thermal_range
function from smu_v11_0.c to asic specific _ppt.c to avoid powerplay
table conflict with different ASIC with smu11.

Signed-off-by: Likun Gao 
Change-Id: I194f44e9f59daf19fa4758ed746fa13ccece4308
---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 64 ++-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  2 +
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 -
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 64 ++-
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |  5 ++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 63 +-
 6 files changed, 135 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 1c66b7d7139c..d5527e834a8e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -37,6 +37,8 @@
 #include "arcturus_ppsmc.h"
 #include "nbio/nbio_7_4_offset.h"
 #include "nbio/nbio_7_4_sh_mask.h"
+#include "thm/thm_11_0_2_offset.h"
+#include "thm/thm_11_0_2_sh_mask.h"
 #include "amdgpu_xgmi.h"
 #include 
 #include 
@@ -1324,7 +1326,7 @@ static int arcturus_get_power_limit(struct smu_context 
*smu,
 }

 if (cap)
-   *limit = smu_v11_0_get_max_power_limit(smu);
+   *limit = smu_get_max_power_limit(smu);
 else
 *limit = smu->power_limit;

@@ -2286,6 +2288,64 @@ static int arcturus_set_df_cstate(struct smu_context 
*smu,
 return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, 
state, NULL);
 }

+static int arcturus_set_thermal_range(struct smu_context *smu,
+  struct smu_temperature_range range)
+{
+   struct amdgpu_device *adev = smu->adev;
+   int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+   int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
+   uint32_t val;
+   struct smu_table_context *table_context = >smu_table;
+   struct smu_11_0_powerplay_table *powerplay_table = 
table_context->power_play_table;
+
+   low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
+   range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+   high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, 
powerplay_table->software_shutdown_temp);
+
+   if (low > high)
+   return -EINVAL;
+
+   val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 
0xff));
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 
0xff));
+   val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+   WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+   return 0;
+}
+
+static uint32_t atcturus_get_max_power_limit(struct smu_context *smu) {
+   uint32_t od_limit, max_power_limit;
+   struct smu_11_0_powerplay_table *powerplay_table = NULL;
+   struct smu_table_context *table_context = >smu_table;
+   powerplay_table = table_context->power_play_table;
+
+   max_power_limit = smu_get_pptable_power_limit(smu);
+
+   if (!max_power_limit) {
+   // If we couldn't get the table limit, fall back on first-read 
value
+   if (!smu->default_power_limit)
+   smu->default_power_limit = smu->power_limit;
+   max_power_limit = smu->default_power_limit;
+   }
+
+   if (smu->od_enabled) {
+   od_limit = 
le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+   pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", 
od_limit, smu->default_power_limit);
+
+   max_power_limit *= (100 + od_limit);
+   max_power_limit /= 100;
+   }
+
+   return max_power_limit;
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
 /* translate smu index into arcturus specific index */
 .get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2379,6 +2439,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
 .get_pptable_power_limit = arcturus_get_pptable_power_limit,
 .set_df_cstate 

RE: [PATCH] drm/amd/powerplay: move powerplay table operation out of smu_v11_0.c

2020-06-02 Thread Quan, Evan
[AMD Official Use Only - Internal Distribution Only]

-Original Message-
From: Gao, Likun 
Sent: Tuesday, June 2, 2020 5:09 PM
To: amd-gfx@lists.freedesktop.org
Cc: Feng, Kenneth ; Quan, Evan ; Wang, 
Kevin(Yang) ; Gao, Likun 
Subject: [PATCH] drm/amd/powerplay: move powerplay table operation out of 
smu_v11_0.c

From: Likun Gao 

move smu_v11_0_get_max_power_limit and smu_v11_0_set_thermal_range
function from smu_v11_0.c to asic specific _ppt.c to avoid powerplay
table conflict with different ASIC with smu11.

Signed-off-by: Likun Gao 
Change-Id: I194f44e9f59daf19fa4758ed746fa13ccece4308
---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 64 ++-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  2 +
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 -
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 64 ++-
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |  5 ++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 63 +-
 6 files changed, 135 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 1c66b7d7139c..d5527e834a8e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -37,6 +37,8 @@
 #include "arcturus_ppsmc.h"
 #include "nbio/nbio_7_4_offset.h"
 #include "nbio/nbio_7_4_sh_mask.h"
+#include "thm/thm_11_0_2_offset.h"
+#include "thm/thm_11_0_2_sh_mask.h"
 #include "amdgpu_xgmi.h"
 #include 
 #include 
@@ -1324,7 +1326,7 @@ static int arcturus_get_power_limit(struct smu_context 
*smu,
 }

 if (cap)
-*limit = smu_v11_0_get_max_power_limit(smu);
+*limit = smu_get_max_power_limit(smu);
[Quan, Evan] I think you can just call atcturus_get_max_power_limit directly 
here without need of another wrapper.
 else
 *limit = smu->power_limit;

@@ -2286,6 +2288,64 @@ static int arcturus_set_df_cstate(struct smu_context 
*smu,
 return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }

+static int arcturus_set_thermal_range(struct smu_context *smu,
+   struct smu_temperature_range range)
+{
+struct amdgpu_device *adev = smu->adev;
+int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
+uint32_t val;
+struct smu_table_context *table_context = >smu_table;
+struct smu_11_0_powerplay_table *powerplay_table = 
table_context->power_play_table;
+
+low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
+range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, 
powerplay_table->software_shutdown_temp);
+
+if (low > high)
+return -EINVAL;
+
+val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
+val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
+val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
+val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+return 0;
+}
+
+static uint32_t atcturus_get_max_power_limit(struct smu_context *smu) {
+uint32_t od_limit, max_power_limit;
+struct smu_11_0_powerplay_table *powerplay_table = NULL;
+struct smu_table_context *table_context = >smu_table;
+powerplay_table = table_context->power_play_table;
+
+max_power_limit = smu_get_pptable_power_limit(smu);
+
+if (!max_power_limit) {
+// If we couldn't get the table limit, fall back on first-read value
+if (!smu->default_power_limit)
+smu->default_power_limit = smu->power_limit;
+max_power_limit = smu->default_power_limit;
+}
+
+if (smu->od_enabled) {
+od_limit = 
le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, 
smu->default_power_limit);
+
+max_power_limit *= (100 + od_limit);
+max_power_limit /= 100;
+}
+
+return max_power_limit;
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
 /* translate smu index into arcturus specific index */
 .get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2379,6 +2439,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
 .get_pptable_power_limit = arcturus_get_pptable_power_limit,
 .set_df_cstate = arcturus_set_df_cstate,
+.set_thermal_range = arcturus_set_thermal_range,
+.get_max_power_limit = atcturus_get_max_power_limit,
 };

 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 928eed220f93..0453482fb748 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -574,6 

Re: [PATCH 061/207] drm/amdgpu/mes10.1: implement the ring functions of mes specific

2020-06-02 Thread Christian König

Am 01.06.20 um 20:00 schrieb Alex Deucher:

From: Jack Xiao 

Implement the MES ring functions and set them up.

Signed-off-by: Jack Xiao 
Acked-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 43 ++
  1 file changed, 43 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c 
b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 4f7e345673ca..80f6812d8ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -33,6 +33,47 @@ MODULE_FIRMWARE("amdgpu/navi10_mes.bin");
  
  #define MES_EOP_SIZE   2048
  
+static void mes_v10_1_ring_set_wptr(struct amdgpu_ring *ring)

+{
+   struct amdgpu_device *adev = ring->adev;
+
+   if (ring->use_doorbell) {
+   atomic64_set((atomic64_t*)>wb.wb[ring->wptr_offs],
+ring->wptr);


This atomic64_t type cast still looks fishy to me. IIRC we agreed not to 
use them, didn't we?



+   WDOORBELL64(ring->doorbell_index, ring->wptr);
+   } else {
+   BUG();


Do we really need the BUG() here and below?

Christian.


+   }
+}
+
+static u64 mes_v10_1_ring_get_rptr(struct amdgpu_ring *ring)
+{
+   return ring->adev->wb.wb[ring->rptr_offs];
+}
+
+static u64 mes_v10_1_ring_get_wptr(struct amdgpu_ring *ring)
+{
+   u64 wptr;
+
+   if (ring->use_doorbell)
+   wptr = atomic64_read((atomic64_t *)
+>adev->wb.wb[ring->wptr_offs]);
+   else
+   BUG();
+   return wptr;
+}
+
+static const struct amdgpu_ring_funcs mes_v10_1_ring_funcs = {
+   .type = AMDGPU_RING_TYPE_MES,
+   .align_mask = 1,
+   .nop = 0,
+   .support_64bit_ptrs = true,
+   .get_rptr = mes_v10_1_ring_get_rptr,
+   .get_wptr = mes_v10_1_ring_get_wptr,
+   .set_wptr = mes_v10_1_ring_set_wptr,
+   .insert_nop = amdgpu_ring_insert_nop,
+};
+
  static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes,
  struct mes_add_queue_input *input)
  {
@@ -315,6 +356,8 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
  
  	ring = >mes.ring;
  
+	ring->funcs = _v10_1_ring_funcs;

+
ring->me = 3;
ring->pipe = 0;
ring->queue = 0;


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [RFC 01/17] dma-fence: add might_sleep annotation to _wait()

2020-06-02 Thread Maarten Lankhorst
Op 12-05-2020 om 11:08 schreef Christian König:
> Am 12.05.20 um 10:59 schrieb Daniel Vetter:
>> But only for non-zero timeout, to avoid false positives.
>>
>> One question here is whether the might_sleep should be unconditional,
>> or only for real timeouts. I'm not sure, so went with the more
>> defensive option. But in the interest of locking down the cross-driver
>> dma_fence rules we might want to be more aggressive.
>>
>> Cc: linux-me...@vger.kernel.org
>> Cc: linaro-mm-...@lists.linaro.org
>> Cc: linux-r...@vger.kernel.org
>> Cc: amd-gfx@lists.freedesktop.org
>> Cc: intel-...@lists.freedesktop.org
>> Cc: Chris Wilson 
>> Cc: Maarten Lankhorst 
>> Cc: Christian König 
>> Signed-off-by: Daniel Vetter 
>> ---
>>   drivers/dma-buf/dma-fence.c | 3 +++
>>   1 file changed, 3 insertions(+)
>>
>> diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
>> index 052a41e2451c..6802125349fb 100644
>> --- a/drivers/dma-buf/dma-fence.c
>> +++ b/drivers/dma-buf/dma-fence.c
>> @@ -208,6 +208,9 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool 
>> intr, signed long timeout)
>>   if (WARN_ON(timeout < 0))
>>   return -EINVAL;
>>   +    if (timeout > 0)
>> +    might_sleep();
>> +
>
> I would rather like to see might_sleep() called here all the time even with 
> timeout==0.
>
> IIRC I removed the code in TTM abusing this in atomic context quite a while 
> ago, but could be that some leaked in again or it is called in atomic context 
> elsewhere as well. 


Same, glad I'm not the only one who wants it. :)

~Maarten

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 077/207] drm/amdgpu: no need to set up GPU scheduler for mes ring

2020-06-02 Thread Christian König

Am 01.06.20 um 20:00 schrieb Alex Deucher:

From: Jack Xiao 

As the MES ring submits directly to hardware,
there is no need to set up a GPU scheduler for the MES ring.

Signed-off-by: Jack Xiao 
Acked-by: Alex Deucher 
Reviewed-by: Hawking Zhang 
Reviewed-by: Christian König 
Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 5 +++--
  1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3414e119f0cb..8712a2e1b869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -469,8 +469,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
if (!ring->fence_drv.fences)
return -ENOMEM;
  
-	/* No need to setup the GPU scheduler for KIQ ring */

-   if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
+   /* No need to setup the GPU scheduler for KIQ and MES ring */
+   if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ ||
+   ring->funcs->type != AMDGPU_RING_TYPE_MES) {


BTW: Making this a flag in the ring->funcs struct would probably be 
cleaner than checking the two types here.


But not a must have right now.

Christian.


switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_GFX:
timeout = adev->gfx_timeout;


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/powerplay: move powerplay table operation out of smu_v11_0.c

2020-06-02 Thread Likun Gao
From: Likun Gao 

move smu_v11_0_get_max_power_limit and smu_v11_0_set_thermal_range
function from smu_v11_0.c to asic specific _ppt.c to avoid powerplay
table conflict with different ASIC with smu11.

Signed-off-by: Likun Gao 
Change-Id: I194f44e9f59daf19fa4758ed746fa13ccece4308
---
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  | 64 ++-
 .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h|  2 +
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h |  2 -
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c| 64 ++-
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |  5 ++
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 63 +-
 6 files changed, 135 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 1c66b7d7139c..d5527e834a8e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -37,6 +37,8 @@
 #include "arcturus_ppsmc.h"
 #include "nbio/nbio_7_4_offset.h"
 #include "nbio/nbio_7_4_sh_mask.h"
+#include "thm/thm_11_0_2_offset.h"
+#include "thm/thm_11_0_2_sh_mask.h"
 #include "amdgpu_xgmi.h"
 #include 
 #include 
@@ -1324,7 +1326,7 @@ static int arcturus_get_power_limit(struct smu_context 
*smu,
}
 
if (cap)
-   *limit = smu_v11_0_get_max_power_limit(smu);
+   *limit = smu_get_max_power_limit(smu);
else
*limit = smu->power_limit;
 
@@ -2286,6 +2288,64 @@ static int arcturus_set_df_cstate(struct smu_context 
*smu,
return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, 
NULL);
 }
 
+static int arcturus_set_thermal_range(struct smu_context *smu,
+  struct smu_temperature_range range)
+{
+   struct amdgpu_device *adev = smu->adev;
+   int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+   int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
+   uint32_t val;
+   struct smu_table_context *table_context = >smu_table;
+   struct smu_11_0_powerplay_table *powerplay_table = 
table_context->power_play_table;
+
+   low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
+   range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+   high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, 
powerplay_table->software_shutdown_temp);
+
+   if (low > high)
+   return -EINVAL;
+
+   val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 
0xff));
+   val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 
0xff));
+   val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+   WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+   return 0;
+}
+
+static uint32_t atcturus_get_max_power_limit(struct smu_context *smu) {
+   uint32_t od_limit, max_power_limit;
+   struct smu_11_0_powerplay_table *powerplay_table = NULL;
+   struct smu_table_context *table_context = >smu_table;
+   powerplay_table = table_context->power_play_table;
+
+   max_power_limit = smu_get_pptable_power_limit(smu);
+
+   if (!max_power_limit) {
+   // If we couldn't get the table limit, fall back on first-read 
value
+   if (!smu->default_power_limit)
+   smu->default_power_limit = smu->power_limit;
+   max_power_limit = smu->default_power_limit;
+   }
+
+   if (smu->od_enabled) {
+   od_limit = 
le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+   pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", 
od_limit, smu->default_power_limit);
+
+   max_power_limit *= (100 + od_limit);
+   max_power_limit /= 100;
+   }
+
+   return max_power_limit;
+}
+
 static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2379,6 +2439,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
.set_df_cstate = arcturus_set_df_cstate,
+   .set_thermal_range = arcturus_set_thermal_range,
+   .get_max_power_limit = atcturus_get_max_power_limit,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 928eed220f93..0453482fb748 100644
--- 

RE: [PATCH] drm/amdgpu: remove RAS's useless code

2020-06-02 Thread Chen, Guchun
[AMD Public Use]

Exactly Tao.
When the hw_support capability is masked with AMDGPU_RAS_BLOCK_MASK by an AND 
operation, the sw_support capability is calculated on top of the hw_support 
value and amdgpu_ras_mask accordingly.

Regards,
Guchun

-Original Message-
From: Zhou1, Tao  
Sent: Tuesday, June 2, 2020 2:12 PM
To: Chen, Guchun ; amd-gfx@lists.freedesktop.org; Zhang, 
Hawking ; Li, Dennis ; Clements, John 

Subject: RE: [PATCH] drm/amdgpu: remove RAS's useless code

[AMD Public Use]

I think the real reason is we have " *hw_supported &= AMDGPU_RAS_BLOCK_MASK;" 
already, but the patch is:

Reviewed-by: Tao Zhou 

> -Original Message-
> From: Chen, Guchun 
> Sent: 2020年6月2日 13:58
> To: amd-gfx@lists.freedesktop.org; Zhang, Hawking 
> ; Zhou1, Tao ; Li, Dennis 
> ; Clements, John 
> Cc: Chen, Guchun 
> Subject: [PATCH] drm/amdgpu: remove RAS's useless code
> 
> Module parameter amdgpu_ras_mask has been involved in the calculation 
> of ras support capability, so drop this redundant code.
> 
> Signed-off-by: Guchun Chen 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 --
>  1 file changed, 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> index 79bb05d791bc..337bf2da7bdc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> @@ -1902,8 +1902,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
>   goto err_out;
>   }
> 
> - amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
> -
>   if (amdgpu_ras_fs_init(adev)) {
>   r = -EINVAL;
>   goto err_out;
> --
> 2.17.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: remove RAS's useless code

2020-06-02 Thread Zhou1, Tao
[AMD Public Use]

I think the real reason is we have " *hw_supported &= AMDGPU_RAS_BLOCK_MASK;" 
already, but the patch is:

Reviewed-by: Tao Zhou 

> -Original Message-
> From: Chen, Guchun 
> Sent: 2020年6月2日 13:58
> To: amd-gfx@lists.freedesktop.org; Zhang, Hawking
> ; Zhou1, Tao ; Li,
> Dennis ; Clements, John 
> Cc: Chen, Guchun 
> Subject: [PATCH] drm/amdgpu: remove RAS's useless code
> 
> Module parameter amdgpu_ras_mask has been involved in the calculation of
> ras support capability, so drop this redundant code.
> 
> Signed-off-by: Guchun Chen 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 --
>  1 file changed, 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> index 79bb05d791bc..337bf2da7bdc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> @@ -1902,8 +1902,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
>   goto err_out;
>   }
> 
> - amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
> -
>   if (amdgpu_ras_fs_init(adev)) {
>   r = -EINVAL;
>   goto err_out;
> --
> 2.17.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amdgpu: fix RAS memory leak in error case

2020-06-02 Thread Zhou1, Tao
[AMD Public Use]

Reviewed-by: Tao Zhou 

> -Original Message-
> From: Chen, Guchun 
> Sent: 2020年6月2日 13:50
> To: amd-gfx@lists.freedesktop.org; Zhang, Hawking
> ; Zhou1, Tao 
> Cc: Chen, Guchun 
> Subject: [PATCH] drm/amdgpu: fix RAS memory leak in error case
> 
> RAS context memory needs to freed in failure case.
> 
> Signed-off-by: Guchun Chen 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 19 ++-
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> index 5c73f444eaef..79bb05d791bc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
> @@ -1881,9 +1881,8 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
>   amdgpu_ras_check_supported(adev, >hw_supported,
>   >supported);
>   if (!con->hw_supported) {
> - amdgpu_ras_set_context(adev, NULL);
> - kfree(con);
> - return 0;
> + r = 0;
> + goto err_out;
>   }
> 
>   con->features = 0;
> @@ -1894,29 +1893,31 @@ int amdgpu_ras_init(struct amdgpu_device
> *adev)
>   if (adev->nbio.funcs->init_ras_controller_interrupt) {
>   r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
>   if (r)
> - return r;
> + goto err_out;
>   }
> 
>   if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
>   r = adev->nbio.funcs-
> >init_ras_err_event_athub_interrupt(adev);
>   if (r)
> - return r;
> + goto err_out;
>   }
> 
>   amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
> 
> - if (amdgpu_ras_fs_init(adev))
> - goto fs_out;
> + if (amdgpu_ras_fs_init(adev)) {
> + r = -EINVAL;
> + goto err_out;
> + }
> 
>   dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
>   "hardware ability[%x] ras_mask[%x]\n",
>   con->hw_supported, con->supported);
>   return 0;
> -fs_out:
> +err_out:
>   amdgpu_ras_set_context(adev, NULL);
>   kfree(con);
> 
> - return -EINVAL;
> + return r;
>  }
> 
>  /* helper function to handle common stuff in ip late init phase */
> --
> 2.17.1
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx