[PATCH 1/2] drm/amd/pm: Skip use smc fw data in SRIOV

2020-09-17 Thread Jingwen Chen
smc fw is not needed in SRIOV, thus the driver should not try to get smc
fw data.

Signed-off-by: Jingwen Chen 
---
 .../gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c| 61 ++-
 1 file changed, 32 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index b53872eb4398..d8ca6d968813 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -322,39 +322,42 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
void *table;
uint16_t version_major, version_minor;
 
-   hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
-   version_major = le16_to_cpu(hdr->header.header_version_major);
-   version_minor = le16_to_cpu(hdr->header.header_version_minor);
-   if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) 
||
-   adev->asic_type == CHIP_NAVY_FLOUNDER) {
-   dev_info(adev->dev, "use driver provided pptable %d\n", 
smu->smu_table.boot_values.pp_table_id);
-   switch (version_minor) {
-   case 0:
-   ret = smu_v11_0_set_pptable_v2_0(smu, , );
-   break;
-   case 1:
-   ret = smu_v11_0_set_pptable_v2_1(smu, , ,
-
smu->smu_table.boot_values.pp_table_id);
-   break;
-   default:
-   ret = -EINVAL;
-   break;
+   if (!amdgpu_sriov_vf(adev)) {
+   hdr = (const struct smc_firmware_header_v1_0 *) 
adev->pm.fw->data;
+   version_major = le16_to_cpu(hdr->header.header_version_major);
+   version_minor = le16_to_cpu(hdr->header.header_version_minor);
+   if ((version_major == 2 && 
smu->smu_table.boot_values.pp_table_id > 0) ||
+   adev->asic_type == CHIP_NAVY_FLOUNDER) {
+   dev_info(adev->dev, "use driver provided pptable %d\n", 
smu->smu_table.boot_values.pp_table_id);
+   switch (version_minor) {
+   case 0:
+   ret = smu_v11_0_set_pptable_v2_0(smu, , 
);
+   break;
+   case 1:
+   ret = smu_v11_0_set_pptable_v2_1(smu, , 
,
+   
smu->smu_table.boot_values.pp_table_id);
+   break;
+   default:
+   ret = -EINVAL;
+   break;
+   }
+   if (ret)
+   return ret;
+   goto out;
}
-   if (ret)
-   return ret;
+   }
 
-   } else {
-   dev_info(adev->dev, "use vbios provided pptable\n");
-   index = 
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-   powerplayinfo);
+   dev_info(adev->dev, "use vbios provided pptable\n");
+   index = 
get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+   powerplayinfo);
 
-   ret = amdgpu_atombios_get_data_table(adev, index, 
_table_size, , ,
- (uint8_t **));
-   if (ret)
-   return ret;
-   size = atom_table_size;
-   }
+   ret = amdgpu_atombios_get_data_table(adev, index, _table_size, 
, ,
+   (uint8_t **));
+   if (ret)
+   return ret;
+   size = atom_table_size;
 
+out:
if (!smu->smu_table.power_play_table)
smu->smu_table.power_play_table = table;
if (!smu->smu_table.power_play_table_size)
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amd: Skip not used microcode loading in SRIOV

2020-09-17 Thread Jingwen Chen
The smc, sdma, sos and asd firmware is not used in SRIOV. Skip loading it
to accelerate sw_init.

Signed-off-by: Jingwen Chen 
---
 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 16 +---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |  3 +++
 drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c |  3 +++
 3 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c 
b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 6c5d9612abcb..11252f41ab12 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -109,15 +109,17 @@ static int psp_v11_0_init_microcode(struct psp_context 
*psp)
BUG();
}
 
-   err = psp_init_sos_microcode(psp, chip_name);
-   if (err)
-   return err;
-
-   if (adev->asic_type != CHIP_SIENNA_CICHLID &&
-   adev->asic_type != CHIP_NAVY_FLOUNDER) {
-   err = psp_init_asd_microcode(psp, chip_name);
+   if (!amdgpu_sriov_vf(adev)) {
+   err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
+
+   if (adev->asic_type != CHIP_SIENNA_CICHLID &&
+   adev->asic_type != CHIP_NAVY_FLOUNDER) {
+   err = psp_init_asd_microcode(psp, chip_name);
+   if (err)
+   return err;
+   }
}
 
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c 
b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 48c95a78a173..d2c26b5fa00c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -219,6 +219,9 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device 
*adev)
BUG();
}
 
+   if(amdgpu_sriov_vf(adev))
+   return 0;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
snprintf(fw_name, sizeof(fw_name), 
"amdgpu/%s_sdma.bin", chip_name);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index d8ca6d968813..85860e08c198 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -114,6 +114,9 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
return -EINVAL;
}
 
+   if (amdgpu_sriov_vf(adev))
+   return 0;
+
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
 
err = request_firmware(>pm.fw, fw_name, adev->dev);
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 2/3] SWDEV-220451 - Query guest's information by VF2PF message - Guest side - part 2

2020-09-17 Thread Liu, Monk
[AMD Official Use Only - Internal Distribution Only]

Those three patches are
Reviewed-by: Monk Liu 

_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: amd-gfx  On Behalf Of Bokun Zhang
Sent: Wednesday, September 16, 2020 10:57 PM
To: amd-gfx@lists.freedesktop.org
Cc: Zhang, Bokun 
Subject: [PATCH 2/3] SWDEV-220451 - Query guest's information by VF2PF message 
- Guest side - part 2

- Add VF2PF message support
- Remove incorrect Macro to avoid compile error
- Remove duplicated struct and use amdgv_sriovmsg.h

Change-Id: I8175d304871f4b5aab75fd071a6bdf8008137dbe
Signed-off-by: Bokun Zhang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 244 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   |  75 +--
 3 files changed, 198 insertions(+), 125 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5d702f6e77de..483ec0136332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3443,8 +3443,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 /* make sure IB test finished before entering exclusive mode
  * to avoid preemption on IB test
  * */
-if (amdgpu_sriov_vf(adev))
+if (amdgpu_sriov_vf(adev)) {
 amdgpu_virt_request_full_gpu(adev, false);
+amdgpu_virt_fini_data_exchange(adev);
+}

 /* disable all interrupts */
 amdgpu_irq_disable_all(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f76961d17246..1f1171812e35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -31,6 +31,12 @@
 #include "soc15.h"
 #include "nv.h"

+#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
+do { \
+vf2pf_info->ucode_info[ucode].id = ucode; \
+vf2pf_info->ucode_info[ucode].version = ver; \
+} while (0)
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)  {
 /* By now all MMIO pages except mailbox are blocked */ @@ -239,10 +245,10 @@ 
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)  }


-int amdgpu_virt_fw_reserve_get_checksum(void *obj,
-unsigned long obj_size,
-unsigned int key,
-unsigned int chksum)
+unsigned int amd_sriov_msg_checksum(void *obj,
+unsigned long obj_size,
+unsigned int key,
+unsigned int checksum)
 {
 unsigned int ret = key;
 unsigned long i = 0;
@@ -252,9 +258,9 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
 /* calculate checksum */
 for (i = 0; i < obj_size; ++i)
 ret += *(pos + i);
-/* minus the chksum itself */
-pos = (char *)
-for (i = 0; i < sizeof(chksum); ++i)
+/* minus the checksum itself */
+pos = (char *)
+for (i = 0; i < sizeof(checksum); ++i)
 ret -= *(pos + i);
 return ret;
 }
@@ -415,33 +421,187 @@ static void amdgpu_virt_add_bad_page(struct 
amdgpu_device *adev,
 }
 }

-void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
 {
-uint32_t pf2vf_size = 0;
-uint32_t checksum = 0;
+struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = 
adev->virt.fw_reserve.p_pf2vf;
+uint32_t checksum;
 uint32_t checkval;
-char *str;
+
+if (adev->virt.fw_reserve.p_pf2vf == NULL)
+return -EINVAL;
+
+if (pf2vf_info->size > 1024) {
+DRM_ERROR("invalid pf2vf message size\n");
+return -EINVAL;
+}
+
+switch (pf2vf_info->version) {
+case 1:
+checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
+checkval = amd_sriov_msg_checksum(
+adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+adev->virt.fw_reserve.checksum_key, checksum);
+if (checksum != checkval) {
+DRM_ERROR("invalid pf2vf message\n");
+return -EINVAL;
+}
+
+adev->virt.gim_feature =
+((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
+break;
+case 2:
+/* TODO: missing key, need to add it later */
+checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
+checkval = amd_sriov_msg_checksum(
+adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+0, checksum);
+if (checksum != checkval) {
+DRM_ERROR("invalid pf2vf message\n");
+return -EINVAL;
+}
+
+adev->virt.vf2pf_update_interval_ms =
+((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
+adev->virt.gim_feature =
+((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+
+break;
+default:
+DRM_ERROR("invalid pf2vf version\n");
+return -EINVAL;
+}
+
+/* correct too large or too little interval value */
+if (adev->virt.vf2pf_update_interval_ms < 200 || 
adev->virt.vf2pf_update_interval_ms > 1)
+adev->virt.vf2pf_update_interval_ms = 2000;
+
+return 0;
+}
+
+static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device
+*adev) {
+struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)
+adev->virt.fw_reserve.p_vf2pf;
+
+if (adev->virt.fw_reserve.p_vf2pf == NULL)
+return;
+
+POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,  

[PATCH 2/2] drm/amdgpu: Fix dead lock issue for vblank

2020-09-17 Thread Emily . Deng
Always start the vblank timer, but only call the vblank function
when vblank is enabled.

This fixes a deadlock:
when drm_crtc_vblank_off wants to disable vblank,
it first takes event_lock and then calls hrtimer_cancel,
but hrtimer_cancel has to wait for the timer handler function to finish.
The timer handler in turn wants to acquire event_lock in drm_handle_vblank.

Signed-off-by: Emily.Deng 
Change-Id: I7d3cfb1202cd030fdcdec3e7483fcc4c9fa8db70
---
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 155 +++
 1 file changed, 77 insertions(+), 78 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c 
b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index cc93577dee03..8c02ab74c1de 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -226,6 +226,74 @@ static const struct drm_crtc_helper_funcs 
dce_virtual_crtc_helper_funcs = {
.get_scanout_position = amdgpu_crtc_get_scanout_position,
 };
 
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+   unsigned crtc_id)
+{
+   unsigned long flags;
+   struct amdgpu_crtc *amdgpu_crtc;
+   struct amdgpu_flip_work *works;
+
+   amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+
+   if (crtc_id >= adev->mode_info.num_crtc) {
+   DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+   return -EINVAL;
+   }
+
+   /* IRQ could occur when in initial stage */
+   if (amdgpu_crtc == NULL)
+   return 0;
+
+   spin_lock_irqsave(>ddev->event_lock, flags);
+   works = amdgpu_crtc->pflip_works;
+   if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+   DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+   "AMDGPU_FLIP_SUBMITTED(%d)\n",
+   amdgpu_crtc->pflip_status,
+   AMDGPU_FLIP_SUBMITTED);
+   spin_unlock_irqrestore(>ddev->event_lock, flags);
+   return 0;
+   }
+
+   /* page flip completed. clean up */
+   amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+   amdgpu_crtc->pflip_works = NULL;
+
+   /* wakeup usersapce */
+   if (works->event)
+   drm_crtc_send_vblank_event(_crtc->base, works->event);
+
+   spin_unlock_irqrestore(>ddev->event_lock, flags);
+
+   drm_crtc_vblank_put(_crtc->base);
+   amdgpu_bo_unref(>old_abo);
+   kfree(works->shared);
+   kfree(works);
+
+   return 0;
+}
+
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer 
*vblank_timer)
+{
+   struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
+  struct amdgpu_crtc, 
vblank_timer);
+   struct drm_device *ddev = amdgpu_crtc->base.dev;
+   struct amdgpu_device *adev = ddev->dev_private;
+   struct amdgpu_irq_src *source = 
adev->irq.client[AMDGPU_IRQ_CLIENTID_LEGACY].sources
+   [VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER];
+   int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+   amdgpu_crtc->crtc_id);
+
+   if (amdgpu_irq_enabled(adev, source, irq_type)) {
+   drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+   dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+   }
+   hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD),
+ HRTIMER_MODE_REL);
+
+   return HRTIMER_NORESTART;
+}
+
 static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
 {
struct amdgpu_crtc *amdgpu_crtc;
@@ -247,6 +315,14 @@ static int dce_virtual_crtc_init(struct amdgpu_device 
*adev, int index)
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
drm_crtc_helper_add(_crtc->base, _virtual_crtc_helper_funcs);
 
+   hrtimer_init(_crtc->vblank_timer,
+CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+   hrtimer_set_expires(_crtc->vblank_timer,
+   ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD));
+   amdgpu_crtc->vblank_timer.function =
+   dce_virtual_vblank_timer_handle;
+   hrtimer_start(_crtc->vblank_timer,
+ ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), 
HRTIMER_MODE_REL);
return 0;
 }
 
@@ -476,7 +552,7 @@ static int dce_virtual_hw_fini(void *handle)
 
for (i = 0; imode_info.num_crtc; i++)
if (adev->mode_info.crtcs[i])
-   dce_virtual_set_crtc_vblank_interrupt_state(adev, i, 
AMDGPU_IRQ_STATE_DISABLE);
+   hrtimer_cancel(>mode_info.crtcs[i]->vblank_timer);
 
return 0;
 }
@@ -645,68 +721,6 @@ static void dce_virtual_set_display_funcs(struct 
amdgpu_device *adev)
adev->mode_info.funcs = _virtual_display_funcs;
 }
 
-static int dce_virtual_pageflip(struct amdgpu_device *adev,
-   unsigned crtc_id)
-{
-   unsigned long flags;

[PATCH 1/2] drm/amdgpu/sriov: Add one parameter for mcbp debug

2020-09-17 Thread Emily . Deng
For debugging convenience, add the sriov_mcbp parameter.

Signed-off-by: Emily.Deng 
Change-Id: I84019eb4344e00d85b2ecc853145aabb312412fe
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 9 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  | 3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +-
 4 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 13f92dea182a..a255fbf4d370 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -183,6 +183,7 @@ extern uint amdgpu_ras_mask;
 extern int amdgpu_bad_page_threshold;
 extern int amdgpu_async_gfx_ring;
 extern int amdgpu_mcbp;
+extern int amdgpu_sriov_mcbp;
 extern int amdgpu_discovery;
 extern int amdgpu_mes;
 extern int amdgpu_noretry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 3f07d1475bd2..b0b2f0f7be94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -145,6 +145,7 @@ uint amdgpu_dc_feature_mask = 0;
 uint amdgpu_dc_debug_mask = 0;
 int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
+int amdgpu_sriov_mcbp = 1;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
 int amdgpu_noretry;
@@ -578,6 +579,14 @@ MODULE_PARM_DESC(mcbp,
"Enable Mid-command buffer preemption (0 = disabled (default), 1 = 
enabled)");
 module_param_named(mcbp, amdgpu_mcbp, int, 0444);
 
+/**
+ * DOC: sriov_mcbp (int)
+ * It is used to enable mid command buffer preemption. (0 = disabled, 1 = 
enabled(default))
+ */
+MODULE_PARM_DESC(sriov_mcbp,
+   "Enable sriov Mid-command buffer preemption (0 = disabled (default), 1 
= enabled)");
+module_param_named(sriov_mcbp, amdgpu_sriov_mcbp, int, 0444);
+
 /**
  * DOC: discovery (int)
  * Allow driver to discover hardware IP information from IP Discovery table at 
the top of VRAM.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 2f53fa0ae9a6..ca0e17688bdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -236,7 +236,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned 
num_ibs,
 
for (i = 0; i < num_ibs; ++i) {
ib = [i];
-
+   if (!amdgpu_sriov_mcbp)
+   ib->flags &= ~AMDGPU_IB_FLAG_PREEMPT;
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
skip_preamble &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d7f37cb92a97..156e76a5a6e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -742,7 +742,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void 
*data, struct drm_file
dev_info.ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
-   if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
+   if (amdgpu_mcbp || (amdgpu_sriov_vf(adev) && amdgpu_sriov_mcbp))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/1] drm/amdgpu: Convert to using devm_drm_dev_alloc() (v2)

2020-09-17 Thread Alex Deucher
On Thu, Sep 17, 2020 at 6:13 PM Luben Tuikov  wrote:
>
> Convert to using devm_drm_dev_alloc(),
> as drm_dev_init() is going away.
>
> v2: Remove drm_dev_put() since
> a) devres doesn't do refcounting, see
> Documentation/driver-api/driver-model/devres.rst,
> Section 4, paragraph 1; and since
> b) devres acts as garbage collector when
> the DRM device's parent's devres "action" callback
> is called to free the container device (amdgpu_device),
> which embeds the DRM dev.
>
> Signed-off-by: Luben Tuikov 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16 
>  1 file changed, 4 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 3f07d1475bd2..8d658d2a16fe 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -1159,25 +1159,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
> if (ret)
> return ret;
>
> -   adev = kzalloc(sizeof(*adev), GFP_KERNEL);
> -   if (!adev)
> -   return -ENOMEM;
> +   adev = devm_drm_dev_alloc(>dev, _driver, typeof(*adev), 
> ddev);
> +   if (IS_ERR(adev))
> +   return PTR_ERR(adev);
>
> adev->dev  = >dev;
> adev->pdev = pdev;
> ddev = adev_to_drm(adev);
> -   ret = drm_dev_init(ddev, _driver, >dev);
> -   if (ret)
> -   goto err_free;
> -
> -   drmm_add_final_kfree(ddev, adev);
>
> if (!supports_atomic)
> ddev->driver_features &= ~DRIVER_ATOMIC;
>
> ret = pci_enable_device(pdev);
> if (ret)
> -   goto err_free;
> +   return ret;
>
> ddev->pdev = pdev;
> pci_set_drvdata(pdev, ddev);
> @@ -1205,8 +1200,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
>
>  err_pci:
> pci_disable_device(pdev);
> -err_free:
> -   drm_dev_put(ddev);
> return ret;
>  }
>
> @@ -1223,7 +1216,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
> amdgpu_driver_unload_kms(dev);
> pci_disable_device(pdev);
> pci_set_drvdata(pdev, NULL);
> -   drm_dev_put(dev);
>  }
>
>  static void
> --
> 2.28.0.394.ge197136389
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/pm: apply dummy reads workaround for CDR enabled only

2020-09-17 Thread Deucher, Alexander
[AMD Official Use Only - Internal Distribution Only]

Acked-by: Alex Deucher 

From: Quan, Evan 
Sent: Thursday, September 17, 2020 10:36 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Deucher, Alexander ; Quan, Evan 

Subject: [PATCH] drm/amd/pm: apply dummy reads workaround for CDR enabled only

For CDR disabled case, the dummy reads workaround is not needed.

Change-Id: I474619b3d82792151870811c289ab311028de211
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 338a9fdeef6e..5b87690c1e61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2303,10 +2303,12 @@ static int navi10_run_umc_cdr_workaround(struct 
smu_context *smu)
 if (umc_fw_greater_than_v136)
 return 0;

-   if (umc_fw_disable_cdr && adev->asic_type == CHIP_NAVI10)
-   return navi10_umc_hybrid_cdr_workaround(smu);
-   else
+   if (umc_fw_disable_cdr) {
+   if (adev->asic_type == CHIP_NAVI10)
+   return navi10_umc_hybrid_cdr_workaround(smu);
+   } else {
 return navi10_set_dummy_pstates_table_location(smu);
+   }
 } else {
 if (adev->asic_type == CHIP_NAVI10)
 return navi10_umc_hybrid_cdr_workaround(smu);
--
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Deng, Emily
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Emily.Deng 

>-Original Message-
>From: amd-gfx  On Behalf Of
>Jingwen Chen
>Sent: Thursday, September 17, 2020 5:43 PM
>To: amd-gfx@lists.freedesktop.org
>Cc: Chen, JingWen 
>Subject: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV
>
>smu_post_init needs to enable SMU feature, while this require virtualization
>off. Skip it since this feature is not used in SRIOV.
>
>v2: move the check to the early stage of smu_post_init.
>
>v3: fix typo
>
>Signed-off-by: Jingwen Chen 
>---
> drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +++
> 1 file changed, 3 insertions(+)
>
>diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
>b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
>index a027c7fdad56..05cb1fdd15ce 100644
>--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
>+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
>@@ -2631,6 +2631,9 @@ static int navi10_post_smu_init(struct smu_context
>*smu)
> uint64_t feature_mask = 0;
> int ret = 0;
>
>+if (amdgpu_sriov_vf(adev))
>+return 0;
>+
> /* For Naiv1x, enable these features only after DAL initialization */
> if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
> feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
>--
>2.25.1
>
>___
>amd-gfx mailing list
>amd-gfx@lists.freedesktop.org
>https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.fre
>edesktop.org%2Fmailman%2Flistinfo%2Famd-
>gfxdata=02%7C01%7CEmily.Deng%40amd.com%7Ca1fbb64ca45945c3f3
>e008d85aee24df%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C6
>37359326168883628sdata=Ix4oI%2FxGMb3vUimmLO%2Bix%2Bgqp9OY
>O0WfTOlZvieZj3Y%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/pm: apply dummy reads workaround for CDR enabled only

2020-09-17 Thread Evan Quan
For CDR disabled case, the dummy reads workaround is not needed.

Change-Id: I474619b3d82792151870811c289ab311028de211
Signed-off-by: Evan Quan 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 338a9fdeef6e..5b87690c1e61 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2303,10 +2303,12 @@ static int navi10_run_umc_cdr_workaround(struct 
smu_context *smu)
if (umc_fw_greater_than_v136)
return 0;
 
-   if (umc_fw_disable_cdr && adev->asic_type == CHIP_NAVI10)
-   return navi10_umc_hybrid_cdr_workaround(smu);
-   else
+   if (umc_fw_disable_cdr) {
+   if (adev->asic_type == CHIP_NAVI10)
+   return navi10_umc_hybrid_cdr_workaround(smu);
+   } else {
return navi10_set_dummy_pstates_table_location(smu);
+   }
} else {
if (adev->asic_type == CHIP_NAVI10)
return navi10_umc_hybrid_cdr_workaround(smu);
-- 
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu: use function pointer for gfxhub functions

2020-09-17 Thread Oak Zeng
gfxhub functions are now called from function pointers,
instead of from asic-specific functions.

Signed-off-by: Oak Zeng 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h|  4 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c |  3 +-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c   |  3 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c  |  5 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h | 43 ++
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   | 10 
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |  1 +
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c   | 13 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h   |  2 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c   | 24 +++---
 drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h   | 10 +---
 drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c   | 27 +++
 drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h   | 12 +
 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 53 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  | 28 +---
 15 files changed, 155 insertions(+), 83 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 13f92de..0d8ace9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -104,6 +104,7 @@
 #include "amdgpu_mes.h"
 #include "amdgpu_umc.h"
 #include "amdgpu_mmhub.h"
+#include "amdgpu_gfxhub.h"
 #include "amdgpu_df.h"
 
 #define MAX_GPU_INSTANCE   16
@@ -884,6 +885,9 @@ struct amdgpu_device {
/* mmhub */
struct amdgpu_mmhub mmhub;
 
+   /* gfxhub */
+   struct amdgpu_gfxhubgfxhub;
+
/* gfx */
struct amdgpu_gfx   gfx;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index df0aab0..1529815 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -32,7 +32,6 @@
 #include "v10_structs.h"
 #include "nv.h"
 #include "nvd.h"
-#include "gfxhub_v2_0.h"
 
 enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -753,7 +752,7 @@ static void set_vm_context_page_table_base(struct kgd_dev 
*kgd, uint32_t vmid,
}
 
/* SDMA is on gfxhub as well for Navi1* series */
-   gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+   adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index e12623a..b7ea20e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -31,7 +31,6 @@
 #include "v10_structs.h"
 #include "nv.h"
 #include "nvd.h"
-#include "gfxhub_v2_1.h"
 
 enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -657,7 +656,7 @@ static void set_vm_context_page_table_base_v10_3(struct 
kgd_dev *kgd, uint32_t v
struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
/* SDMA is on gfxhub as well for Navi1* series */
-   gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
+   adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 #if 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e6aede7..b824582 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -36,9 +36,6 @@
 #include "v9_structs.h"
 #include "soc15.h"
 #include "soc15d.h"
-#include "mmhub_v1_0.h"
-#include "gfxhub_v1_0.h"
-
 
 enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -703,7 +700,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct 
kgd_dev *kgd,
 
adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 
-   gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+   adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
new file mode 100644
index 000..66ebc2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the 

[PATCH 0/1] Convert to using devm_drm_dev_alloc() (v2)

2020-09-17 Thread Luben Tuikov
This is based on top of Daniel's documentation
patch and it applies cleanly onto amd-staging-drm-next.
I'm also running this live.

This version removes explicit drm_dev_put(), as it seems
that devres acts as a garbage collector.

Luben Tuikov (1):
  drm/amdgpu: Convert to using devm_drm_dev_alloc() (v2)

 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16 
 1 file changed, 4 insertions(+), 12 deletions(-)

-- 
2.28.0.394.ge197136389

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/1] drm/amdgpu: Convert to using devm_drm_dev_alloc() (v2)

2020-09-17 Thread Luben Tuikov
Convert to using devm_drm_dev_alloc(),
as drm_dev_init() is going away.

v2: Remove drm_dev_put() since
a) devres doesn't do refcounting, see
Documentation/driver-api/driver-model/devres.rst,
Section 4, paragraph 1; and since
b) devres acts as garbage collector when
the DRM device's parent's devres "action" callback
is called to free the container device (amdgpu_device),
which embeds the DRM dev.

Signed-off-by: Luben Tuikov 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 16 
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 3f07d1475bd2..8d658d2a16fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1159,25 +1159,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
 
-   adev = kzalloc(sizeof(*adev), GFP_KERNEL);
-   if (!adev)
-   return -ENOMEM;
+   adev = devm_drm_dev_alloc(>dev, _driver, typeof(*adev), ddev);
+   if (IS_ERR(adev))
+   return PTR_ERR(adev);
 
adev->dev  = >dev;
adev->pdev = pdev;
ddev = adev_to_drm(adev);
-   ret = drm_dev_init(ddev, _driver, >dev);
-   if (ret)
-   goto err_free;
-
-   drmm_add_final_kfree(ddev, adev);
 
if (!supports_atomic)
ddev->driver_features &= ~DRIVER_ATOMIC;
 
ret = pci_enable_device(pdev);
if (ret)
-   goto err_free;
+   return ret;
 
ddev->pdev = pdev;
pci_set_drvdata(pdev, ddev);
@@ -1205,8 +1200,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
 err_pci:
pci_disable_device(pdev);
-err_free:
-   drm_dev_put(ddev);
return ret;
 }
 
@@ -1223,7 +1216,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
-   drm_dev_put(dev);
 }
 
 static void
-- 
2.28.0.394.ge197136389

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/3] drm/amdgpu: add per device user friendly xgmi events for vega20

2020-09-17 Thread Jonathan Kim
Non-outbound data metrics are not useful so mark them as legacy.
Bucket new perf counters into device and not device ip.
Bind events to chip instead of IP.
Report available event counters and not number of hw counter banks.
Move DF public macros to private since not needed outside of IP version.

v3: attr groups const array is global but attr groups are allocated per
device which doesn't work and causes problems on memory allocation and
de-allocation for pmu unregister. Switch to building const attr groups
per pmu instead to simplify solution.

v2: add comments on sysfs structure and formatting.

Signed-off-by: Jonathan Kim 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h |  13 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 341 
 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h |   6 +-
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c|  72 +
 drivers/gpu/drm/amd/amdgpu/df_v3_6.h|   9 -
 5 files changed, 304 insertions(+), 137 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 13f92dea182a..f43dfdd2716a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1279,19 +1279,6 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
 
 #include "amdgpu_object.h"
 
-/* used by df_v3_6.c and amdgpu_pmu.c */
-#define AMDGPU_PMU_ATTR(_name, _object)
\
-static ssize_t \
-_name##_show(struct device *dev,   \
-  struct device_attribute *attr,   \
-  char *page)  \
-{  \
-   BUILD_BUG_ON(sizeof(_object) >= PAGE_SIZE - 1); \
-   return sprintf(page, _object "\n"); \
-}  \
-   \
-static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
-
 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
 {
return adev->gmc.tmz_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 1b0ec715c8ba..74fe8fbdc0d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -27,9 +27,19 @@
 #include 
 #include "amdgpu.h"
 #include "amdgpu_pmu.h"
-#include "df_v3_6.h"
 
 #define PMU_NAME_SIZE 32
+#define NUM_FORMATS_AMDGPU_PMU 4
+#define NUM_FORMATS_DF_LEGACY  3
+#define NUM_EVENTS_DF_LEGACY   8
+#define NUM_EVENTS_VEGA20_XGMI 2
+#define NUM_EVENTS_VEGA20_MAX  NUM_EVENTS_VEGA20_XGMI
+
+struct amdgpu_pmu_event_attribute {
+   struct device_attribute attr;
+   const char *event_str;
+   unsigned int type;
+};
 
 /* record to keep track of pmu entry per pmu type per device */
 struct amdgpu_pmu_entry {
@@ -37,10 +47,74 @@ struct amdgpu_pmu_entry {
struct amdgpu_device *adev;
struct pmu pmu;
unsigned int pmu_perf_type;
+   struct attribute_group fmt_attr_group;
+   struct amdgpu_pmu_event_attribute *fmt_attr;
+   struct attribute_group evt_attr_group;
+   struct amdgpu_pmu_event_attribute *evt_attr;
 };
 
+static ssize_t amdgpu_pmu_event_show(struct device *dev,
+   struct device_attribute *attr, char *buf)
+{
+   struct amdgpu_pmu_event_attribute *amdgpu_pmu_attr;
+
+   amdgpu_pmu_attr = container_of(attr, struct amdgpu_pmu_event_attribute,
+   attr);
+
+   if (!amdgpu_pmu_attr->type)
+   return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str);
+
+   return sprintf(buf, "%s,type=0x%x\n",
+   amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type);
+}
+
 static LIST_HEAD(amdgpu_pmu_list);
 
+/*
+ * Event formatting is global to all amdgpu events under sysfs folder
+ * /sys/bus/event_source/devices/amdgpu_ where dev_num is the
+ * primary device index. Registered events can be found in subfolder "events"
+ * and formatting under subfolder "format".
+ *
+ * Formats "event", "instance", and "umask" are currently used by xGMI but can
+ * be for generalized for other IP usage.  If format naming is insufficient
+ * for newly registered IP events, append to the list below and handle the
+ * perf events hardware configuration (see hwc->config) as required by the IP.
+ *
+ * Format "type" indicates IP type generated on pmu registration (see
+ * init_pmu_by_type) so non-legacy events omit this in the per-chip event
+ * list (e.g. vega20_events).
+ */
+static const char *amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU][2] = {
+   { "event", "config:0-7" },
+   { "instance", "config:8-15" },
+   { "umask", "config:16-23"},

[PATCH 3/3] drm/amdgpu: add xgmi perfmons for arcturus

2020-09-17 Thread Jonathan Kim
Add xgmi perfmons for Arcturus.

Signed-off-by: Jonathan Kim 

v2: Resend for re-review with alignment for v3 in patch 2.
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 41 -
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c|  3 ++
 2 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 74fe8fbdc0d1..c58af4495c5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -34,6 +34,8 @@
 #define NUM_EVENTS_DF_LEGACY   8
 #define NUM_EVENTS_VEGA20_XGMI 2
 #define NUM_EVENTS_VEGA20_MAX  NUM_EVENTS_VEGA20_XGMI
+#define NUM_EVENTS_ARCTURUS_XGMI   6
+#define NUM_EVENTS_ARCTURUS_MAXNUM_EVENTS_ARCTURUS_XGMI
 
 struct amdgpu_pmu_event_attribute {
struct device_attribute attr;
@@ -98,6 +100,16 @@ static const char *vega20_events[NUM_EVENTS_VEGA20_MAX][2] 
= {
{ "xgmi_link1_data_outbound", "event=0x7,instance=0x47,umask=0x2" }
 };
 
+/* Arcturus events */
+static const char *arcturus_events[NUM_EVENTS_ARCTURUS_MAX][2] = {
+   { "xgmi_link0_data_outbound", "event=0x7,instance=0x4b,umask=0x2" },
+   { "xgmi_link1_data_outbound", "event=0x7,instance=0x4c,umask=0x2" },
+   { "xgmi_link2_data_outbound", "event=0x7,instance=0x4d,umask=0x2" },
+   { "xgmi_link3_data_outbound", "event=0x7,instance=0x4e,umask=0x2" },
+   { "xgmi_link4_data_outbound", "event=0x7,instance=0x4f,umask=0x2" },
+   { "xgmi_link5_data_outbound", "event=0x7,instance=0x50,umask=0x2" }
+};
+
 /* All df_vega20_* items are DEPRECATED. Use vega20_ items above instead. */
 static const char *df_vega20_formats[NUM_FORMATS_DF_LEGACY][2] = {
{ "event", "config:0-7" },
@@ -413,6 +425,32 @@ static int init_pmu_by_type(struct amdgpu_pmu_entry 
*pmu_entry,
 
/* other events can be added here */
 
+   break;
+   case CHIP_ARCTURUS:
+   ret = amdgpu_pmu_alloc_pmu_attrs(_entry->fmt_attr_group,
+   _entry->fmt_attr,
+   NUM_FORMATS_AMDGPU_PMU,
+   _entry->evt_attr_group,
+   _entry->evt_attr,
+   NUM_EVENTS_ARCTURUS_MAX);
+
+   if (ret)
+   goto err_out;
+
+   amdgpu_pmu_create_attributes(_entry->fmt_attr_group,
+   pmu_entry->fmt_attr,
+   amdgpu_pmu_formats,
+   0, NUM_FORMATS_AMDGPU_PMU, 0);
+
+   amdgpu_pmu_create_attributes(_entry->evt_attr_group,
+   pmu_entry->evt_attr,
+   arcturus_events,
+   0, NUM_EVENTS_ARCTURUS_XGMI,
+   PERF_TYPE_AMDGPU_XGMI);
+   num_events += NUM_EVENTS_ARCTURUS_XGMI;
+
+   /* other events can be added here */
+
break;
default:
ret = -ENODEV;
@@ -473,7 +511,8 @@ void amdgpu_pmu_fini(struct amdgpu_device *adev)
 
 static bool amdgpu_pmu_is_supported(struct amdgpu_device *adev)
 {
-   return adev->asic_type == CHIP_VEGA20;
+   return adev->asic_type == CHIP_VEGA20 ||
+   adev->asic_type == CHIP_ARCTURUS;
 }
 
 /* init amdgpu_pmu */
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c 
b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 6e57ae95f997..6b4b30a8dce5 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -513,6 +513,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, 
uint64_t config,
 
switch (adev->asic_type) {
case CHIP_VEGA20:
+   case CHIP_ARCTURUS:
if (is_add)
return df_v3_6_pmc_add_cntr(adev, config);
 
@@ -554,6 +555,7 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, 
uint64_t config,
 
switch (adev->asic_type) {
case CHIP_VEGA20:
+   case CHIP_ARCTURUS:
ret = df_v3_6_pmc_get_ctrl_settings(adev,
config,
counter_idx,
@@ -590,6 +592,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device 
*adev,
 
switch (adev->asic_type) {
case CHIP_VEGA20:
+   case CHIP_ARCTURUS:
df_v3_6_pmc_get_read_settings(adev, config, counter_idx,
_base_addr, _base_addr);
 
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/3] drm/amdgpu: fix xgmi perfmon a-b-a problem

2020-09-17 Thread Jonathan Kim
Mapping hw counters per event config will cause ABA problems so map per
event instead.

v2: Discontinue starting perf counters if add fails.  Make it clear what's
happening with pmc_start.

Signed-off-by: Jonathan Kim 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_df.h  |   6 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c |  42 ++
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c| 105 +++-
 3 files changed, 78 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 373cdebe0e2f..52488bb45112 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -44,11 +44,11 @@ struct amdgpu_df_funcs {
void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
bool enable);
int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
-int is_add);
+int counter_idx, int is_add);
int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
-int is_remove);
+int counter_idx, int is_remove);
void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
-uint64_t *count);
+int counter_idx, uint64_t *count);
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
 uint32_t ficadl_val, uint32_t ficadh_val);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 69af462db34d..1b0ec715c8ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -64,6 +64,7 @@ static void amdgpu_perf_start(struct perf_event *event, int 
flags)
struct amdgpu_pmu_entry *pe = container_of(event->pmu,
  struct amdgpu_pmu_entry,
  pmu);
+   int target_cntr = 0;
 
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
@@ -73,17 +74,24 @@ static void amdgpu_perf_start(struct perf_event *event, int 
flags)
 
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
-   if (!(flags & PERF_EF_RELOAD))
-   pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
+   if (!(flags & PERF_EF_RELOAD)) {
+   target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
+   hwc->config, 0 /* unused */,
+   1 /* add counter */);
+   if (target_cntr < 0)
+   break;
+
+   hwc->idx = target_cntr;
+   }
 
-   pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
+   pe->adev->df.funcs->pmc_start(pe->adev, hwc->config,
+   hwc->idx, 0);
break;
default:
break;
}
 
perf_event_update_userpage(event);
-
 }
 
 /* read perf counter */
@@ -101,8 +109,8 @@ static void amdgpu_perf_read(struct perf_event *event)
 
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
-   pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
- );
+   pe->adev->df.funcs->pmc_get_count(pe->adev,
+   hwc->config, hwc->idx, );
break;
default:
count = 0;
@@ -126,7 +134,8 @@ static void amdgpu_perf_stop(struct perf_event *event, int 
flags)
 
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
-   pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
+   pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
+   0);
break;
default:
break;
@@ -142,12 +151,11 @@ static void amdgpu_perf_stop(struct perf_event *event, 
int flags)
hwc->state |= PERF_HES_UPTODATE;
 }
 
-/* add perf counter  */
+/* add perf counter */
 static int amdgpu_perf_add(struct perf_event *event, int flags)
 {
struct hw_perf_event *hwc = >hw;
-   int retval;
-
+   int retval = 0, target_cntr;
struct amdgpu_pmu_entry *pe = container_of(event->pmu,
  struct amdgpu_pmu_entry,
  pmu);
@@ -156,8 +164,14 @@ static int amdgpu_perf_add(struct perf_event *event, 

RE: [PATCH 3/3] drm/amdgpu: add xgmi perfmons for arcturus

2020-09-17 Thread Kim, Jonathan
Hi Harish.  Thanks for the review.  As discussed offline, patch 2 has a problem 
where attr groups array is global but allocation is done per-device causing 
problems with mem free and pmu unregister.  I'm sending out a second series 
that should fix this and simplify the solution as well as hopefully address 
your concerns.  Sorry again for the churn.

Jon

> -Original Message-
> From: Kasiviswanathan, Harish 
> Sent: Tuesday, September 15, 2020 10:10 PM
> To: Kim, Jonathan ; amd-
> g...@lists.freedesktop.org
> Subject: RE: [PATCH 3/3] drm/amdgpu: add xgmi perfmons for arcturus
> 
> [AMD Official Use Only - Internal Distribution Only]
> 
> Reviewed-by: Harish Kasiviswanathan 
> 
> -Original Message-
> From: Kim, Jonathan 
> Sent: Tuesday, September 15, 2020 6:00 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Kasiviswanathan, Harish ; Kim,
> Jonathan ; Kim, Jonathan
> 
> Subject: [PATCH 3/3] drm/amdgpu: add xgmi perfmons for arcturus
> 
> Add xgmi perfmons for Arcturus.
> 
> Signed-off-by: Jonathan Kim 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 55
> +
>  drivers/gpu/drm/amd/amdgpu/df_v3_6.c|  3 ++
>  2 files changed, 58 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> index f3d2ac0e88a7..ec521c72e631 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
> @@ -34,6 +34,8 @@
>  #define NUM_EVENTS_DF_LEGACY 8
>  #define NUM_EVENTS_VEGA20_XGMI   2
>  #define NUM_EVENTS_VEGA20_MAX2
> +#define NUM_EVENTS_ARCTURUS_XGMI 6
> +#define NUM_EVENTS_ARCTURUS_MAX  6
> 
>  /* record to keep track of pmu entry per pmu type per device */  struct
> amdgpu_pmu_entry { @@ -110,6 +112,27 @@ const struct attribute_group
> *vega20_attr_groups[] = {
>   NULL
>  };
> 
> +/* Arcturus events */
> +static const char *arcturus_events[NUM_EVENTS_ARCTURUS_MAX][2] = {
> + { "xgmi_link0_data_outbound",
> "event=0x7,instance=0x4b,umask=0x2" },
> + { "xgmi_link1_data_outbound",
> "event=0x7,instance=0x4c,umask=0x2" },
> + { "xgmi_link2_data_outbound",
> "event=0x7,instance=0x4d,umask=0x2" },
> + { "xgmi_link3_data_outbound",
> "event=0x7,instance=0x4e,umask=0x2" },
> + { "xgmi_link4_data_outbound",
> "event=0x7,instance=0x4f,umask=0x2" },
> + { "xgmi_link5_data_outbound",
> "event=0x7,instance=0x50,umask=0x2" } };
> +
> +static struct attribute_group arcturus_event_attr_group = {
> + .name = "events",
> + .attrs = NULL
> +};
> +
> +const struct attribute_group *arcturus_attr_groups[] = {
> + _pmu_format_attr_group,
> + _event_attr_group,
> + NULL
> +};
> +
>  /* All df_vega20_* items are DEPRECATED. Use vega20_ items above
> instead. */  static const char
> *df_vega20_formats[NUM_FORMATS_DF_LEGACY][2] = {
>   { "event", "config:0-7" },
> @@ -400,6 +423,16 @@ static int init_pmu_by_type(struct amdgpu_device
> *adev,
> 
>   pmu_entry->pmu.attr_groups = vega20_attr_groups;
>   break;
> + case CHIP_ARCTURUS:
> + amdgpu_pmu_create_attributes(evt_attr_group, evt_attr,
> + arcturus_events, 0,
> NUM_EVENTS_ARCTURUS_XGMI,
> + PERF_TYPE_AMDGPU_XGMI);
> + num_events += NUM_EVENTS_ARCTURUS_XGMI;
> +
> + /* other events can be added here */
> +
> + pmu_entry->pmu.attr_groups = arcturus_attr_groups;
> + break;
>   default:
>   return -ENODEV;
>   };
> @@ -530,6 +563,28 @@ int amdgpu_pmu_init(struct amdgpu_device *adev)
>   goto err_pmu;
>   }
> 
> + break;
> + case CHIP_ARCTURUS:
> + ret =
> amdgpu_pmu_alloc_pmu_attrs(_pmu_format_attr_group,
> + _attr,
> +
>   NUM_FORMATS_AMDGPU_PMU,
> + _event_attr_group,
> + _attr,
> +
>   NUM_EVENTS_ARCTURUS_MAX);
> +
> + if (ret)
> + goto err_alloc;
> +
> + ret = init_pmu_by_type(adev,
> + _pmu_format_attr_group,
> fmt_attr,
> + _event_attr_group, evt_attr,
> + "Event", "amdgpu",
> PERF_TYPE_AMDGPU_MAX);
> +
> + if (ret) {
> + kfree(arcturus_event_attr_group.attrs);
> + goto err_pmu;
> + }
> +
>   break;
>   default:
>   return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> index 6e57ae95f997..6b4b30a8dce5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
> @@ -513,6 +513,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device
> *adev, uint64_t config,
> 
>   switch 

Re: [PATCH] Revert "drm/radeon: handle PCIe root ports with addressing limitations"

2020-09-17 Thread Christoph Hellwig
On Wed, Sep 16, 2020 at 06:16:25PM -0400, Alex Deucher wrote:
> On Wed, Sep 16, 2020 at 3:04 AM Christoph Hellwig  wrote:
> >
> > On Tue, Sep 15, 2020 at 02:46:07PM -0400, Alex Deucher wrote:
> > > This change breaks tons of systems.
> >
> > Did you do at least some basic root causing on why?  Do GPUs get
> > fed address they can't deal with?  Any examples?
> >
> > Bug 1 doesn't seem to contain any analysis and was reported against
> > a very old kernel that had all kind of fixes since.
> >
> > Bug 2 seems to imply a drm kthread is accessing some structure it
> > shouldn't, which would imply a mismatch between pools used by radeon
> > now and those actually provided by the core.  Something that should
> > be pretty to trivial to fix for someone understanding the whole ttm
> > pool maze.
> >
> > Bug 3: same as 1, but an even older kernel.
> >
> > Bug 4: looks like 1 and 3, and actually verified to work properly
> > in 5.9-rc.  Did you try to get the other reporters test this as well?
> 
> It would appear that the change in 5.9 to disable AGP on radeon fixed
> the issue.  I'm following up on the other tickets to see if I can get
> confirmation.  On another thread[1], the user was able to avoid the
> issue by disabling HIMEM.  Looks like some issue with HIMEM and/or
> AGP.

Thanks.  I'll try to spend some time to figure out what could be
highmem related.  I'd much rather get this fixed properly.
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 3/3] drm/amd/amdkfd: Surface files in Sysfs to allow users to get number of compute units that are in use.

2020-09-17 Thread Ramesh Errabolu
[Why]
Allow user to know how many compute units (CU) are in use at any given
moment.

[How]
Surface files in Sysfs that allow user to determine the number of compute
units that are in use for a given process. One Sysfs file is used per
device.

Signed-off-by: Ramesh Errabolu 
---
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h|  28 ++-
 drivers/gpu/drm/amd/amdkfd/kfd_process.c | 101 +++
 2 files changed, 114 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 023629f28495..2ce03586d0dc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -631,7 +631,7 @@ enum kfd_pdd_bound {
PDD_BOUND_SUSPENDED,
 };
 
-#define MAX_SYSFS_FILENAME_LEN 11
+#define MAX_SYSFS_FILENAME_LEN 32
 
 /*
  * SDMA counter runs at 100MHz frequency.
@@ -692,6 +692,32 @@ struct kfd_process_device {
uint64_t sdma_past_activity_counter;
struct attribute attr_sdma;
char sdma_filename[MAX_SYSFS_FILENAME_LEN];
+
+   /*
+* @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
+* that is associated with device encoded by "this" struct instance. The
+* value reflects CU usage by all of the waves launched by this process
+* on this device. A very important property of occupancy parameter is
+* that its value is a snapshot of current use.
+*
+* Following is to be noted regarding how this parameter is reported:
+*
+*  The number of waves that a CU can launch is limited by couple of
+*  parameters. These are encoded by struct amdgpu_cu_info instance
+*  that is part of every device definition. For GFX9 devices this
+*  translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
+*  do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
+*  when they do. This could change for future devices and therefore
+*  this example should be considered as a guide.
+*
+*  All CU's of a device are available for the process. This may not be 
true
+*  under certain conditions - e.g. CU masking.
+*
+*  Finally number of CU's that are occupied by a process is affected 
by both
+*  number of CU's a device has along with number of other competing 
processes
+*/
+   struct attribute attr_cu_occupancy;
+   char cu_occupancy_filename[MAX_SYSFS_FILENAME_LEN];
 };
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a0e12a79ab7d..3baffbc828b2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -249,6 +249,63 @@ static void kfd_sdma_activity_worker(struct work_struct 
*work)
}
 }
 
+/**
+ * @kfd_get_cu_occupancy() - Collect number of waves in-flight on this device
+ * by current process. Translates acquired wave count into number of compute 
units
+ * that are occupied.
+ *
+ * @attr: Handle of attribute that allows reporting of wave count. The attribute
+ * handle encapsulates GPU device it is associated with, thereby allowing 
collection
+ * of waves in flight, etc
+ *
+ * @buffer: Handle of user provided buffer updated with wave count
+ *
+ * Return: Number of bytes written to user buffer or an error value
+ */
+static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+{
+   int cu_cnt;
+   int wave_cnt;
+   int max_waves_per_cu;
+   struct kfd_dev *dev = NULL;
+   struct kfd_process *proc = NULL;
+   struct amdgpu_device *adev = NULL;
+   struct kfd_process_device *pdd = NULL;
+
+   /* Acquire handle of Process-Device-Data associated with attribute */
+   pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
+
+   /*
+* Acquire handle of Gpu Device associated with attribute. Determine
+* if ability to collect CU occupancy is defined for this device. If
+* so acquire the handle of process which encapsulates the PASID of
+* process. It is possible that the process might have zero work on
+* device. This is determined by checking if process has any queues
+* @note: Ability to collect is defined only for Gfx9 devices
+*/
+   cu_cnt = 0;
+   dev = pdd->dev;
+   proc = pdd->process;
+   if (pdd->qpd.queue_count == 0) {
+   pr_info("%s: Gpu-Id: %d has no active queues for process %d\n",
+   __func__, dev->id, proc->pasid);
+   return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+   }
+
+   /* Collect wave count from device by reading relevant registers */
+   wave_cnt = 0;
+   if (dev->kfd2kgd->get_cu_occupancy != NULL)
+   dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid,
+   

[PATCH 2/3] drm/amd/amdgpu: Define and implement a function that collects number of waves that are in flight.

2020-09-17 Thread Ramesh Errabolu
[Why]
Allow user to know how many compute units (CU) are in use at any given
moment.

[How]
Read registers of SQ that give number of waves that are in flight
of various queues. Use this information to determine number of CU's
in use.

Signed-off-by: Ramesh Errabolu 
---
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 206 ++
 .../gpu/drm/amd/include/kgd_kfd_interface.h   |  11 +
 2 files changed, 217 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e6aede725197..2f8c8140734e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -38,7 +38,9 @@
 #include "soc15d.h"
 #include "mmhub_v1_0.h"
 #include "gfxhub_v1_0.h"
+#include "gfx_v9_0.h"
 
+struct kfd_dev;
 
 enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -706,6 +708,209 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct 
kgd_dev *kgd,
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
+static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+   mutex_lock(>srbm_mutex);
+   mutex_lock(>grbm_idx_mutex);
+
+}
+
+static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+   mutex_unlock(>grbm_idx_mutex);
+   mutex_unlock(>srbm_mutex);
+}
+
+/**
+ * @get_wave_count: Read device registers to get number of waves in flight for
+ * a particular queue. The method also returns the VMID associated with the
+ * queue.
+ *
+ * @adev: Handle of device whose registers are to be read
+ *
+ * @queue_idx: Index of queue in the queue-map bit-field
+ *
+ * @wave_cnt: Output parameter updated with number of waves in flight
+ *
+ * @vmid: Output parameter updated with VMID of queue whose wave count
+ * is being collected
+ */
+static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
+  int *wave_cnt, int *vmid)
+{
+   int pipe_idx;
+   int queue_slot;
+   unsigned int reg_val;
+
+   /*
+* By policy queues at slots 0 and 1 are reserved for non-compute
+* queues i.e. those managed for graphic functions.
+*/
+   if ((queue_idx % adev->gfx.mec.num_queue_per_pipe) < 2)
+   return;
+
+   /*
+* Queue belongs to a compute workload. Determine the PIPE index
+* associated with the queue and program GRBM accordingly:
+* MEID = 1, PIPEID = pipe_idx, QUEUEID = queue_idx, VMID = 0
+*/
+   pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
+   queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
+   soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
+
+   /*
+* Read from register number of waves in flight. If non-zero get the
+* VMID associated with queue
+*/
+   reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+queue_slot);
+   *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+   if (*wave_cnt != 0)
+   *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) &
+CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
+}
+
+/**
+ * @kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each
+ * shader engine and aggregates the number of waves that are in flight for the
+ * process whose pasid is provided as a parameter. The process could have ZERO
+ * or more queues running and submitting waves to compute units.
+ *
+ * @note: It's possible that the device has too many queues (oversubscription)
+ * in which case a VMID could be remapped to a different PASID. This could lead
+ * to an inaccurate wave count. Following is a high-level sequence:
+ *Time T1: vmid = getVmid(); vmid is associated with Pasid P1
+ *Time T2: passId = getPasId(vmid); vmid is associated with Pasid P2
+ * In the sequence above wave count obtained from time T1 will be incorrectly
+ * lost or added to total wave count.
+ *
+ * @kgd: Handle of device from which to get number of waves in flight
+ *
+ * @pasid: Identifies the process for which this query call is invoked
+ *
+ * @wave_cnt: Output parameter updated with number of waves in flight that
+ * belong to process with given pasid
+ *
+ * The registers that provide the waves in flight are:
+ *
+ *  SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. At any moment there
+ *  can be a max of 32 queues that could submit wave fronts to be run by 
compute
+ *  units. The bit is ON if a queue is slotted, OFF if there is no queue. The
+ *  process could have ZERO or more queues slotted and submitting waves to be
+ *  run by compute units. Even when there is a queue it is possible there could
+ *  be zero wave fronts, this can happen when queue is waiting on top-of-pipe
+ *  events - e.g. waitRegMem command
+ *
+ *  For each bit that is ON from above:
+ *
+ *Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
+ *number of waves 

[PATCH 1/3] drm/amd/amdgpu: Prepare implementation to support reporting of CU usage

2020-09-17 Thread Ramesh Errabolu
[Why]
Allow user to know number of compute units (CU) that are in use at any
given moment.

[How]
Read registers of SQ that give number of waves that are in flight
of various queues. Use this information to determine number of CU's
in use.

Signed-off-by: Ramesh Errabolu 
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14 ++
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h |  3 +--
 2 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d898c9ff3526..b31879be2c05 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -49,6 +49,7 @@
 #include "amdgpu_ras.h"
 
 #include "gfx_v9_4.h"
+#include "gfx_v9_0.h"
 
 #include "asic_reg/pwr/pwr_10_0_offset.h"
 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
@@ -785,10 +786,6 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device 
*adev);
 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
-static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
- struct amdgpu_cu_info *cu_info);
-static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 
sh_num, u32 instance);
 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
@@ -2402,7 +2399,8 @@ static void gfx_v9_0_tiling_mode_table_init(struct 
amdgpu_device *adev)
/* TODO */
 }
 
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 
sh_num, u32 instance)
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+  u32 instance)
 {
u32 data;
 
@@ -4117,7 +4115,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct 
amdgpu_device *adev)
return ~0;
 }
 
-static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
uint64_t clock;
 
@@ -6928,8 +6926,8 @@ static u32 gfx_v9_0_get_cu_active_bitmap(struct 
amdgpu_device *adev)
return (~data) & mask;
 }
 
-static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
-struct amdgpu_cu_info *cu_info)
+int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
+struct amdgpu_cu_info *cu_info)
 {
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h 
b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index fa5a3fbaf6ab..a3d73667e60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -26,9 +26,8 @@
 
 extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
 
-void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
 uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
 int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info 
*cu_info);
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, 
u32 instance);
 
 #endif
-- 
2.27.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v1] powerplay:hwmgr - modify the return value

2020-09-17 Thread Alex Deucher
On Thu, Sep 17, 2020 at 4:28 AM Christian König
 wrote:
>
> Am 17.09.20 um 05:46 schrieb Xiaoliang Pang:
> > modify the return value is -EINVAL
>
> Maybe better write something like "The correct return value should be
> -EINVAL." With that done feel free to add my acked-by.

Applied with updated commit message.

Thanks!

Alex


>
> Christian.
>
> >
> > Fixes: f83a9991648bb("drm/amd/powerplay: add Vega10 powerplay support (v5)")
> > Fixes: 2cac05dee6e30("drm/amd/powerplay: add the hw manager for vega12 
> > (v4)")
> > Cc: Eric Huang 
> > Cc: Evan Quan 
> > Signed-off-by: Xiaoliang Pang 
> > ---
> >   drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
> >   drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 2 +-
> >   2 files changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > index c378a000c934..7eada3098ffc 100644
> > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> > @@ -4659,7 +4659,7 @@ static int 
> > vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
> >   if ((data->water_marks_bitmap & WaterMarksExist) &&
> >   !(data->water_marks_bitmap & WaterMarksLoaded)) {
> >   result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, 
> > WMTABLE, false);
> > - PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", 
> > return EINVAL);
> > + PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", 
> > return -EINVAL);
> >   data->water_marks_bitmap |= WaterMarksLoaded;
> >   }
> >
> > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c 
> > b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > index a678a67f1c0d..04da52cea824 100644
> > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
> > @@ -2390,7 +2390,7 @@ static int 
> > vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
> >   !(data->water_marks_bitmap & WaterMarksLoaded)) {
> >   result = smum_smc_table_manager(hwmgr,
> >   (uint8_t *)wm_table, 
> > TABLE_WATERMARKS, false);
> > - PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", 
> > return EINVAL);
> > + PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", 
> > return -EINVAL);
> >   data->water_marks_bitmap |= WaterMarksLoaded;
> >   }
> >
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display: Delete duplicated argument to '&&' or '||'

2020-09-17 Thread Alex Deucher
On Thu, Sep 17, 2020 at 11:09 AM Ye Bin  wrote:
>
> Fixes coccicheck warning:
> drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c:282:12-42:
> duplicated argument to && or ||
> drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:3240:12-42:
> duplicated argument to && or ||
> drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:5520:7-91:
> duplicated argument to && or ||
> drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:5309:86-122:
> duplicated argument to && or ||
>
> Fixes: 6725a88f88a7 ("drm/amd/display: Add DCN3 DML")
> Reported-by: Hulk Robot 
> Signed-off-by: Ye Bin 

Applied.  Thanks!

Alex

> ---
>  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c| 6 +++---
>  drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 2 +-
>  2 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c 
> b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
> index 8f8bf83a60a1..50b7d011705d 100644
> --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
> +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
> @@ -3237,7 +3237,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
> *BytePerPixelDETC = 0;
> *BytePerPixelY = 4;
> *BytePerPixelC = 0;
> -   } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == 
> dm_444_16) {
> +   } else if (SourcePixelFormat == dm_444_16) {
> *BytePerPixelDETY = 2;
> *BytePerPixelDETC = 0;
> *BytePerPixelY = 2;
> @@ -5307,7 +5307,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct 
> display_mode_lib *mode_l
> ViewportExceedsSurface = true;
>
> if (v->SourcePixelFormat[k] != dm_444_64 && 
> v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
> -   && v->SourcePixelFormat[k] != dm_444_16 && 
> v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
> +   && v->SourcePixelFormat[k] != dm_444_8 && 
> v->SourcePixelFormat[k] != dm_rgbe) {
> if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] 
> || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
> ViewportExceedsSurface = true;
> }
> @@ -5517,7 +5517,7 @@ static void 
> CalculateWatermarksAndDRAMSpeedChangeSupport(
> if (WritebackPixelFormat[k] == dm_444_64) {
> WritebackDRAMClockChangeLatencyHiding = 
> WritebackDRAMClockChangeLatencyHiding / 2;
> }
> -   if (mode_lib->vba.WritebackConfiguration == 
> dm_whole_buffer_for_single_stream_interleave || 
> mode_lib->vba.WritebackConfiguration == 
> dm_whole_buffer_for_single_stream_interleave) {
> +   if (mode_lib->vba.WritebackConfiguration == 
> dm_whole_buffer_for_single_stream_interleave) {
> WritebackDRAMClockChangeLatencyHiding = 
> WritebackDRAMClockChangeLatencyHiding * 2;
> }
> WritebackDRAMClockChangeLatencyMargin = 
> WritebackDRAMClockChangeLatencyHiding - 
> mode_lib->vba.WritebackDRAMClockChangeWatermark;
> diff --git 
> a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c 
> b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
> index 5bb10f6e300d..416bf6fb67bd 100644
> --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
> +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
> @@ -279,7 +279,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
> *BytePerPixelDETC = 0;
> *BytePerPixelY = 4;
> *BytePerPixelC = 0;
> -   } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == 
> dm_444_16) {
> +   } else if (SourcePixelFormat == dm_444_16) {
> *BytePerPixelDETY = 2;
> *BytePerPixelDETC = 0;
> *BytePerPixelY = 2;
> --
> 2.16.2.dirty
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amd/display: dc/clk_mgr: add support for SI parts (v3)

2020-09-17 Thread Alex Deucher
Applied with a slight cleanup to the commit message.

Thanks!

Alex

On Thu, Sep 17, 2020 at 3:33 AM Mauro Rossi  wrote:
>
> (v1) Changelog
>
> [Why]
> After commit c69dd2d "drm/amd/display: Refactor clk_mgr functions"
> dc/clk_mgr requires these changes to add SI parts support
> Necessary to avoid hitting default: ASSERT(0); /* Unknown Asic */
> that would cause kernel freeze
>
> [How]
> Add case statement for FAMILY_SI chipsets
>
> (v2) Changelog
>
> [Why]
> DCE6 has no DPREFCLK_CNTL register
>
> [How]
> Add DCE6 specific macros definitions for CLK registers and masks
> Add DCE6 specific dce60/dce60_clk_mgr.c for DCE6 customization
> Code style: reuse all the public functions in dce100/dce_clk_mgr.h header
> Code style: use dce60_* static functions as per other DCE implementations
> Add dce60_get_dp_ref_freq_khz() w/o using DPREFCLK_CNTL register
> Use dce60_get_dp_ref_freq_khz() function in dce60_funcs
> Add DCE6 specific dce60_clk_mgr_construct
> dc/clk_mgr/dce_clk_mgr.c: use dce60_clk_mgr_construct for FAMILY_SI chipsets
> Add Makefile rules for dce60_clk_mgr.o target conditional to 
> CONFIG_DRM_AMD_DC_SI
>
> (v3) Changelog
>
> [Why]
> linux-next kernel test robot reported the following problem:
> warning: no previous prototype for 'dce60_get_dp_ref_freq_khz' 
> [-Wmissing-prototypes]
>
> [How]
> mark dce60_get_dp_ref_freq_khz() as static
>
> Fixes: 3ecb3b794e2 "drm/amd/display: dc/clk_mgr: add support for SI parts 
> (v2)"
> Reported-by: kernel test robot 
> Signed-off-by: Mauro Rossi 
> ---
>  drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c 
> b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
> index c11c6b3a787d..0267644717b2 100644
> --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
> +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
> @@ -80,7 +80,7 @@ static const struct state_dependent_clocks 
> dce60_max_clks_by_state[] = {
>  /* ClocksStatePerformance */
>  { .display_clk_khz = 60, .pixel_clk_khz = 40 } };
>
> -int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
> +static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
>  {
> struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
> int dprefclk_wdivider;
> --
> 2.25.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display: Delete duplicated argument to '&&' or '||'

2020-09-17 Thread Ye Bin
Fixes coccicheck warning:
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c:282:12-42:
duplicated argument to && or ||
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:3240:12-42:
duplicated argument to && or ||
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:5520:7-91:
duplicated argument to && or ||
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c:5309:86-122:
duplicated argument to && or ||

Fixes: 6725a88f88a7 ("drm/amd/display: Add DCN3 DML")
Reported-by: Hulk Robot 
Signed-off-by: Ye Bin 
---
 drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c| 6 +++---
 drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 8f8bf83a60a1..50b7d011705d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3237,7 +3237,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
-   } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == 
dm_444_16) {
+   } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
@@ -5307,7 +5307,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct 
display_mode_lib *mode_l
ViewportExceedsSurface = true;
 
if (v->SourcePixelFormat[k] != dm_444_64 && 
v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
-   && v->SourcePixelFormat[k] != dm_444_16 && 
v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+   && v->SourcePixelFormat[k] != dm_444_8 && 
v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || 
v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
ViewportExceedsSurface = true;
}
@@ -5517,7 +5517,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
if (WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = 
WritebackDRAMClockChangeLatencyHiding / 2;
}
-   if (mode_lib->vba.WritebackConfiguration == 
dm_whole_buffer_for_single_stream_interleave || 
mode_lib->vba.WritebackConfiguration == 
dm_whole_buffer_for_single_stream_interleave) {
+   if (mode_lib->vba.WritebackConfiguration == 
dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = 
WritebackDRAMClockChangeLatencyHiding * 2;
}
WritebackDRAMClockChangeLatencyMargin = 
WritebackDRAMClockChangeLatencyHiding - 
mode_lib->vba.WritebackDRAMClockChangeWatermark;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 5bb10f6e300d..416bf6fb67bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -279,7 +279,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
-   } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == 
dm_444_16) {
+   } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
-- 
2.16.2.dirty

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while read edid return defer.

2020-09-17 Thread Zhuo, Qingqing
[AMD Official Use Only - Internal Distribution Only]

Hi Jinlong,

This is a public thread for upstream review purpose mainly. Let's sync 
internally for your issue.

Thanks,
Lillian

-Original Message-
From: Koenig, Christian  
Sent: Thursday, September 17, 2020 10:46 AM
To: Zhang, Jinlong ; Zhuo, Qingqing 
; Alex Deucher 
Cc: Brol, Eryk ; Li, Sun peng (Leo) ; 
Lakha, Bhawanpreet ; Siqueira, Rodrigo 
; amd-gfx list ; 
Wentland, Harry 
Subject: Re: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while 
read edid return defer.

No idea what that is. I can include delay.h just fine in the rest of the driver.

Must be something DC specific.

Regards,
Christian.

Am 17.09.20 um 16:39 schrieb Zhang, Jinlong:
> HI Christian
> While #include , it prompt 
> ..\..\..\..\..\dc\dce\dce_aux.c(31): fatal error C1083: Cannot open 
> include file: 'linux/delay.h': No such file or directory Could you 
> help to check how to include the header of void usleep_range(unsigned 
> long min, unsigned long max);
>
> -Original Message-
> From: Zhuo, Qingqing 
> Sent: Thursday, September 17, 2020 9:02 PM
> To: Koenig, Christian ; Alex Deucher 
> 
> Cc: Brol, Eryk ; Li, Sun peng (Leo) 
> ; Lakha, Bhawanpreet ; 
> Siqueira, Rodrigo ; amd-gfx list 
> ; Zhang, Jinlong 
> ; Wentland, Harry 
> Subject: RE: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while 
> read edid return defer.
>
> [AMD Official Use Only - Internal Distribution Only]
>
> Am 17.09.20 um 00:18 schrieb Alex Deucher:
>>> On Wed, Sep 16, 2020 at 6:16 PM Zhuo, Qingqing  
>>> wrote:
 [AMD Official Use Only - Internal Distribution Only]

 On Wed, Sep 16, 2020 at 3:42 PM Qingqing Zhuo  
 wrote:
> From: jinlong zhang 
>
> [why]
> while read edid return defer, then it enter to msleep, but it 
> actually took more time during msleep, this will cause remaining 
> edid read fail.
>
> [how]
> Replacing msleep with udelay, it will not take any extra time, edid 
> return pass finally.
 How long of a delay are we talking about here?  Some platforms don't 
 support long udelays and someone will send a patch to change this to 
 msleep.

 Alex

 -

 Hi Alex,

 It's between 0-5ms for generic cases, though there exist some dongle 
 workaround cases where we will do 70ms. Would this be a concern?
>>> I think ARM has a limit of 2ms for udelay.
>> Yeah, there is even a define somewhere for this.
>> If you need a delay which is longer than this but still more precise than 
>> msleep() then there is the high precision timer sleep as alternative.
>> I've forgotten the function name to use here, but there was a LWN article 
>> about this a few years ago. You just need to google a bit.
> Hi Alex and Christian,
>
> Thanks a lot for the input! Given what's been discussed, I will drop this 
> patch for now.
>
> Regards,
> Lillian
>
>> Regards,
>> Christian.
>>> Alex
>>>
 Thank you,
 Lillian


> Signed-off-by: jinlong zhang 
> Reviewed-by: Wenjing Liu 
> Acked-by: Qingqing Zhuo 
> ---
> drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> index 743042d5905a..cdcad82765e0 100644
> --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> @@ -653,7 +653,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service 
> *ddc,
>   if ((*payload->reply == 
> AUX_TRANSACTION_REPLY_AUX_DEFER) ||
>   (*payload->reply == 
> AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
>   if 
> (payload->defer_delay > 0)
> -   
> msleep(payload->defer_delay);
> +
> + udelay(payload->defer_delay * 1000);
>   }
>   }
>   break;
> --
> 2.17.1
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2F
> l
> i
> st
> s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%
> 7
> C
> qi
> ngqing.zhuo%40amd.com%7C36c3bee68c28448769fa08d85a884619%7C3dd8961
> f
> e
> 48
> 84e608e11a82d994e183d%7C0%7C0%7C63735627498307sdata=mynpH
> p
> i
> up
> J%2FU2o5gZNW%2Bft%2Fg2beFY86%2BzMRWoTZCghQ%3Dreserved=0
>>> ___
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> 

Re: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while read edid return defer.

2020-09-17 Thread Alex Deucher
On Thu, Sep 17, 2020 at 10:39 AM Zhang, Jinlong  wrote:
>
> HI Christian
> While #include , it prompt 
> ..\..\..\..\..\dc\dce\dce_aux.c(31): fatal error C1083: Cannot open include 
> file: 'linux/delay.h': No such file or directory
> Could you help to check how to include the header of void 
> usleep_range(unsigned long min, unsigned long max);

That should do it.  DC code has #include  in a bunch of
other files.

Alex

>
> -Original Message-
> From: Zhuo, Qingqing 
> Sent: Thursday, September 17, 2020 9:02 PM
> To: Koenig, Christian ; Alex Deucher 
> 
> Cc: Brol, Eryk ; Li, Sun peng (Leo) ; 
> Lakha, Bhawanpreet ; Siqueira, Rodrigo 
> ; amd-gfx list ; 
> Zhang, Jinlong ; Wentland, Harry 
> 
> Subject: RE: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while 
> read edid return defer.
>
> [AMD Official Use Only - Internal Distribution Only]
>
> Am 17.09.20 um 00:18 schrieb Alex Deucher:
> >> On Wed, Sep 16, 2020 at 6:16 PM Zhuo, Qingqing  
> >> wrote:
> >>> [AMD Official Use Only - Internal Distribution Only]
> >>>
> >>>On Wed, Sep 16, 2020 at 3:42 PM Qingqing Zhuo  
> >>>wrote:
>  From: jinlong zhang 
> 
> >>> >[why]
> while read edid return defer, then it enter to msleep, but it
> actually took more time during msleep, this will cause remaining
> edid read fail.
> 
>  [how]
>  Replacing msleep with udelay, it will not take any extra time, edid 
>  return pass finally.
> >>> How long of a delay are we talking about here?  Some platforms don't 
> >>> support long udelays and someone will send a patch to change this to 
> >>> msleep.
> >>>
> >>> Alex
> >>>
> >>> -
> >>>
> >>> Hi Alex,
> >>>
> >>> It's between 0-5ms for generic cases, though there exist some dongle 
> >>> workaround cases where we will do 70ms. Would this be a concern?
> >> I think ARM has a limit of 2ms for udelay.
>
> > Yeah, there is even a define somewhere for this.
>
> > If you need a delay which is longer than this but still more precise than 
> > msleep() then there is the high precision timer sleep as alternative.
>
> > I've forgotten the function name to use here, but there was a LWN article 
> > about this a few years ago. You just need to google a bit.
>
> Hi Alex and Christian,
>
> Thanks a lot for the input! Given what's been discussed, I will drop this 
> patch for now.
>
> Regards,
> Lillian
>
> >
> > Regards,
> > Christian.
> >>
> >> Alex
> >>
> >>> Thank you,
> >>> Lillian
> >>>
> >>>
>  Signed-off-by: jinlong zhang 
>  Reviewed-by: Wenjing Liu 
>  Acked-by: Qingqing Zhuo 
>  ---
>  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>  diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
>  b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
>  index 743042d5905a..cdcad82765e0 100644
>  --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
>  +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
>  @@ -653,7 +653,7 @@ bool dce_aux_transfer_with_retries(struct 
>  ddc_service *ddc,
>   if ((*payload->reply == 
>  AUX_TRANSACTION_REPLY_AUX_DEFER) ||
>   (*payload->reply == 
>  AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
>   if 
>  (payload->defer_delay > 0)
>  -   
>  msleep(payload->defer_delay);
>  +
>  + udelay(payload->defer_delay * 1000);
>   }
>   }
>   break;
>  --
>  2.17.1
> 
>  ___
>  amd-gfx mailing list
>  amd-gfx@lists.freedesktop.org
>  https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fl
>  i
>  st
>  s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7
>  C
>  qi
>  ngqing.zhuo%40amd.com%7C36c3bee68c28448769fa08d85a884619%7C3dd8961f
>  e
>  48
>  84e608e11a82d994e183d%7C0%7C0%7C63735627498307sdata=mynpHp
>  i
>  up
>  J%2FU2o5gZNW%2Bft%2Fg2beFY86%2BzMRWoTZCghQ%3Dreserved=0
> >> ___
> >> amd-gfx mailing list
> >> amd-gfx@lists.freedesktop.org
> >> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flis
> >> t
> >> s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7CQ
> >> i
> >> ngqing.Zhuo%40amd.com%7Cd4acd0d5e65c49a7270f08d85ae37036%7C3dd8961fe4
> >> 8
> >> 84e608e11a82d994e183d%7C0%7C0%7C637359280197936127sdata=ahcoCqG9
> >> 1
> >> EDMNlHNSk4Eimh1azMtRWSX%2BKyHCdpFq1Q%3Dreserved=0
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while read edid return defer.

2020-09-17 Thread Christian König
No idea what that is. I can include delay.h just fine in the rest of the 
driver.


Must be something DC specific.

Regards,
Christian.

Am 17.09.20 um 16:39 schrieb Zhang, Jinlong:

HI Christian
While #include , it prompt ..\..\..\..\..\dc\dce\dce_aux.c(31): 
fatal error C1083: Cannot open include file: 'linux/delay.h': No such file or 
directory
Could you help to check how to include the header of void usleep_range(unsigned 
long min, unsigned long max);

-Original Message-
From: Zhuo, Qingqing 
Sent: Thursday, September 17, 2020 9:02 PM
To: Koenig, Christian ; Alex Deucher 

Cc: Brol, Eryk ; Li, Sun peng (Leo) ; Lakha, Bhawanpreet 
; Siqueira, Rodrigo ; amd-gfx list 
; Zhang, Jinlong ; Wentland, Harry 

Subject: RE: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while 
read edid return defer.

[AMD Official Use Only - Internal Distribution Only]

Am 17.09.20 um 00:18 schrieb Alex Deucher:

On Wed, Sep 16, 2020 at 6:16 PM Zhuo, Qingqing  wrote:

[AMD Official Use Only - Internal Distribution Only]

On Wed, Sep 16, 2020 at 3:42 PM Qingqing Zhuo  wrote:

From: jinlong zhang 

[why]
while read edid return defer, then it enter to msleep, but it
actually took more time during msleep, this will cause remaining
edid read fail.

[how]
Replacing msleep with udelay, it will not take any extra time, edid return pass 
finally.

How long of a delay are we talking about here?  Some platforms don't support 
long udelays and someone will send a patch to change this to msleep.

Alex

-

Hi Alex,

It's between 0-5ms for generic cases, though there exist some dongle workaround 
cases where we will do 70ms. Would this be a concern?

I think ARM has a limit of 2ms for udelay.

Yeah, there is even a define somewhere for this.
If you need a delay which is longer than this but still more precise than 
msleep() then there is the high precision timer sleep as alternative.
I've forgotten the function name to use here, but there was a LWN article about 
this a few years ago. You just need to google a bit.

Hi Alex and Christian,

Thanks a lot for the input! Given what's been discussed, I will drop this patch 
for now.

Regards,
Lillian


Regards,
Christian.

Alex


Thank you,
Lillian



Signed-off-by: jinlong zhang 
Reviewed-by: Wenjing Liu 
Acked-by: Qingqing Zhuo 
---
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 743042d5905a..cdcad82765e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -653,7 +653,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
  if ((*payload->reply == 
AUX_TRANSACTION_REPLY_AUX_DEFER) ||
  (*payload->reply == 
AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
  if (payload->defer_delay > 0)
-   
msleep(payload->defer_delay);
+
+ udelay(payload->defer_delay * 1000);
  }
  }
  break;
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fl
i
st
s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7
C
qi
ngqing.zhuo%40amd.com%7C36c3bee68c28448769fa08d85a884619%7C3dd8961f
e
48
84e608e11a82d994e183d%7C0%7C0%7C63735627498307sdata=mynpHp
i
up
J%2FU2o5gZNW%2Bft%2Fg2beFY86%2BzMRWoTZCghQ%3Dreserved=0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flis
t
s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7CQ
i
ngqing.Zhuo%40amd.com%7Cd4acd0d5e65c49a7270f08d85ae37036%7C3dd8961fe4
8
84e608e11a82d994e183d%7C0%7C0%7C637359280197936127sdata=ahcoCqG9
1
EDMNlHNSk4Eimh1azMtRWSX%2BKyHCdpFq1Q%3Dreserved=0


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while read edid return defer.

2020-09-17 Thread Zhang, Jinlong
HI Christian
While #include , it prompt ..\..\..\..\..\dc\dce\dce_aux.c(31): 
fatal error C1083: Cannot open include file: 'linux/delay.h': No such file or 
directory
Could you help to check how to include the header of void usleep_range(unsigned 
long min, unsigned long max);

-Original Message-
From: Zhuo, Qingqing  
Sent: Thursday, September 17, 2020 9:02 PM
To: Koenig, Christian ; Alex Deucher 

Cc: Brol, Eryk ; Li, Sun peng (Leo) ; 
Lakha, Bhawanpreet ; Siqueira, Rodrigo 
; amd-gfx list ; 
Zhang, Jinlong ; Wentland, Harry 
Subject: RE: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while 
read edid return defer.

[AMD Official Use Only - Internal Distribution Only]

Am 17.09.20 um 00:18 schrieb Alex Deucher:
>> On Wed, Sep 16, 2020 at 6:16 PM Zhuo, Qingqing  wrote:
>>> [AMD Official Use Only - Internal Distribution Only]
>>>
>>>On Wed, Sep 16, 2020 at 3:42 PM Qingqing Zhuo  wrote:
 From: jinlong zhang 

>>> >[why]
while read edid return defer, then it enter to msleep, but it 
actually took more time during msleep, this will cause remaining 
edid read fail.

 [how]
 Replacing msleep with udelay, it will not take any extra time, edid return 
 pass finally.
>>> How long of a delay are we talking about here?  Some platforms don't 
>>> support long udelays and someone will send a patch to change this to msleep.
>>>
>>> Alex
>>>
>>> -
>>>
>>> Hi Alex,
>>>
>>> It's between 0-5ms for generic cases, though there exist some dongle 
>>> workaround cases where we will do 70ms. Would this be a concern?
>> I think ARM has a limit of 2ms for udelay.

> Yeah, there is even a define somewhere for this.

> If you need a delay which is longer than this but still more precise than 
> msleep() then there is the high precision timer sleep as alternative.

> I've forgotten the function name to use here, but there was a LWN article 
> about this a few years ago. You just need to google a bit.

Hi Alex and Christian,

Thanks a lot for the input! Given what's been discussed, I will drop this patch 
for now.

Regards,
Lillian

>
> Regards,
> Christian.
>>
>> Alex
>>
>>> Thank you,
>>> Lillian
>>>
>>>
 Signed-off-by: jinlong zhang 
 Reviewed-by: Wenjing Liu 
 Acked-by: Qingqing Zhuo 
 ---
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 index 743042d5905a..cdcad82765e0 100644
 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 @@ -653,7 +653,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service 
 *ddc,
  if ((*payload->reply == 
 AUX_TRANSACTION_REPLY_AUX_DEFER) ||
  (*payload->reply == 
 AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
  if (payload->defer_delay 
 > 0)
 -   
 msleep(payload->defer_delay);
 +
 + udelay(payload->defer_delay * 1000);
  }
  }
  break;
 --
 2.17.1

 ___
 amd-gfx mailing list
 amd-gfx@lists.freedesktop.org
 https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fl
 i
 st
 s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7
 C
 qi
 ngqing.zhuo%40amd.com%7C36c3bee68c28448769fa08d85a884619%7C3dd8961f
 e
 48
 84e608e11a82d994e183d%7C0%7C0%7C63735627498307sdata=mynpHp
 i
 up
 J%2FU2o5gZNW%2Bft%2Fg2beFY86%2BzMRWoTZCghQ%3Dreserved=0
>> ___
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flis
>> t 
>> s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7CQ
>> i
>> ngqing.Zhuo%40amd.com%7Cd4acd0d5e65c49a7270f08d85ae37036%7C3dd8961fe4
>> 8
>> 84e608e11a82d994e183d%7C0%7C0%7C637359280197936127sdata=ahcoCqG9
>> 1
>> EDMNlHNSk4Eimh1azMtRWSX%2BKyHCdpFq1Q%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v3 2/3] drm/amdkfd: Add process eviction counters to sysfs

2020-09-17 Thread Felix Kuehling
Am 2020-09-17 um 9:11 a.m. schrieb Cox, Philip:
> [AMD Official Use Only - Internal Distribution Only]
>
>>> +static struct attribute *procfs_stats_attrs[] = {
>>> +   NULL
>>> +};
>> We could probably use this to populate the attributes in stats automatically 
>> instead of calling sysfs_create_file and sysfs_remove_file manually. Then we 
>> may also not need the attr_evict attribute in the pdd.
>
> We use the attr_evict as an anchor to locate the pdd in 
> kfd_procfs_stats_show().  So, if we use the default attributes, and drop the 
> calls to sysfs_create_file, and sysfs_remove_file, it makes 
> kfd_procfs_stats_show() much more complicated, as we then need to find the 
> correct pdd without using the anchor attr_evict.
>
> Also, if we create the file via the default attributes, as you suggest, and 
> don't drop the attr_evict, we get incorrect results.  
>
> The code is much cleaner I think leaving the calls to sysfs_create_file, and 
> sysfs_remove_file() as they are, and leaving the default stats attributes 
> NULL.  If some other stats are added later, that don't require the pdd, then 
> they can be added to this structure, but I don't think the eviction stats 
> should be.

Thanks. Makes sense. I expect that all the stats will need the PDD. They
are all per-process, per-device stats.

With the small nit-picks fixed, the patch is

Reviewed-by: Felix Kuehling 

Regards,
  Felix


>
> -Original Message-
> From: Kuehling, Felix  
> Sent: Wednesday, September 16, 2020 6:46 PM
> To: Cox, Philip ; amd-gfx@lists.freedesktop.org
> Cc: Tye, Tony ; Morichetti, Laurent 
> ; Kim, Jonathan ; Errabolu, 
> Ramesh 
> Subject: Re: [PATCH v3 2/3] drm/amdkfd: Add process eviction counters to sysfs
>
> Some nit-picks and one more possible simplification inline. I want to make 
> adding more stats later as painless as possible.
>
> Looks good otherwise.
>
>
> Am 2020-09-16 um 2:42 p.m. schrieb Philip Cox:
>> Add per-process eviction counters to sysfs to keep track of how many 
>> eviction events have happened for each process.
>>
>> v2: rename the stats dir, and track all evictions per process, per device.
>> v3: Simplify the stats kobject handling and cleanup.
>>
>> Signed-off-by: Philip Cox 
>> ---
>>  .../drm/amd/amdkfd/kfd_device_queue_manager.c |  9 ++
>>  drivers/gpu/drm/amd/amdkfd/kfd_priv.h |  9 +-
>>  drivers/gpu/drm/amd/amdkfd/kfd_process.c  | 97 +++
>>  3 files changed, 114 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
>> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
>> index cafbc3aa980a..5b9e0df2a90e 100644
>> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
>> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
>> @@ -653,6 +653,7 @@ static int evict_process_queues_nocpsch(struct 
>> device_queue_manager *dqm,
>>  pr_info_ratelimited("Evicting PASID 0x%x queues\n",
>>  pdd->process->pasid);
>>  
>> +pdd->last_evict_timestamp = get_jiffies_64();
>>  /* Mark all queues as evicted. Deactivate all active queues on
>>   * the qpd.
>>   */
>> @@ -714,6 +715,7 @@ static int evict_process_queues_cpsch(struct 
>> device_queue_manager *dqm,
>>  q->properties.is_active = false;
>>  decrement_queue_count(dqm, q->properties.type);
>>  }
>> +pdd->last_evict_timestamp = get_jiffies_64();
>>  retval = execute_queues_cpsch(dqm,
>>  qpd->is_debug ?
>>  KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
>> @@ -732,6 +734,7 @@ static int restore_process_queues_nocpsch(struct 
>> device_queue_manager *dqm,
>>  struct mqd_manager *mqd_mgr;
>>  struct kfd_process_device *pdd;
>>  uint64_t pd_base;
>> +uint64_t eviction_duration;
>>  int retval, ret = 0;
>>  
>>  pdd = qpd_to_pdd(qpd);
>> @@ -799,6 +802,8 @@ static int restore_process_queues_nocpsch(struct 
>> device_queue_manager *dqm,
>>  ret = retval;
>>  }
>>  qpd->evicted = 0;
>> +eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
>> +atomic64_add(eviction_duration, >evict_duration_counter);
>>  out:
>>  if (mm)
>>  mmput(mm);
>> @@ -812,6 +817,7 @@ static int restore_process_queues_cpsch(struct 
>> device_queue_manager *dqm,
>>  struct queue *q;
>>  struct kfd_process_device *pdd;
>>  uint64_t pd_base;
>> +uint64_t eviction_duration;
>>  int retval = 0;
>>  
>>  pdd = qpd_to_pdd(qpd);
>> @@ -845,6 +851,9 @@ static int restore_process_queues_cpsch(struct 
>> device_queue_manager *dqm,
>>  retval = execute_queues_cpsch(dqm,
>>  KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
>>  qpd->evicted = 0;
>> +eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
>> +atomic64_add(eviction_duration, >evict_duration_counter);
>> +
>>  out:
>>  dqm_unlock(dqm);
>>  return 

[PATCH] drm/amd/display: [FIX] update clock under two conditions

2020-09-17 Thread Qingqing Zhuo
[Why]
Updating the clock only when a non-seamless boot stream exists
creates regressions in multiple scenarios.

[How]
Update clock under two conditions
1. A non-seamless boot stream exists.
2. Stream_count = 0

Fixes: 06f9b1475d98 ("drm/amd/display: update clock
when non-seamless boot stream exist")

Signed-off-by: Lewis Huang 
Acked-by: Qingqing Zhuo 
Cc: 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 1efc823c2a14..7e74ddc1c708 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1286,7 +1286,8 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
dc->optimize_seamless_boot_streams++;
}
 
-   if (context->stream_count > dc->optimize_seamless_boot_streams)
+   if (context->stream_count > dc->optimize_seamless_boot_streams ||
+   context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
 
disable_dangling_plane(dc, context);
@@ -1368,7 +1369,8 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
 
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-   if (context->stream_count > dc->optimize_seamless_boot_streams) {
+   if (context->stream_count > dc->optimize_seamless_boot_streams ||
+   context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize 
bw */
wait_for_no_pipes_pending(dc, context);
/* pplib is notified if disp_num changed */
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 16/21] drm/vgem: Introduce GEM object functions

2020-09-17 Thread Melissa Wen
Hi Thomas,

On 09/15, Thomas Zimmermann wrote:
> GEM object functions deprecate several similar callback interfaces in
> struct drm_driver. This patch replaces the per-driver callbacks with
> per-instance callbacks in vgem. The only exception is gem_prime_mmap,
> which is non-trivial to convert.
> 
> Signed-off-by: Thomas Zimmermann 

Thanks here again.

This drv file is a little tumultuous to me.
I mean, I took a while to sort functions in my head.

However, finally, I got it, and the change looks good.

Reviewed-by: Melissa Wen 

> ---
>  drivers/gpu/drm/vgem/vgem_drv.c | 21 ++---
>  1 file changed, 14 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
> index cb884c890065..fa54a6d1403d 100644
> --- a/drivers/gpu/drm/vgem/vgem_drv.c
> +++ b/drivers/gpu/drm/vgem/vgem_drv.c
> @@ -50,6 +50,8 @@
>  #define DRIVER_MAJOR 1
>  #define DRIVER_MINOR 0
>  
> +static const struct drm_gem_object_funcs vgem_gem_object_funcs;
> +
>  static struct vgem_device {
>   struct drm_device drm;
>   struct platform_device *platform;
> @@ -167,6 +169,8 @@ static struct drm_vgem_gem_object 
> *__vgem_gem_create(struct drm_device *dev,
>   if (!obj)
>   return ERR_PTR(-ENOMEM);
>  
> + obj->base.funcs = _gem_object_funcs;
> +
>   ret = drm_gem_object_init(dev, >base, roundup(size, PAGE_SIZE));
>   if (ret) {
>   kfree(obj);
> @@ -401,12 +405,20 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
>   return 0;
>  }
>  
> +static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
> + .free = vgem_gem_free_object,
> + .pin = vgem_prime_pin,
> + .unpin = vgem_prime_unpin,
> + .get_sg_table = vgem_prime_get_sg_table,
> + .vmap = vgem_prime_vmap,
> + .vunmap = vgem_prime_vunmap,
> + .vm_ops = _gem_vm_ops,
> +};
> +
>  static struct drm_driver vgem_driver = {
>   .driver_features= DRIVER_GEM | DRIVER_RENDER,
>   .open   = vgem_open,
>   .postclose  = vgem_postclose,
> - .gem_free_object_unlocked   = vgem_gem_free_object,
> - .gem_vm_ops = _gem_vm_ops,
>   .ioctls = vgem_ioctls,
>   .num_ioctls = ARRAY_SIZE(vgem_ioctls),
>   .fops   = _driver_fops,
> @@ -415,13 +427,8 @@ static struct drm_driver vgem_driver = {
>  
>   .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
>   .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> - .gem_prime_pin = vgem_prime_pin,
> - .gem_prime_unpin = vgem_prime_unpin,
>   .gem_prime_import = vgem_prime_import,
>   .gem_prime_import_sg_table = vgem_prime_import_sg_table,
> - .gem_prime_get_sg_table = vgem_prime_get_sg_table,
> - .gem_prime_vmap = vgem_prime_vmap,
> - .gem_prime_vunmap = vgem_prime_vunmap,
>   .gem_prime_mmap = vgem_prime_mmap,
>  
>   .name   = DRIVER_NAME,
> -- 
> 2.28.0
> 
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH v3 2/3] drm/amdkfd: Add process eviction counters to sysfs

2020-09-17 Thread Cox, Philip
[AMD Official Use Only - Internal Distribution Only]

> > +static struct attribute *procfs_stats_attrs[] = {
> > +   NULL
> > +};
> 
> We could probably use this to populate the attributes in stats automatically 
> instead of calling sysfs_create_file and sysfs_remove_file manually. Then we 
> may also not need the attr_evict attribute in the pdd.


We use the attr_evict as an anchor to locate the pdd in 
kfd_procfs_stats_show().  So, if we use the default attributes, and drop the 
calls to sysfs_create_file, and sysfs_remove_file, it makes 
kfd_procfs_stats_show() much more complicated, as we then need to find the 
correct pdd without using the anchor attr_evict.

Also, if we create the file via the default attributes, as you suggest, and 
don't drop the attr_evict, we get incorrect results.  

The code is much cleaner I think leaving the calls to sysfs_create_file, and 
sysfs_remove_file() as they are, and leaving the default stats attributes NULL. 
 If some other stats are added later, that don't require the pdd, then they can 
be added to this structure, but I don't think the eviction stats should be.

-Original Message-
From: Kuehling, Felix  
Sent: Wednesday, September 16, 2020 6:46 PM
To: Cox, Philip ; amd-gfx@lists.freedesktop.org
Cc: Tye, Tony ; Morichetti, Laurent 
; Kim, Jonathan ; Errabolu, 
Ramesh 
Subject: Re: [PATCH v3 2/3] drm/amdkfd: Add process eviction counters to sysfs

Some nit-picks and one more possible simplification inline. I want to make 
adding more stats later as painless as possible.

Looks good otherwise.


Am 2020-09-16 um 2:42 p.m. schrieb Philip Cox:
> Add per-process eviction counters to sysfs to keep track of how many 
> eviction events have happened for each process.
>
> v2: rename the stats dir, and track all evictions per process, per device.
> v3: Simplify the stats kobject handling and cleanup.
>
> Signed-off-by: Philip Cox 
> ---
>  .../drm/amd/amdkfd/kfd_device_queue_manager.c |  9 ++
>  drivers/gpu/drm/amd/amdkfd/kfd_priv.h |  9 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_process.c  | 97 +++
>  3 files changed, 114 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
> b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index cafbc3aa980a..5b9e0df2a90e 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -653,6 +653,7 @@ static int evict_process_queues_nocpsch(struct 
> device_queue_manager *dqm,
>   pr_info_ratelimited("Evicting PASID 0x%x queues\n",
>   pdd->process->pasid);
>  
> + pdd->last_evict_timestamp = get_jiffies_64();
>   /* Mark all queues as evicted. Deactivate all active queues on
>* the qpd.
>*/
> @@ -714,6 +715,7 @@ static int evict_process_queues_cpsch(struct 
> device_queue_manager *dqm,
>   q->properties.is_active = false;
>   decrement_queue_count(dqm, q->properties.type);
>   }
> + pdd->last_evict_timestamp = get_jiffies_64();
>   retval = execute_queues_cpsch(dqm,
>   qpd->is_debug ?
>   KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
> @@ -732,6 +734,7 @@ static int restore_process_queues_nocpsch(struct 
> device_queue_manager *dqm,
>   struct mqd_manager *mqd_mgr;
>   struct kfd_process_device *pdd;
>   uint64_t pd_base;
> + uint64_t eviction_duration;
>   int retval, ret = 0;
>  
>   pdd = qpd_to_pdd(qpd);
> @@ -799,6 +802,8 @@ static int restore_process_queues_nocpsch(struct 
> device_queue_manager *dqm,
>   ret = retval;
>   }
>   qpd->evicted = 0;
> + eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
> + atomic64_add(eviction_duration, >evict_duration_counter);
>  out:
>   if (mm)
>   mmput(mm);
> @@ -812,6 +817,7 @@ static int restore_process_queues_cpsch(struct 
> device_queue_manager *dqm,
>   struct queue *q;
>   struct kfd_process_device *pdd;
>   uint64_t pd_base;
> + uint64_t eviction_duration;
>   int retval = 0;
>  
>   pdd = qpd_to_pdd(qpd);
> @@ -845,6 +851,9 @@ static int restore_process_queues_cpsch(struct 
> device_queue_manager *dqm,
>   retval = execute_queues_cpsch(dqm,
>   KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
>   qpd->evicted = 0;
> + eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
> + atomic64_add(eviction_duration, >evict_duration_counter);
> +
>  out:
>   dqm_unlock(dqm);
>   return retval;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
> b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> index 023629f28495..a500fe611b43 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> @@ -631,7 +631,7 @@ enum kfd_pdd_bound {
>   PDD_BOUND_SUSPENDED,
>  };
>  
> -#define MAX_SYSFS_FILENAME_LEN 

RE: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while read edid return defer.

2020-09-17 Thread Zhuo, Qingqing
[AMD Official Use Only - Internal Distribution Only]

Am 17.09.20 um 00:18 schrieb Alex Deucher:
>> On Wed, Sep 16, 2020 at 6:16 PM Zhuo, Qingqing  wrote:
>>> [AMD Official Use Only - Internal Distribution Only]
>>>
>>>On Wed, Sep 16, 2020 at 3:42 PM Qingqing Zhuo  wrote:
 From: jinlong zhang 

>>> >[why]
while read edid return defer, then it enter to msleep, but it 
actually took more time during msleep, this will cause remaining 
edid read fail.

 [how]
 Replacing msleep with udelay, it will not take any extra time, edid return 
 pass finally.
>>> How long of a delay are we talking about here?  Some platforms don't 
>>> support long udelays and someone will send a patch to change this to msleep.
>>>
>>> Alex
>>>
>>> -
>>>
>>> Hi Alex,
>>>
>>> It's between 0-5ms for generic cases, though there exist some dongle 
>>> workaround cases where we will do 70ms. Would this be a concern?
>> I think ARM has a limit of 2ms for udelay.

> Yeah, there is even a define somewhere for this.

> If you need a delay which is longer than this but still more precise than 
> msleep() then there is the high precision timer sleep as alternative.

> I've forgotten the function name to use here, but there was a LWN article 
> about this a few years ago. You just need to google a bit.

Hi Alex and Christian,

Thanks a lot for the input! Given what's been discussed, I will drop this patch 
for now.

Regards,
Lillian

>
> Regards,
> Christian.
>>
>> Alex
>>
>>> Thank you,
>>> Lillian
>>>
>>>
 Signed-off-by: jinlong zhang 
 Reviewed-by: Wenjing Liu 
 Acked-by: Qingqing Zhuo 
 ---
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 index 743042d5905a..cdcad82765e0 100644
 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
 @@ -653,7 +653,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service 
 *ddc,
  if ((*payload->reply == 
 AUX_TRANSACTION_REPLY_AUX_DEFER) ||
  (*payload->reply == 
 AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
  if (payload->defer_delay 
 > 0)
 -   
 msleep(payload->defer_delay);
 +
 + udelay(payload->defer_delay * 1000);
  }
  }
  break;
 --
 2.17.1

 ___
 amd-gfx mailing list
 amd-gfx@lists.freedesktop.org
 https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fli
 st 
 s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7C
 qi
 ngqing.zhuo%40amd.com%7C36c3bee68c28448769fa08d85a884619%7C3dd8961fe
 48 
 84e608e11a82d994e183d%7C0%7C0%7C63735627498307sdata=mynpHpi
 up
 J%2FU2o5gZNW%2Bft%2Fg2beFY86%2BzMRWoTZCghQ%3Dreserved=0
>> ___
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flist
>> s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7CQi
>> ngqing.Zhuo%40amd.com%7Cd4acd0d5e65c49a7270f08d85ae37036%7C3dd8961fe48
>> 84e608e11a82d994e183d%7C0%7C0%7C637359280197936127sdata=ahcoCqG91
>> EDMNlHNSk4Eimh1azMtRWSX%2BKyHCdpFq1Q%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while read edid return defer.

2020-09-17 Thread Alex Deucher
On Thu, Sep 17, 2020 at 4:26 AM Christian König
 wrote:
>
> Am 17.09.20 um 00:18 schrieb Alex Deucher:
> > On Wed, Sep 16, 2020 at 6:16 PM Zhuo, Qingqing  
> > wrote:
> >> [AMD Official Use Only - Internal Distribution Only]
> >>
> >> On Wed, Sep 16, 2020 at 3:42 PM Qingqing Zhuo  
> >> wrote:
> >>> From: jinlong zhang 
> >>>
> >>> [why]
> >>> while read edid return defer, then it enter to msleep, but it actually
> >>> took more time during msleep, this will cause remaining edid read
> >>> fail.
> >>>
> >>> [how]
> >>> Replacing msleep with udelay, it will not take any extra time, edid 
> >>> return pass finally.
> >> How long of a delay are we talking about here?  Some platforms don't 
> >> support long udelays and someone will send a patch to change this to 
> >> msleep.
> >>
> >> Alex
> >>
> >> -
> >>
> >> Hi Alex,
> >>
> >> It's between 0-5ms for generic cases, though there exist some dongle 
> >> workaround cases where we will do 70ms. Would this be a concern?
> > I think ARM has a limit of 2ms for udelay.
>
> Yeah, there is even a define somewhere for this.
>
> If you need a delay which is longer than this but still more precise
> than msleep() then there is the high precision timer sleep as alternative.
>
> I've forgotten the function name to use here, but there was a LWN
> article about this a few years ago. You just need to google a bit.

I think usleep_range() is what you want.

Alex

>
> Regards,
> Christian.
>
> >
> > Alex
> >
> >> Thank you,
> >> Lillian
> >>
> >>
> >>> Signed-off-by: jinlong zhang 
> >>> Reviewed-by: Wenjing Liu 
> >>> Acked-by: Qingqing Zhuo 
> >>> ---
> >>>   drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
> >>>   1 file changed, 1 insertion(+), 1 deletion(-)
> >>>
> >>> diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> >>> b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> >>> index 743042d5905a..cdcad82765e0 100644
> >>> --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> >>> +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
> >>> @@ -653,7 +653,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service 
> >>> *ddc,
> >>>  if ((*payload->reply == 
> >>> AUX_TRANSACTION_REPLY_AUX_DEFER) ||
> >>>  (*payload->reply == 
> >>> AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
> >>>  if (payload->defer_delay 
> >>> > 0)
> >>> -   
> >>> msleep(payload->defer_delay);
> >>> +
> >>> + udelay(payload->defer_delay * 1000);
> >>>  }
> >>>  }
> >>>  break;
> >>> --
> >>> 2.17.1
> >>>
> >>> ___
> >>> amd-gfx mailing list
> >>> amd-gfx@lists.freedesktop.org
> >>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flist
> >>> s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cqi
> >>> ngqing.zhuo%40amd.com%7C36c3bee68c28448769fa08d85a884619%7C3dd8961fe48
> >>> 84e608e11a82d994e183d%7C0%7C0%7C63735627498307sdata=mynpHpiup
> >>> J%2FU2o5gZNW%2Bft%2Fg2beFY86%2BzMRWoTZCghQ%3Dreserved=0
> > ___
> > amd-gfx mailing list
> > amd-gfx@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 0/4] Enlarge tracepoints in the display component

2020-09-17 Thread Daniel Vetter
On Wed, Sep 16, 2020 at 11:27:27AM -0400, Kazlauskas, Nicholas wrote:
> On 2020-09-16 5:12 a.m., Daniel Vetter wrote:
> > On Fri, Sep 11, 2020 at 10:59:23AM -0400, Rodrigo Siqueira wrote:
> > > Debug issues related to display can be a challenge due to the complexity
> > > around this topic and different source of information might help in this
> > > process. We already have support for tracepoints inside the display
> > > component, i.e., we have the basic functionalities available and we just
> > > need to expand it in order to make it more valuable for debugging. For
> > > this reason, this patchset reworks part of the current tracepoint
> > > options and add different sets of tracing inside amdgpu_dm, display
> > > core, and DCN10. The first patch of this series just rework part of the
> > > current tracepoints and the last set of patches introduces new
> > > tracepoints.
> > > 
> > > This first patchset version is functional. Please, let me know what I
> > > can improve in the current version but also let me know what kind of
> > > tracepoint I can add for the next version.
> > > 
> > > Finally, I want to highlight that this work is based on a set of patches
> > > originally made by Nicholas Kazlauskas.
> > > 
> > > Change in V2:
> > > - I added another patch for capturing the clock state for different 
> > > display
> > >architecture.
> > 
> > Hm I'm not super sure tracepoints for state dumping are the right thing
> > here. We kinda have the atomic state dumping code with all the various
> > callbacks, and you can extend that pretty easily. Gives you full state
> > dump in debugfs, plus a few function to dump into dmesg.
> > 
> > Maybe what we need is a function to dump this also into printk tracepoint
> > (otoh with Sean Paul's tracepoint work we'd get that through the dmesg
> > stuff already), and then you could do it there?
> > 
> > Upside is that for customers they'd get a much more consistent way to
> > debug display issues across different drivers.
> > 
> > For low-level hw debug what we do is give the hw guys an mmio trace, and
> > they replay it on the fancy boxes :-) So for that I think this here is
> > again too high level, but maybe what you have is a bit different.
> > -Daniel
> 
> We have raw register traces, but what I find most useful is to be able to
> see are the incoming DRM IOCTLs, objects and properties per commit.
> 
> Many of the bugs we see in display code is in the conversion from DRM -> DM
> -> DC state. The current HW state is kind of useless in most cases, but the
> sequence helps track down intermittent problems and understand state
> transitions.
> 
> Tracepoints provide everything I really need to be able to track down these
> problems without falling back to a full debugger. The existing DRM prints
> (even at high logging levels) aren't enough to understand what's going on in
> most cases in our driver so funneling those into tracepoints to improve perf
> doesn't really help that much.
> 
> I think this kind of idea was rejected for DRM core last year with Sean's
> patch series but if we can't get them into core then I'd like to get them
> into our driver at least. These are a cleaned up version of Sean's work + my
> work that I end up applying locally whenever I debug something.

Nah, Sean's series wasn't rejected. It's simply stuck waiting for review.
So if your goal is to get better dumping going on, I think combining this
with Sean's work (and getting that reviewed), plus then tapping into the
atomic state dumping code. Then you know what was requested, plus what your
atomic_check code computed should be the hw state, and you can compare
that with the register dumps you already grab.

Feels at least like a more complete and flexible solution than ad-hoc
tracepoints for debuggin in each driver. The idea behind Sean's work is
also that we'd have a blackbox recorder for any drm issues which distros
in the field could use. So driver doing their own debug output doesn't
sound super great.

I think Siqueira already chatted a bit with Sean.
-Daniel

> 

> Regards,
> Nicholas Kazlauskas
> 
> > 
> > > 
> > > Rodrigo Siqueira (4):
> > >drm/amd/display: Rework registers tracepoint
> > >drm/amd/display: Add tracepoint for amdgpu_dm
> > >drm/amd/display: Add pipe_state tracepoint
> > >drm/amd/display: Add tracepoint for capturing clocks state
> > > 
> > >   .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |  17 +
> > >   .../amd/display/amdgpu_dm/amdgpu_dm_trace.h   | 712 +-
> > >   .../dc/clk_mgr/dce112/dce112_clk_mgr.c|   5 +
> > >   .../display/dc/clk_mgr/dcn10/rv1_clk_mgr.c|   4 +
> > >   .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  |   4 +
> > >   .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c |   4 +
> > >   .../display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c  |   4 +
> > >   drivers/gpu/drm/amd/display/dc/core/dc.c  |  11 +
> > >   .../gpu/drm/amd/display/dc/dce/dce_clk_mgr.c  |   5 +
> > >   

Re: [PATCH v2 18/21] drm/vkms: Introduce GEM object functions

2020-09-17 Thread Melissa Wen
Hi Thomas,

On 09/15, Thomas Zimmermann wrote:
> GEM object functions deprecate several similar callback interfaces in
> struct drm_driver. This patch replaces the per-driver callbacks with
> per-instance callbacks in vkms.
> 
> Signed-off-by: Thomas Zimmermann 

Thanks! Looks fine.

Reviewed-by: Melissa Wen 

> ---
>  drivers/gpu/drm/vkms/vkms_drv.c |  8 
>  drivers/gpu/drm/vkms/vkms_gem.c | 13 +
>  2 files changed, 13 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
> index cb0b6230c22c..726801ab44d4 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.c
> +++ b/drivers/gpu/drm/vkms/vkms_drv.c
> @@ -51,12 +51,6 @@ static const struct file_operations vkms_driver_fops = {
>   .release= drm_release,
>  };
>  
> -static const struct vm_operations_struct vkms_gem_vm_ops = {
> - .fault = vkms_gem_fault,
> - .open = drm_gem_vm_open,
> - .close = drm_gem_vm_close,
> -};
> -
>  static void vkms_release(struct drm_device *dev)
>  {
>   struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
> @@ -98,8 +92,6 @@ static struct drm_driver vkms_driver = {
>   .release= vkms_release,
>   .fops   = _driver_fops,
>   .dumb_create= vkms_dumb_create,
> - .gem_vm_ops = _gem_vm_ops,
> - .gem_free_object_unlocked = vkms_gem_free_object,
>   .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>   .gem_prime_import_sg_table = vkms_prime_import_sg_table,
>  
> diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
> index a017fc59905e..19a0e260a4df 100644
> --- a/drivers/gpu/drm/vkms/vkms_gem.c
> +++ b/drivers/gpu/drm/vkms/vkms_gem.c
> @@ -7,6 +7,17 @@
>  
>  #include "vkms_drv.h"
>  
> +static const struct vm_operations_struct vkms_gem_vm_ops = {
> + .fault = vkms_gem_fault,
> + .open = drm_gem_vm_open,
> + .close = drm_gem_vm_close,
> +};
> +
> +static const struct drm_gem_object_funcs vkms_gem_object_funcs = {
> + .free = vkms_gem_free_object,
> + .vm_ops = _gem_vm_ops,
> +};
> +
>  static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
>u64 size)
>  {
> @@ -17,6 +28,8 @@ static struct vkms_gem_object *__vkms_gem_create(struct 
> drm_device *dev,
>   if (!obj)
>   return ERR_PTR(-ENOMEM);
>  
> + obj->gem.funcs = _gem_object_funcs;
> +
>   size = roundup(size, PAGE_SIZE);
>   ret = drm_gem_object_init(dev, >gem, size);
>   if (ret) {
> -- 
> 2.28.0
> 
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 14/21] drm/tegra: Introduce GEM object functions

2020-09-17 Thread Thierry Reding
On Tue, Sep 15, 2020 at 04:59:51PM +0200, Thomas Zimmermann wrote:
> GEM object functions deprecate several similar callback interfaces in
> struct drm_driver. This patch replaces the per-driver callbacks with
> per-instance callbacks in tegra.
> 
> Signed-off-by: Thomas Zimmermann 
> ---
>  drivers/gpu/drm/tegra/drm.c | 4 
>  drivers/gpu/drm/tegra/gem.c | 8 
>  2 files changed, 8 insertions(+), 4 deletions(-)

Acked-by: Thierry Reding 


signature.asc
Description: PGP signature
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Chen, JingWen
[AMD Public Use]

Typo fixed in v3

Best Regards,
JingWen Chen

> -Original Message-
> From: Chen, Guchun 
> Sent: Thursday, September 17, 2020 5:40 PM
> To: Chen, JingWen ; amd-
> g...@lists.freedesktop.org
> Cc: Chen, JingWen 
> Subject: RE: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV
> 
> [AMD Public Use]
> 
> You want to call it in SRIOV case or in bare-metal case?
> 
> Regards,
> Guchun
> 
> -Original Message-
> From: amd-gfx  On Behalf Of
> Jingwen Chen
> Sent: Thursday, September 17, 2020 5:17 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Chen, JingWen 
> Subject: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV
> 
> smu_post_init needs to enable SMU feature, while this require virtualization
> off. Skip it since this feature is not used in SRIOV.
> 
> v2: move the check to the early stage of smu_post_init.
> 
> Signed-off-by: Jingwen Chen 
> ---
>  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index a027c7fdad56..a950f009c794 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -2631,6 +2631,9 @@ static int navi10_post_smu_init(struct smu_context
> *smu)
>   uint64_t feature_mask = 0;
>   int ret = 0;
> 
> + if (!amdgpu_sriov_vf(adev))
> + return 0;
> +
>   /* For Naiv1x, enable these features only after DAL initialization */
>   if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
>   feature_mask |=
> FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
> --
> 2.25.1
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.
> freedesktop.org%2Fmailman%2Flistinfo%2Famd-
> gfxdata=02%7C01%7Cguchun.chen%40amd.com%7C12ec63de0caa413
> 4415008d85aea7b6a%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C
> 637359310721844702sdata=9JCzyhqPIKMZV%2BBEL83HZyfwCyZjTP5iP
> gs7Hn4Epx8%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Jingwen Chen
smu_post_init needs to enable SMU feature, while this require
virtualization off. Skip it since this feature is not used in SRIOV.

v2: move the check to the early stage of smu_post_init.

v3: fix typo

Signed-off-by: Jingwen Chen 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index a027c7fdad56..05cb1fdd15ce 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2631,6 +2631,9 @@ static int navi10_post_smu_init(struct smu_context *smu)
uint64_t feature_mask = 0;
int ret = 0;
 
+   if (amdgpu_sriov_vf(adev))
+   return 0;
+
/* For Naiv1x, enable these features only after DAL initialization */
if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Chen, Guchun
[AMD Public Use]

You want to call it in SRIOV case or in bare-metal case?

Regards,
Guchun

-Original Message-
From: amd-gfx  On Behalf Of Jingwen Chen
Sent: Thursday, September 17, 2020 5:17 PM
To: amd-gfx@lists.freedesktop.org
Cc: Chen, JingWen 
Subject: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

smu_post_init needs to enable SMU feature, while this require virtualization 
off. Skip it since this feature is not used in SRIOV.

v2: move the check to the early stage of smu_post_init.

Signed-off-by: Jingwen Chen 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index a027c7fdad56..a950f009c794 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2631,6 +2631,9 @@ static int navi10_post_smu_init(struct smu_context *smu)
uint64_t feature_mask = 0;
int ret = 0;
 
+   if (!amdgpu_sriov_vf(adev))
+   return 0;
+
/* For Naiv1x, enable these features only after DAL initialization */
if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
--
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cguchun.chen%40amd.com%7C12ec63de0caa4134415008d85aea7b6a%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637359310721844702sdata=9JCzyhqPIKMZV%2BBEL83HZyfwCyZjTP5iPgs7Hn4Epx8%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Chen, JingWen
[AMD Public Use]

Done in v2

Best Regards,
JingWen Chen

> -Original Message-
> From: Chen, Guchun 
> Sent: Thursday, September 17, 2020 4:21 PM
> To: Chen, JingWen ; amd-
> g...@lists.freedesktop.org
> Cc: Chen, JingWen 
> Subject: RE: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV
> 
> [AMD Public Use]
> 
> Why not moving the check in smu_post_init, and return 0 at the first early
> stage if it's SRIOV case?
> 
> Regards,
> Guchun
> 
> -Original Message-
> From: amd-gfx  On Behalf Of
> Jingwen Chen
> Sent: Thursday, September 17, 2020 4:11 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Chen, JingWen 
> Subject: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV
> 
> smu_post_init needs to enable SMU feature, while this require virtualization
> off. Skip it since this feature is not used in SRIOV.
> 
> Signed-off-by: Jingwen Chen 
> ---
>  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 10 ++
>  1 file changed, 6 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index 5c4b74f964fc..79163d0ff762 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -469,10 +469,12 @@ static int smu_late_init(void *handle)
>   if (!smu->pm_enabled)
>   return 0;
> 
> - ret = smu_post_init(smu);
> - if (ret) {
> - dev_err(adev->dev, "Failed to post smu init!\n");
> - return ret;
> + if (!amdgpu_sriov_vf(adev)) {
> + ret = smu_post_init(smu);
> + if (ret) {
> + dev_err(adev->dev, "Failed to post smu init!\n");
> + return ret;
> + }
>   }
> 
>   ret = smu_set_default_od_settings(smu);
> --
> 2.25.1
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.
> freedesktop.org%2Fmailman%2Flistinfo%2Famd-
> gfxdata=02%7C01%7Cguchun.chen%40amd.com%7C7bc132d80cd34c4
> e7b8f08d85ae1fcc5%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C
> 637359274256715319sdata=x%2Bc0jbDbTv8PR7qj4GCbYgxorKyFg2K%2
> BJYgcrs4iftE%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Jingwen Chen
smu_post_init needs to enable SMU feature, while this require
virtualization off. Skip it since this feature is not used in SRIOV.

v2: move the check to the early stage of smu_post_init.

Signed-off-by: Jingwen Chen 
---
 drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index a027c7fdad56..a950f009c794 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2631,6 +2631,9 @@ static int navi10_post_smu_init(struct smu_context *smu)
uint64_t feature_mask = 0;
int ret = 0;
 
+   if (amdgpu_sriov_vf(adev))
+   return 0;
+
/* For Naiv1x, enable these features only after DAL initialization */
if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/display: fix crash/reboot while accessing sysfs files

2020-09-17 Thread Shirish S
Reads of and writes to the aux_dpcd_* sysfs entries lead to a system
reboot or hang.
Hence fix the handling of the input data and report errors
appropriately to user space.

Signed-off-by: Shirish S 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c  | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 004cd8d38214..8cd646eef096 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -908,7 +908,7 @@ static ssize_t dp_dpcd_address_write(struct file *f, const 
char __user *buf,
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
 
if (size < sizeof(connector->debugfs_dpcd_address))
-   return 0;
+   return -EINVAL;
 
r = copy_from_user(>debugfs_dpcd_address,
buf, sizeof(connector->debugfs_dpcd_address));
@@ -923,7 +923,7 @@ static ssize_t dp_dpcd_size_write(struct file *f, const 
char __user *buf,
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
 
if (size < sizeof(connector->debugfs_dpcd_size))
-   return 0;
+   return -EINVAL;
 
r = copy_from_user(>debugfs_dpcd_size,
buf, sizeof(connector->debugfs_dpcd_size));
@@ -943,8 +943,8 @@ static ssize_t dp_dpcd_data_write(struct file *f, const 
char __user *buf,
struct dc_link *link = connector->dc_link;
uint32_t write_size = connector->debugfs_dpcd_size;
 
-   if (size < write_size)
-   return 0;
+   if (!write_size || size < write_size)
+   return -EINVAL;
 
data = kzalloc(write_size, GFP_KERNEL);
if (!data)
@@ -967,7 +967,7 @@ static ssize_t dp_dpcd_data_read(struct file *f, char 
__user *buf,
struct dc_link *link = connector->dc_link;
uint32_t read_size = connector->debugfs_dpcd_size;
 
-   if (size < read_size)
+   if (!read_size || size < read_size)
return 0;
 
data = kzalloc(read_size, GFP_KERNEL);
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v1] powerplay:hwmgr - modify the return value

2020-09-17 Thread Christian König

Am 17.09.20 um 05:46 schrieb Xiaoliang Pang:

The correct return value should be -EINVAL.


Maybe better write something like "The correct return value should be 
-EINVAL." With that done feel free to add my acked-by.


Christian.



Fixes: f83a9991648bb("drm/amd/powerplay: add Vega10 powerplay support (v5)")
Fixes: 2cac05dee6e30("drm/amd/powerplay: add the hw manager for vega12 (v4)")
Cc: Eric Huang 
Cc: Evan Quan 
Signed-off-by: Xiaoliang Pang 
---
  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +-
  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 2 +-
  2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index c378a000c934..7eada3098ffc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -4659,7 +4659,7 @@ static int 
vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, 
WMTABLE, false);
-   PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return 
EINVAL);
+   PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return 
-EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
  
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c

index a678a67f1c0d..04da52cea824 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2390,7 +2390,7 @@ static int 
vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
!(data->water_marks_bitmap & WaterMarksLoaded)) {
result = smum_smc_table_manager(hwmgr,
(uint8_t *)wm_table, 
TABLE_WATERMARKS, false);
-   PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return 
EINVAL);
+   PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return 
-EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
}
  


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 04/15] drm/amd/display: Replace msleep with udelay while read edid return defer.

2020-09-17 Thread Christian König

Am 17.09.20 um 00:18 schrieb Alex Deucher:

On Wed, Sep 16, 2020 at 6:16 PM Zhuo, Qingqing  wrote:

[AMD Official Use Only - Internal Distribution Only]

On Wed, Sep 16, 2020 at 3:42 PM Qingqing Zhuo  wrote:

From: jinlong zhang 

[why]
When an EDID read returns a defer, the driver calls msleep(), but
msleep() can actually sleep for much longer than requested; the extra
delay causes the remaining EDID reads to fail.

[how]
Replace msleep() with udelay(), which does not add any extra delay, so
the EDID read finally passes.

How long of a delay are we talking about here?  Some platforms don't support 
long udelays and someone will send a patch to change this to msleep.

Alex

-

Hi Alex,

It's between 0-5ms for generic cases, though there exist some dongle workaround 
cases where we will do 70ms. Would this be a concern?

I think ARM has a limit of 2ms for udelay.


Yeah, there is even a define somewhere for this.

If you need a delay which is longer than this but still more precise 
than msleep() then there is the high precision timer sleep as alternative.


I've forgotten the function name to use here, but there was a LWN 
article about this a few years ago. You just need to google a bit.


Regards,
Christian.



Alex


Thank you,
Lillian



Signed-off-by: jinlong zhang 
Reviewed-by: Wenjing Liu 
Acked-by: Qingqing Zhuo 
---
  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 743042d5905a..cdcad82765e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -653,7 +653,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 if ((*payload->reply == 
AUX_TRANSACTION_REPLY_AUX_DEFER) ||
 (*payload->reply == 
AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
 if (payload->defer_delay > 0)
-   
msleep(payload->defer_delay);
+
+ udelay(payload->defer_delay * 1000);
 }
 }
 break;
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flist
s.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cqi
ngqing.zhuo%40amd.com%7C36c3bee68c28448769fa08d85a884619%7C3dd8961fe48
84e608e11a82d994e183d%7C0%7C0%7C63735627498307sdata=mynpHpiup
J%2FU2o5gZNW%2Bft%2Fg2beFY86%2BzMRWoTZCghQ%3Dreserved=0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


RE: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Chen, Guchun
[AMD Public Use]

Why not moving the check in smu_post_init, and return 0 at the first early 
stage if it's SRIOV case?

Regards,
Guchun

-Original Message-
From: amd-gfx  On Behalf Of Jingwen Chen
Sent: Thursday, September 17, 2020 4:11 PM
To: amd-gfx@lists.freedesktop.org
Cc: Chen, JingWen 
Subject: [PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

smu_post_init needs to enable an SMU feature, which requires virtualization
to be off. Skip it since this feature is not used in SRIOV.

Signed-off-by: Jingwen Chen 
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 5c4b74f964fc..79163d0ff762 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -469,10 +469,12 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
 
-   ret = smu_post_init(smu);
-   if (ret) {
-   dev_err(adev->dev, "Failed to post smu init!\n");
-   return ret;
+   if (!amdgpu_sriov_vf(adev)) {
+   ret = smu_post_init(smu);
+   if (ret) {
+   dev_err(adev->dev, "Failed to post smu init!\n");
+   return ret;
+   }
}
 
ret = smu_set_default_od_settings(smu);
--
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cguchun.chen%40amd.com%7C7bc132d80cd34c4e7b8f08d85ae1fcc5%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637359274256715319sdata=x%2Bc0jbDbTv8PR7qj4GCbYgxorKyFg2K%2BJYgcrs4iftE%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amd/pm: Skip smu_post_init in SRIOV

2020-09-17 Thread Jingwen Chen
smu_post_init needs to enable an SMU feature, which requires
virtualization to be off. Skip it since this feature is not used in SRIOV.

Signed-off-by: Jingwen Chen 
---
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 5c4b74f964fc..79163d0ff762 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -469,10 +469,12 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
 
-   ret = smu_post_init(smu);
-   if (ret) {
-   dev_err(adev->dev, "Failed to post smu init!\n");
-   return ret;
+   if (!amdgpu_sriov_vf(adev)) {
+   ret = smu_post_init(smu);
+   if (ret) {
+   dev_err(adev->dev, "Failed to post smu init!\n");
+   return ret;
+   }
}
 
ret = smu_set_default_od_settings(smu);
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 01/21] drm/amdgpu: Introduce GEM object functions

2020-09-17 Thread Thomas Zimmermann
Hi

Am 15.09.20 um 17:05 schrieb Christian König:
> Am 15.09.20 um 16:59 schrieb Thomas Zimmermann:
>> GEM object functions deprecate several similar callback interfaces in
>> struct drm_driver. This patch replaces the per-driver callbacks with
>> per-instance callbacks in amdgpu. The only exception is gem_prime_mmap,
>> which is non-trivial to convert.
>>
>> v2:
>> * move object-function instance to amdgpu_gem.c (Christian)
>> * set callbacks in amdgpu_gem_object_create() (Christian)
>>
>> Signed-off-by: Thomas Zimmermann 
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c    |  6 --
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c    | 23 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h    |  5 -
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |  1 +
>>   4 files changed, 19 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> index 6edde2b9e402..840ca8f9c1e1 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
>> @@ -1505,19 +1505,13 @@ static struct drm_driver kms_driver = {
>>   .lastclose = amdgpu_driver_lastclose_kms,
>>   .irq_handler = amdgpu_irq_handler,
>>   .ioctls = amdgpu_ioctls_kms,
>> -    .gem_free_object_unlocked = amdgpu_gem_object_free,
>> -    .gem_open_object = amdgpu_gem_object_open,
>> -    .gem_close_object = amdgpu_gem_object_close,
>>   .dumb_create = amdgpu_mode_dumb_create,
>>   .dumb_map_offset = amdgpu_mode_dumb_mmap,
>>   .fops = _driver_kms_fops,
>>     .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
>>   .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
>> -    .gem_prime_export = amdgpu_gem_prime_export,
>>   .gem_prime_import = amdgpu_gem_prime_import,
>> -    .gem_prime_vmap = amdgpu_gem_prime_vmap,
>> -    .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
>>   .gem_prime_mmap = amdgpu_gem_prime_mmap,
>>     .name = DRIVER_NAME,
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> index aa7f230c71bf..aeecd5dc3ce4 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>> @@ -36,9 +36,12 @@
>>     #include "amdgpu.h"
>>   #include "amdgpu_display.h"
>> +#include "amdgpu_dma_buf.h"
>>   #include "amdgpu_xgmi.h"
>>   -void amdgpu_gem_object_free(struct drm_gem_object *gobj)
>> +static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
>> +
>> +static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
>>   {
>>   struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
>>   @@ -87,6 +90,7 @@ int amdgpu_gem_object_create(struct amdgpu_device
>> *adev, unsigned long size,
>>   return r;
>>   }
>>   *obj = >tbo.base;
>> +    (*obj)->funcs = _gem_object_funcs;
>>     return 0;
>>   }
>> @@ -119,8 +123,8 @@ void amdgpu_gem_force_release(struct amdgpu_device
>> *adev)
>>    * Call from drm_gem_handle_create which appear in both new and open
>> ioctl
>>    * case.
>>    */
>> -int amdgpu_gem_object_open(struct drm_gem_object *obj,
>> -   struct drm_file *file_priv)
>> +static int amdgpu_gem_object_open(struct drm_gem_object *obj,
>> +  struct drm_file *file_priv)
>>   {
>>   struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
>>   struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
>> @@ -152,8 +156,8 @@ int amdgpu_gem_object_open(struct drm_gem_object
>> *obj,
>>   return 0;
>>   }
>>   -void amdgpu_gem_object_close(struct drm_gem_object *obj,
>> - struct drm_file *file_priv)
>> +static void amdgpu_gem_object_close(struct drm_gem_object *obj,
>> +    struct drm_file *file_priv)
>>   {
>>   struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
>>   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
>> @@ -211,6 +215,15 @@ void amdgpu_gem_object_close(struct
>> drm_gem_object *obj,
>>   ttm_eu_backoff_reservation(, );
>>   }
>>   +static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
>> +    .free = amdgpu_gem_object_free,
>> +    .open = amdgpu_gem_object_open,
>> +    .close = amdgpu_gem_object_close,
>> +    .export = amdgpu_gem_prime_export,
>> +    .vmap = amdgpu_gem_prime_vmap,
>> +    .vunmap = amdgpu_gem_prime_vunmap,
>> +};
>> +
>>   /*
>>    * GEM ioctls.
>>    */
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
>> index e0f025dd1b14..637bf51dbf06 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
>> @@ -33,11 +33,6 @@
>>   #define AMDGPU_GEM_DOMAIN_MAX    0x3
>>   #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct
>> amdgpu_bo, tbo.base)
>>   -void amdgpu_gem_object_free(struct drm_gem_object *obj);
>> -int amdgpu_gem_object_open(struct drm_gem_object *obj,
>> -    struct drm_file *file_priv);
>> -void 

[PATCH] drm/amd/display: dc/clk_mgr: add support for SI parts (v3)

2020-09-17 Thread Mauro Rossi
(v1) Changelog

[Why]
After commit c69dd2d "drm/amd/display: Refactor clk_mgr functions"
dc/clk_mgr requires these changes to add SI parts support
Necessary to avoid hitting default: ASSERT(0); /* Unknown Asic */
that would cause kernel freeze

[How]
Add case statement for FAMILY_SI chipsets

(v2) Changelog

[Why]
DCE6 has no DPREFCLK_CNTL register

[How]
Add DCE6 specific macros definitions for CLK registers and masks
Add DCE6 specific dce60/dce60_clk_mgr.c for DCE6 customization
Code style: reuse all the public functions in dce100/dce_clk_mgr.h header
Code style: use dce60_* static functions as per other DCE implementations
Add dce60_get_dp_ref_freq_khz() w/o using DPREFCLK_CNTL register
Use dce60_get_dp_ref_freq_khz() function in dce60_funcs
Add DCE6 specific dce60_clk_mgr_construct
dc/clk_mgr/dce_clk_mgr.c: use dce60_clk_mgr_construct for FAMILY_SI chipsets
Add Makefile rules for dce60_clk_mgr.o target conditional to 
CONFIG_DRM_AMD_DC_SI

(v3) Changelog

[Why]
linux-next kernel test robot reported the following problem:
warning: no previous prototype for 'dce60_get_dp_ref_freq_khz' 
[-Wmissing-prototypes]

[How]
mark dce60_get_dp_ref_freq_khz() as static

Fixes: 3ecb3b794e2 "drm/amd/display: dc/clk_mgr: add support for SI parts (v2)"
Reported-by: kernel test robot 
Signed-off-by: Mauro Rossi 
---
 drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index c11c6b3a787d..0267644717b2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -80,7 +80,7 @@ static const struct state_dependent_clocks 
dce60_max_clks_by_state[] = {
 /* ClocksStatePerformance */
 { .display_clk_khz = 60, .pixel_clk_khz = 40 } };
 
-int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
+static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
 {
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 00/21] Convert all remaining drivers to GEM object functions

2020-09-17 Thread Thomas Zimmermann
Hi

Am 15.09.20 um 17:25 schrieb Christian König:
> Added my rb to the amdgpu and radeon patches.
> 
> Should we pick those up through the amd branches or do you want to push
> everything to drm-misc-next?
> 
> I think the latter, since the former would likely result in merge clashes.

Yes, preferable, I'd merge it all through drm-misc.

Best regards
Thomas

> 
> Christian.
> 
> Am 15.09.20 um 16:59 schrieb Thomas Zimmermann:
>> The GEM and PRIME related callbacks in struct drm_driver are
>> deprecated in
>> favor of GEM object functions in struct drm_gem_object_funcs. This
>> patchset
>> converts the remaining drivers to object functions and removes most of
>> the
>> obsolete interfaces.
>>
>> Patches #1 to #16 and #18 to #19 convert DRM drivers to GEM object
>> functions,
>> one by one. Each patch moves existing callbacks from struct drm_driver
>> to an
>> instance of struct drm_gem_object_funcs, and sets these funcs when the
>> GEM
>> object is initialized. The expection is .gem_prime_mmap. There are
>> different
>> ways of how drivers implement the callback, and moving it to GEM object
>> functions requires a closer review for each.
>>
>> Patch #17 fixes virtgpu to use GEM object functions where possible. The
>> driver recently introduced a function for one of the deprecated
>> callbacks.
>>
>> Patch #20 converts xlnx to CMA helper macros. There's no apparent reason
>> why the driver does the GEM setup on it's own. Using CMA helper macros
>> adds GEM object functions implicitly.
>>
>> With most of the GEM and PRIME moved to GEM object functions, related
>> code
>> in struct drm_driver and in the DRM core/helpers is being removed by
>> patch
>> #21.
>>
>> Further testing is welcome. I tested the drivers for which I have HW
>> available. These are gma500, i915, nouveau, radeon and vc4. The console,
>> Weston and Xorg apparently work with the patches applied.
>>
>> v2:
>> * moved code in amdgpu and radeon
>> * made several functions static in various drivers
>> * updated TODO-list item
>> * fix virtgpu
>>
>> Thomas Zimmermann (21):
>>    drm/amdgpu: Introduce GEM object functions
>>    drm/armada: Introduce GEM object functions
>>    drm/etnaviv: Introduce GEM object functions
>>    drm/exynos: Introduce GEM object functions
>>    drm/gma500: Introduce GEM object functions
>>    drm/i915: Introduce GEM object functions
>>    drm/mediatek: Introduce GEM object functions
>>    drm/msm: Introduce GEM object funcs
>>    drm/nouveau: Introduce GEM object functions
>>    drm/omapdrm: Introduce GEM object functions
>>    drm/pl111: Introduce GEM object functions
>>    drm/radeon: Introduce GEM object functions
>>    drm/rockchip: Convert to drm_gem_object_funcs
>>    drm/tegra: Introduce GEM object functions
>>    drm/vc4: Introduce GEM object functions
>>    drm/vgem: Introduce GEM object functions
>>    drm/virtgpu: Set PRIME export function in struct drm_gem_object_funcs
>>    drm/vkms: Introduce GEM object functions
>>    drm/xen: Introduce GEM object functions
>>    drm/xlnx: Initialize DRM driver instance with CMA helper macro
>>    drm: Remove obsolete GEM and PRIME callbacks from struct drm_driver
>>
>>   Documentation/gpu/todo.rst    |  7 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c   |  6 --
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c   | 23 +++--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h   |  5 --
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  1 +
>>   drivers/gpu/drm/armada/armada_drv.c   |  3 -
>>   drivers/gpu/drm/armada/armada_gem.c   | 12 ++-
>>   drivers/gpu/drm/armada/armada_gem.h   |  2 -
>>   drivers/gpu/drm/drm_gem.c | 35 ++--
>>   drivers/gpu/drm/drm_gem_cma_helper.c  |  6 +-
>>   drivers/gpu/drm/drm_prime.c   | 17 ++--
>>   drivers/gpu/drm/etnaviv/etnaviv_drv.c | 13 ---
>>   drivers/gpu/drm/etnaviv/etnaviv_drv.h |  1 -
>>   drivers/gpu/drm/etnaviv/etnaviv_gem.c | 19 -
>>   drivers/gpu/drm/exynos/exynos_drm_drv.c   | 10 ---
>>   drivers/gpu/drm/exynos/exynos_drm_gem.c   | 15 
>>   drivers/gpu/drm/gma500/framebuffer.c  |  2 +
>>   drivers/gpu/drm/gma500/gem.c  | 18 +++-
>>   drivers/gpu/drm/gma500/gem.h  |  3 +
>>   drivers/gpu/drm/gma500/psb_drv.c  |  9 --
>>   drivers/gpu/drm/gma500/psb_drv.h  |  2 -
>>   drivers/gpu/drm/i915/gem/i915_gem_object.c    | 21 -
>>   drivers/gpu/drm/i915/gem/i915_gem_object.h    |  3 -
>>   drivers/gpu/drm/i915/i915_drv.c   |  4 -
>>   .../gpu/drm/i915/selftests/mock_gem_device.c  |  3 -
>>   drivers/gpu/drm/mediatek/mtk_drm_drv.c    |  5 --
>>   drivers/gpu/drm/mediatek/mtk_drm_gem.c    | 11 +++
>>   drivers/gpu/drm/msm/msm_drv.c | 13 ---
>>   drivers/gpu/drm/msm/msm_drv.h |  1 -
>>   drivers/gpu/drm/msm/msm_gem.c | 19 -
>>   drivers/gpu/drm/nouveau/nouveau_drm.c

RE: [PATCH] drm/amdgpu: remove experimental flag from navi12

2020-09-17 Thread Quan, Evan
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Evan Quan 

-Original Message-
From: amd-gfx  On Behalf Of Alex Deucher
Sent: Wednesday, September 16, 2020 2:22 AM
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander 
Subject: [PATCH] drm/amdgpu: remove experimental flag from navi12

Navi12 has worked fine for a while now.

Signed-off-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index d87d37c25329..6e4c860e8ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1073,8 +1073,8 @@ static const struct pci_device_id pciidlist[] = {
 {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},

 /* Navi12 */
-{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
-{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},

 {0, 0, 0}
 };
--
2.25.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cevan.quan%40amd.com%7C5b2949ea7e4e4338e9d808d859a4473b%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637357909421982334sdata=Yg3L5axov9ttkHuMh%2BgRUlU%2BF49y8iSeZxWA0vhJNs4%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx