RE: [PATCH] drm/dp_mst: Correct the bug in drm_dp_update_payload_part1()

2019-12-02 Thread Lin, Wayne


> -Original Message-
> From: Lyude Paul 
> Sent: Tuesday, December 3, 2019 8:23 AM
> To: Lin, Wayne ; dri-de...@lists.freedesktop.org;
> amd-gfx@lists.freedesktop.org
> Cc: Kazlauskas, Nicholas ; Wentland, Harry
> ; Zuo, Jerry 
> Subject: Re: [PATCH] drm/dp_mst: Correct the bug in
> drm_dp_update_payload_part1()
> 
> On Mon, 2019-12-02 at 11:58 +0800, Wayne Lin wrote:
> > [Why]
> > If the payload_state is DP_PAYLOAD_DELETE_LOCAL in series, current
> > code doesn't delete the payload at current index and just move the
> > index to next one after shuffling payloads.
> >
> > [How]
> > After shuffling payloads, decide whether to move on index or not
> > according to payload_state of current payload.
> >
> > Signed-off-by: Wayne Lin 
> > ---
> >  drivers/gpu/drm/drm_dp_mst_topology.c | 3 ++-
> >  1 file changed, 2 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> > b/drivers/gpu/drm/drm_dp_mst_topology.c
> > index 81e92b260d7a..8da5d461ea01 100644
> > --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> > +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> > @@ -3176,7 +3176,8 @@ int drm_dp_update_payload_part1(struct
> > drm_dp_mst_topology_mgr *mgr)
> > drm_dp_mst_topology_put_port(port);
> > }
> >
> > -   for (i = 0; i < mgr->max_payloads; i++) {
> > +   for (i = 0; i < mgr->max_payloads;
> > +   (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) ?
> > i : i++) {
> 
> Took me a moment to figure out what this line was actually doing. Nice catch
> by the way!
> 
> Anyway: let's just drop this line to avoid making things confusing to read,
> drop i++ from the for loop instead, and just rewrite it so it looks like this:
> 
> for (i = 0; i < mgr->max_payloads; /* do nothing */) {
>   if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
>   i++;
>   continue;
>   }
> 
> With those changes, this patch is:
> 
> Reviewed-by: Lyude Paul 
> 

Thanks for your time, and sorry for the poorly organized code.
I will send you the v2 right away. Thanks!
 
> I can go ahead and push these patches to drm-misc for you once you've sent
> me the v2
> > if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
> > continue;
> >
> --
> Cheers,
>   Lyude Paul
--
BR,
Wayne Lin
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH v2] drm/dp_mst: Correct the bug in drm_dp_update_payload_part1()

2019-12-02 Thread Wayne Lin
[Why]
If the payload_state is DP_PAYLOAD_DELETE_LOCAL in series, current
code doesn't delete the payload at current index and just move the
index to next one after shuffling payloads.

[How]
Drop the i++ increment from the for loop head and decide whether
to increase the index according to the payload_state of the current
payload.

Changes since v1:
* Refine the code to make it easier to read.
* Amend the commit message to match the way the code is modified now.

Signed-off-by: Wayne Lin 
Reviewed-by: Lyude Paul 
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c 
b/drivers/gpu/drm/drm_dp_mst_topology.c
index 81e92b260d7a..4ef6decc0551 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3176,9 +3176,11 @@ int drm_dp_update_payload_part1(struct 
drm_dp_mst_topology_mgr *mgr)
drm_dp_mst_topology_put_port(port);
}
 
-   for (i = 0; i < mgr->max_payloads; i++) {
-   if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
+   for (i = 0; i < mgr->max_payloads; /* do nothing */) {
+   if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
+   i++;
continue;
+   }
 
DRM_DEBUG_KMS("removing payload %d\n", i);
for (j = i; j < mgr->max_payloads - 1; j++) {
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 2/2] drm/amdkfd: Add Arcturus specific set_vm_context_page_table_base()

2019-12-02 Thread Yong Zhao
Since Arcturus has its own function pointer table, we can move the
Arcturus-specific logic there rather than leaving it entangled with
other GFX9 chips.

Change-Id: I7df7c004a0c8ac0616ded0e65144670df50f92a7
Signed-off-by: Yong Zhao 
---
 .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c   | 20 ++-
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 14 +++--
 .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h |  2 --
 3 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index b6713e0ed1b2..3c119407dc34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -46,6 +46,8 @@
 #include "soc15.h"
 #include "soc15d.h"
 #include "amdgpu_amdkfd_gfx_v9.h"
+#include "gfxhub_v1_0.h"
+#include "mmhub_v9_4.h"
 
 #define HQD_N_REGS 56
 #define DUMP_REG(addr) do {\
@@ -258,6 +260,22 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void 
*mqd,
return 0;
 }
 
+static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t 
vmid,
+   uint64_t page_table_base)
+{
+   struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+   if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+   pr_err("trying to set page table base for wrong VMID %u\n",
+  vmid);
+   return;
+   }
+
+   mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
+
+   gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
 const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -277,7 +295,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
-   .set_vm_context_page_table_base = 
kgd_gfx_v9_set_vm_context_page_table_base,
+   .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 6f1a4676ddde..e7861f0ef415 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -40,7 +40,6 @@
 #include "soc15d.h"
 #include "mmhub_v1_0.h"
 #include "gfxhub_v1_0.h"
-#include "mmhub_v9_4.h"
 
 
 enum hqd_dequeue_request_type {
@@ -758,8 +757,8 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev 
*kgd,
return 0;
 }
 
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t 
vmid,
-   uint64_t page_table_base)
+static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
+   uint32_t vmid, uint64_t page_table_base)
 {
struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
@@ -769,14 +768,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct 
kgd_dev *kgd, uint32_t vmi
return;
}
 
-   /* TODO: take advantage of per-process address space size. For
-* now, all processes share the same address space size, like
-* on GFX8 and older.
-*/
-   if (adev->asic_type == CHIP_ARCTURUS) {
-   mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
-   } else
-   mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+   mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index d9e9ad22b2bd..02b1426d17d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -57,8 +57,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev 
*kgd,
 
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
-void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t 
vmid,
-   uint64_t page_table_base);
 int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
 int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
 int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 1/2] drm/amdkfd: Contain MMHUB number in the implementation

2019-12-02 Thread Yong Zhao
Adjust the exposed function prototype so that the caller does not need
to know the MMHUB number.

Change-Id: I4420d1715984f703954f074682b075fc59e2a330
Signed-off-by: Yong Zhao 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c |  6 ++
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h |  8 
 drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c   | 13 +++--
 drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h   |  2 ++
 4 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 47c853ef1051..6f1a4676ddde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -40,7 +40,7 @@
 #include "soc15d.h"
 #include "mmhub_v1_0.h"
 #include "gfxhub_v1_0.h"
-#include "gmc_v9_0.h"
+#include "mmhub_v9_4.h"
 
 
 enum hqd_dequeue_request_type {
@@ -774,9 +774,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct 
kgd_dev *kgd, uint32_t vmi
 * on GFX8 and older.
 */
if (adev->asic_type == CHIP_ARCTURUS) {
-   /* Two MMHUBs */
-   mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base);
-   mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base);
+   mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base);
} else
mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h 
b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index 971c0840358f..49e8be761214 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -36,12 +36,4 @@
 
 extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
 extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
-
-/* amdgpu_amdkfd*.c */
-void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-   uint64_t value);
-void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-   uint64_t value);
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid,
-   uint32_t vmid, uint64_t value);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 8599bfdb9a9e..0b621bf8bbd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -54,7 +54,7 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
return base;
 }
 
-void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid,
+static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int 
hubid,
uint32_t vmid, uint64_t value)
 {
/* two registers distance between mmVML2VC0_VM_CONTEXT0_* to
@@ -80,7 +80,7 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct 
amdgpu_device *adev,
 {
uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
 
-   mmhub_v9_4_setup_vm_pt_regs(adev, hubid, 0, pt_base);
+   mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);
 
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
@@ -101,6 +101,15 @@ static void mmhub_v9_4_init_gart_aperture_regs(struct 
amdgpu_device *adev,
(u32)(adev->gmc.gart_end >> 44));
 }
 
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+   uint64_t page_table_base)
+{
+   int i;
+   for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
+   mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid, 
page_table_base);
+   }
+}
+
 static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
 int hubid)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h 
b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index 354a4b7e875b..1b979773776c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -34,5 +34,7 @@ void mmhub_v9_4_init(struct amdgpu_device *adev);
 int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
   enum amd_clockgating_state state);
 void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+   uint64_t page_table_base);
 
 #endif
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/dp_mst: Remove VCPI while disabling topology mgr

2019-12-02 Thread Lyude Paul
I'm not entirely sure what this patch is trying to accomplish. I'm guessing
maybe we're leaving stale VCPI allocations from the previous topology
enablement and then somehow trying to use those again when allocating
payloads? The patch looks correct at least.

If this patch is fixing an issue, such as displays not turning on with amdgpu,
I'd definitely mention it in more detail here and Cc to stable if applicable.
Also, one nitpick below:

On Mon, 2019-12-02 at 11:57 +0800, Wayne Lin wrote:
> [Why]
> While disabling the MST topology manager in
> drm_dp_mst_topology_mgr_set_mst(), the current code just resets
> mgr->payloads but doesn't handle mgr->proposed_vcpis.
> 
> [How]
> Reset mgr->proposed_vcpis to NULL.
> 
> Signed-off-by: Wayne Lin 
> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 12 
>  1 file changed, 12 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> b/drivers/gpu/drm/drm_dp_mst_topology.c
> index ae5809a1f19a..81e92b260d7a 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -3386,6 +3386,7 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw,
> u8  dp_link_count)
>  int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr,
> bool mst_state)
>  {
>   int ret = 0;
> + int i = 0;
>   struct drm_dp_mst_branch *mstb = NULL;
>  
>   mutex_lock(&mgr->lock);
> @@ -3446,10 +3447,21 @@ int drm_dp_mst_topology_mgr_set_mst(struct
> drm_dp_mst_topology_mgr *mgr, bool ms
>   /* this can fail if the device is gone */
>   drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
>   ret = 0;
> + mutex_lock(&mgr->payload_lock);
>   memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct
> drm_dp_payload));
>   mgr->payload_mask = 0;
>   set_bit(0, &mgr->payload_mask);
> + for (i = 0; i < mgr->max_payloads; i++) {
> + struct drm_dp_vcpi *tmp_vcpi = mgr->proposed_vcpis[i];
> +
> + if (tmp_vcpi) {
> + tmp_vcpi->vcpi = 0;
> + tmp_vcpi->num_slots = 0;
> + }
> + mgr->proposed_vcpis[i] = NULL;
> + }
>   mgr->vcpi_mask = 0;
> + mutex_unlock(&mgr->payload_lock);

bikeshed: I'd just rename tmp_vcpi here to vcpi, but I'll leave that up to you
>   }
>  
>  out_unlock:
-- 
Cheers,
Lyude Paul

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/dp_mst: Correct the bug in drm_dp_update_payload_part1()

2019-12-02 Thread Lyude Paul
On Mon, 2019-12-02 at 11:58 +0800, Wayne Lin wrote:
> [Why]
> If the payload_state is DP_PAYLOAD_DELETE_LOCAL in series, current
> code doesn't delete the payload at current index and just move the
> index to next one after shuffling payloads.
> 
> [How]
> After shuffling payloads, decide whether to move on index or not
> according to payload_state of current payload.
> 
> Signed-off-by: Wayne Lin 
> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 81e92b260d7a..8da5d461ea01 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -3176,7 +3176,8 @@ int drm_dp_update_payload_part1(struct
> drm_dp_mst_topology_mgr *mgr)
>   drm_dp_mst_topology_put_port(port);
>   }
>  
> - for (i = 0; i < mgr->max_payloads; i++) {
> + for (i = 0; i < mgr->max_payloads;
> + (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) ?
> i : i++) {

Took me a moment to figure out what this line was actually doing. Nice catch
by the way!

Anyway: let's just drop this line to avoid making things confusing to read,
drop i++ from the for loop instead, and just rewrite it so it looks like this:

for (i = 0; i < mgr->max_payloads; /* do nothing */) {
if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
i++;
continue;
}
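
Spelled out with the shuffle that already follows in the function, the
reworked loop ends up roughly like the sketch below (the shuffle body is
paraphrased and the payload_mask bookkeeping is elided, so treat this as
illustrative rather than the exact upstream lines):

	for (i = 0; i < mgr->max_payloads; /* do nothing */) {
		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
			i++;
			continue;
		}

		DRM_DEBUG_KMS("removing payload %d\n", i);
		/* Shift the remaining payloads down over the deleted slot. */
		for (j = i; j < mgr->max_payloads - 1; j++) {
			mgr->payloads[j] = mgr->payloads[j + 1];
			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
		}
		/*
		 * Do NOT advance i here: the next unprocessed payload has just
		 * been shifted into slot i, so it gets examined on the next pass.
		 */
	}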

With those changes, this patch is:

Reviewed-by: Lyude Paul 

I can go ahead and push these patches to drm-misc for you once you've sent me
the v2
>   if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
>   continue;
>  
-- 
Cheers,
Lyude Paul

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu/powerplay: unify smu send message function

2019-12-02 Thread Quan, Evan


> -Original Message-
> From: amd-gfx  On Behalf Of Likun
> Gao
> Sent: Monday, December 2, 2019 6:04 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Gao, Likun ; Wang, Kevin(Yang)
> ; Feng, Kenneth 
> Subject: [PATCH] drm/amdgpu/powerplay: unify smu send message function
> 
> From: Likun Gao 
> 
> Drop the smu_send_smc_msg function from the ASIC-specific structure.
> Reuse the smu_send_smc_msg_with_param function for smu_send_smc_msg.
> Set the parameter to 0 for the smu_send_msg function, otherwise it will
> send with the previous parameter value (not a deterministic value).
> 
> Signed-off-by: Likun Gao 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c |  8 
>  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c   |  1 -
>  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  3 ++-
>  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h  |  2 --
>  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h  |  2 --
>  drivers/gpu/drm/amd/powerplay/navi10_ppt.c |  1 -
>  drivers/gpu/drm/amd/powerplay/renoir_ppt.c |  1 -
>  drivers/gpu/drm/amd/powerplay/smu_internal.h   |  2 --
>  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 26 
> --
>  drivers/gpu/drm/amd/powerplay/smu_v12_0.c  | 25 -
>  drivers/gpu/drm/amd/powerplay/vega20_ppt.c |  1 -
>  11 files changed, 10 insertions(+), 62 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 36001a4..e039904 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -2567,3 +2567,11 @@ uint32_t smu_get_pptable_power_limit(struct
> smu_context *smu)
> 
>   return ret;
>  }
> +
> +int smu_send_smc_msg(struct smu_context *smu, uint16_t msg)
> +{
> + int ret;
> +
> + ret = smu->ppt_funcs->send_smc_msg_with_param(smu, msg, 0);
> + return ret;
> +}
[Quan, Evan] Better to still keep the smu_send_smc_msg definition in 
smu_internal.h as it's used internally.
Other than this, the patch is reviewed-by: Evan Quan 
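
For illustration, one way to keep it internal would be a small helper in
smu_internal.h along these lines (sketch only -- the exact form should
follow whatever convention smu_internal.h already uses for its wrappers):

	/* Sketch: keep the wrapper private to the SMU code instead of
	 * exporting it from amdgpu_smu.h.
	 */
	static inline int smu_send_smc_msg(struct smu_context *smu, uint16_t msg)
	{
		/* Reuse the _with_param path with an explicit 0 so no stale
		 * parameter value rides along with the message.
		 */
		return smu->ppt_funcs->send_smc_msg_with_param(smu, msg, 0);
	}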
> diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> index 68107de..3f13986 100644
> --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
> @@ -2137,7 +2137,6 @@ static const struct pptable_funcs arcturus_ppt_funcs
> = {
>   .set_tool_table_location = smu_v11_0_set_tool_table_location,
>   .notify_memory_pool_location =
> smu_v11_0_notify_memory_pool_location,
>   .system_features_control = smu_v11_0_system_features_control,
> - .send_smc_msg = smu_v11_0_send_msg,
>   .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
>   .read_smc_arg = smu_v11_0_read_arg,
>   .init_display_count = smu_v11_0_init_display_count,
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> index ada4a8d..fae1026 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
> @@ -500,7 +500,6 @@ struct pptable_funcs {
>   int (*notify_memory_pool_location)(struct smu_context *smu);
>   int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
>   int (*system_features_control)(struct smu_context *smu, bool en);
> - int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
>   int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t
> msg, uint32_t param);
>   int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
>   int (*init_display_count)(struct smu_context *smu, uint32_t count);
> @@ -725,4 +724,6 @@ int smu_get_dpm_clock_table(struct smu_context
> *smu,
> 
>  uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
> 
> +int smu_send_smc_msg(struct smu_context *smu, uint16_t msg);
> +
>  #endif
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> index 5a27713..80b1d20 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
> @@ -177,8 +177,6 @@ int smu_v11_0_notify_memory_pool_location(struct
> smu_context *smu);
>  int smu_v11_0_system_features_control(struct smu_context *smu,
>bool en);
> 
> -int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
> -
>  int
>  smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
> uint32_t param);
> diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> index 44c65dd..f709f6e 100644
> --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
> @@ -44,8 +44,6 @@ int smu_v12_0_read_arg(struct smu_context *smu,
> uint32_t *arg);
> 
>  int smu_v12_0_wait_for_response(struct smu_context *smu);
> 
> -int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);

RE: [PATCH 3/3] drm/amdgpu: load np fw prior before loading the TAs

2019-12-02 Thread Quan, Evan
Series is acked-by: Evan Quan 

> -Original Message-
> From: amd-gfx  On Behalf Of
> Hawking Zhang
> Sent: Monday, December 2, 2019 2:04 PM
> To: amd-gfx@lists.freedesktop.org; Min, Frank ;
> Clements, John ; Deucher, Alexander
> 
> Cc: Zhang, Hawking 
> Subject: [PATCH 3/3] drm/amdgpu: load np fw prior before loading the TAs
> 
> Platform TAs will independently toggle DF Cstate; for instance, get/set
> topology from the XGMI TA, or do error injection from the RAS TA. In such
> cases, PMFW needs to be loaded before the TAs so that all the subsequent
> Cstate calls received by the PSP FW can be routed to PMFW.
> 
> Change-Id: I83db1a22577a84ae647e7e570c200057650096c5
> Signed-off-by: Hawking Zhang 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 66 -
>  1 file changed, 33 insertions(+), 33 deletions(-)
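
For readers skimming the diff below, the resulting order in psp_load_fw() is
roughly the following (a paraphrased sketch; psp_np_fw_load() is assumed to
be the existing non-PSP firmware loader, the rest appears in the hunks):

	/*
	 * Rough init order after this patch (sketch, not exact code):
	 *
	 *   psp_hw_start()         - ring, sysdrv/sos, TMR setup
	 *   psp_np_fw_load()       - non-PSP firmware, including PMFW (SMU)
	 *   psp_asd_load()         - ASD session
	 *   psp_xgmi_initialize()  \
	 *   psp_ras_initialize()    } platform TAs, which may toggle DF Cstate,
	 *   psp_hdcp_initialize()   } now run after PMFW is up
	 *   psp_dtm_initialize()   /
	 */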
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> index 0e8907179e07..ceea8314d88d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> @@ -1218,39 +1218,6 @@ static int psp_hw_start(struct psp_context *psp)
>   return ret;
>   }
> 
> - ret = psp_asd_load(psp);
> - if (ret) {
> - DRM_ERROR("PSP load asd failed!\n");
> - return ret;
> - }
> -
> - if (adev->gmc.xgmi.num_physical_nodes > 1) {
> - ret = psp_xgmi_initialize(psp);
> - /* Warning the XGMI seesion initialize failure
> -  * Instead of stop driver initialization
> -  */
> - if (ret)
> - dev_err(psp->adev->dev,
> - "XGMI: Failed to initialize XGMI session\n");
> - }
> -
> - if (psp->adev->psp.ta_fw) {
> - ret = psp_ras_initialize(psp);
> - if (ret)
> - dev_err(psp->adev->dev,
> - "RAS: Failed to initialize RAS\n");
> -
> - ret = psp_hdcp_initialize(psp);
> - if (ret)
> - dev_err(psp->adev->dev,
> - "HDCP: Failed to initialize HDCP\n");
> -
> - ret = psp_dtm_initialize(psp);
> - if (ret)
> - dev_err(psp->adev->dev,
> - "DTM: Failed to initialize DTM\n");
> - }
> -
>   return 0;
>  }
> 
> @@ -1560,6 +1527,39 @@ static int psp_load_fw(struct amdgpu_device *adev)
>   if (ret)
>   goto failed;
> 
> + ret = psp_asd_load(psp);
> + if (ret) {
> + DRM_ERROR("PSP load asd failed!\n");
> + return ret;
> + }
> +
> + if (adev->gmc.xgmi.num_physical_nodes > 1) {
> + ret = psp_xgmi_initialize(psp);
> + /* Warning the XGMI seesion initialize failure
> +  * Instead of stop driver initialization
> +  */
> + if (ret)
> + dev_err(psp->adev->dev,
> + "XGMI: Failed to initialize XGMI session\n");
> + }
> +
> + if (psp->adev->psp.ta_fw) {
> + ret = psp_ras_initialize(psp);
> + if (ret)
> + dev_err(psp->adev->dev,
> + "RAS: Failed to initialize RAS\n");
> +
> + ret = psp_hdcp_initialize(psp);
> + if (ret)
> + dev_err(psp->adev->dev,
> + "HDCP: Failed to initialize HDCP\n");
> +
> + ret = psp_dtm_initialize(psp);
> + if (ret)
> + dev_err(psp->adev->dev,
> + "DTM: Failed to initialize DTM\n");
> + }
> +
>   return 0;
> 
>  failed:
> --
> 2.17.1
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [GIT PULL] Please pull hmm changes

2019-12-02 Thread Jason Gunthorpe
On Sat, Nov 30, 2019 at 10:23:31AM -0800, Linus Torvalds wrote:
> On Sat, Nov 30, 2019 at 10:03 AM Linus Torvalds
>  wrote:
> >
> > I'll try to figure the code out, but my initial reaction was "yeah,
> > not in my VM".
> 
> Why is it ok to sometimes do
> 
> WRITE_ONCE(mni->invalidate_seq, cur_seq);
> 
> (to pair with the unlocked READ_ONCE), and sometimes then do
> 
> mni->invalidate_seq = mmn_mm->invalidate_seq;
> 
> My initial guess was that latter is only done at initialization time,
> but at least in one case it's done *after* the mni has been added to
> the mmn_mm (oh, how I despise those names - I can only repeat: WTF?).

Yes, the only occurrences are in the notifier_insert, under the
spinlock. The one case where it is out of the natural order was to
make the manipulation of seq a bit saner, but in all cases since the
spinlock is held there is no way for another thread to get the pointer
to the 'mmu_interval_notifier *' to do the unlocked read.

Regarding the ugly names.. Naming has been really hard here because
currently everything is a 'mmu notifier' and the natural abbreviations
from there are crummy. Here is the basic summary:

struct mmu_notifier_mm (ie the mm->mmu_notifier_mm)
   -> mmn_mm
struct mm_struct 
   -> mm
struct mmu_notifier (ie the user subscription to the mm_struct)
   -> mn
struct mmu_interval_notifier (the other kind of user subscription)
   -> mni
struct mmu_notifier_range (ie the args to invalidate_range)
   -> range

I can send a patch to switch mmn_mm to mmu_notifier_mm, which is the
only pre-existing name for this value. But IIRC, it is somewhat ugly
with long line wrapping. 'mni' is a pain, I have to reflect on that.
(honestly, I dislike mmu_notifier_mm quite a lot too)

I think it would be overall nicer with better names for the original
structs. Perhaps:

 mmn_* - MMU notifier prefix
 mmn_state <- struct mmu_notifier_mm
 mmn_subscription (mmn_sub) <- struct mmu_notifier
 mmn_range_subscription (mmn_range_sub) <- struct mmu_interval_notifier
 mmn_invalidate_desc <- struct mmu_notifier_range

At least this is how I describe them in my mind..  This is a lot of
churn, and spreads through many drivers. This is why I kept the names
as-is and we ended up with the also quite bad 'mmu_interval_notifier'

Maybe just switch mmu_notifier_mm for mmn_state and leave the drivers
alone?

Anyone on the CC list have advice?

Jason
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/dp_mst: Remove VCPI while disabling topology mgr

2019-12-02 Thread Lin, Wayne


> -Original Message-
> From: Lyude Paul 
> Sent: Tuesday, December 3, 2019 8:03 AM
> To: Lin, Wayne ; dri-de...@lists.freedesktop.org;
> amd-gfx@lists.freedesktop.org
> Cc: Kazlauskas, Nicholas ; Wentland, Harry
> ; Zuo, Jerry 
> Subject: Re: [PATCH] drm/dp_mst: Remove VCPI while disabling topology mgr
> 
> I'm not entirely sure what this patch is trying to accomplish. I'm guessing
> maybe we're leaving stale VCPI allocations from the previous topology
> enablement and then somehow trying to use those again when allocating
> payloads? The patch looks correct at least.
> 
Thanks for your time and the comment.

Yes, this patch is trying to address the problem you mentioned.
Once a DP MST capable device is unplugged, the driver calls
drm_dp_mst_topology_mgr_set_mst() to reset mgr->payloads, but it doesn't
reset mgr->proposed_vcpis. If proposed_vcpis is not reset, the code will
fail the port validation check once an MST device is plugged in again later.
When that happens and the driver tries to allocate payloads by calling
drm_dp_update_payload_part1(), the function iterates over all proposed
virtual channels and validates each port to see whether the specified port
is still in the topology. Since proposed_vcpis[] still holds stale VCPI
allocations from the previous topology enablement, the flow fails and
returns -EINVAL.
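
Roughly, the part of drm_dp_update_payload_part1() that trips over the stale
entries looks like this (paraphrased sketch, not an exact copy of the
upstream function):

	for (i = 0; i < mgr->max_payloads; i++) {
		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];

		if (vcpi && vcpi->num_slots) {
			port = container_of(vcpi, struct drm_dp_mst_port, vcpi);
			/*
			 * A stale proposed_vcpis[i] from the old topology points
			 * at a port that is no longer in the new topology, so
			 * validation fails and the payload update bails out.
			 */
			port = drm_dp_mst_topology_get_port_validated(mgr, port);
			if (!port) {
				mutex_unlock(&mgr->payload_lock);
				return -EINVAL;
			}
		}
		/* ... payload bookkeeping continues ... */
	}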

> If this patch is fixing an issue, such as displays not turning on with 
> amdgpu, I'd
> definitely mention it in more detail here and Cc to stable if applicable.

Thanks for your comment. I will Cc sta...@vger.kernel.org and amend the
message in more detail in the next version.

> Also, one nitpick below:

Thanks, I'll modify it.
> 
> On Mon, 2019-12-02 at 11:57 +0800, Wayne Lin wrote:
> > [Why]
> > While disabling the MST topology manager in
> > drm_dp_mst_topology_mgr_set_mst(), the current code just resets
> > mgr->payloads but doesn't handle mgr->proposed_vcpis.
> >
> > [How]
> > Reset mgr->proposed_vcpis to NULL.
> >
> > Signed-off-by: Wayne Lin 
> > ---
> >  drivers/gpu/drm/drm_dp_mst_topology.c | 12 
> >  1 file changed, 12 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> > b/drivers/gpu/drm/drm_dp_mst_topology.c
> > index ae5809a1f19a..81e92b260d7a 100644
> > --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> > +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> > @@ -3386,6 +3386,7 @@ static int drm_dp_get_vc_payload_bw(u8
> > dp_link_bw,
> > u8  dp_link_count)
> >  int drm_dp_mst_topology_mgr_set_mst(struct
> drm_dp_mst_topology_mgr
> > *mgr, bool mst_state)  {
> > int ret = 0;
> > +   int i = 0;
> > struct drm_dp_mst_branch *mstb = NULL;
> >
> > mutex_lock(&mgr->lock);
> > @@ -3446,10 +3447,21 @@ int drm_dp_mst_topology_mgr_set_mst(struct
> > drm_dp_mst_topology_mgr *mgr, bool ms
> > /* this can fail if the device is gone */
> > drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
> > ret = 0;
> > +   mutex_lock(&mgr->payload_lock);
> > memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct
> > drm_dp_payload));
> > mgr->payload_mask = 0;
> > set_bit(0, &mgr->payload_mask);
> > +   for (i = 0; i < mgr->max_payloads; i++) {
> > +   struct drm_dp_vcpi *tmp_vcpi = mgr->proposed_vcpis[i];
> > +
> > +   if (tmp_vcpi) {
> > +   tmp_vcpi->vcpi = 0;
> > +   tmp_vcpi->num_slots = 0;
> > +   }
> > +   mgr->proposed_vcpis[i] = NULL;
> > +   }
> > mgr->vcpi_mask = 0;
> > +   mutex_unlock(&mgr->payload_lock);
> 
> bikeshed: I'd just rename tmp_vcpi here to vcpi, but I'll leave that up to you
> > }
> >
> >  out_unlock:
> --
> Cheers,
>   Lyude Paul
--
BR,
Wayne Lin

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] amd/amdgpu/sriov swSMU disable for sriov

2019-12-02 Thread Quan, Evan


> -Original Message-
> From: amd-gfx  On Behalf Of Jack
> Zhang
> Sent: Monday, December 2, 2019 7:05 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhang, Jack (Jian) 
> Subject: [PATCH] amd/amdgpu/sriov swSMU disable for sriov
> 
> For ARCTURUS and later boards under an SR-IOV platform,
> swSMU is not supported because the SMU IP block is commented out in
> the guest driver.
> 
> Generally for SR-IOV, initialization of the SMU is moved to the host driver.
> Thus, smu sw_init and hw_init will not be executed in the guest driver.
> 
> Without the sw structure being initialized in the guest driver, swSMU
> cannot declare itself supported.
> 
> Signed-off-by: Jack Zhang 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 5 -
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 36001a4..0b8a53b 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -531,7 +531,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
>   if (adev->asic_type == CHIP_VEGA20)
>   return (amdgpu_dpm == 2) ? true : false;
>   else if (adev->asic_type >= CHIP_ARCTURUS)
> - return true;
> + if (amdgpu_sriov_vf(adev))
> + return false;
> + else
> + return true;
[Quan, Evan] Are "{" and "}" missing around this code block? This seems a 
little weird.
>   else
>   return false;
>  }
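
For reference, with the braces added the hunk would presumably read as
follows (a sketch of the same logic, not the final patch):

	if (adev->asic_type == CHIP_VEGA20) {
		return (amdgpu_dpm == 2) ? true : false;
	} else if (adev->asic_type >= CHIP_ARCTURUS) {
		/* Under SR-IOV the SMU is owned by the host, so swSMU
		 * cannot claim support in the guest.
		 */
		if (amdgpu_sriov_vf(adev))
			return false;
		else
			return true;
	} else {
		return false;
	}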
> --
> 2.7.4
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 1/3] drm/amdgpu: drop asd shared memory

2019-12-02 Thread Alex Deucher
Thanks.

Reviewed-by: Alex Deucher 

On Mon, Dec 2, 2019 at 9:57 PM Zhang, Hawking  wrote:
>
> RE - Do we need to keep that memory around for the TAs or do they use some 
> other memory?
>
> Hi Alex,
>
> Each TA owns its shared memory for the cmd that gfx sends to the TEE. The
> command could be different per TA and might be used simultaneously, so the
> shared memory can't be shared among TAs.
>
> Originally, we also created a shared buffer for the ASD driver, but since
> the gfx driver is not privileged to talk to the ASD directly, the shared
> buffer is actually never used.
>
> Regards,
> Hawking
> -Original Message-
> From: Deucher, Alexander 
> Sent: 2019年12月3日 0:44
> To: Zhang, Hawking ; amd-gfx@lists.freedesktop.org; 
> Min, Frank ; Clements, John 
> Cc: Zhang, Hawking 
> Subject: RE: [PATCH 1/3] drm/amdgpu: drop asd shared memory
>
> > -Original Message-
> > From: Hawking Zhang 
> > Sent: Monday, December 2, 2019 1:04 AM
> > To: amd-gfx@lists.freedesktop.org; Min, Frank ;
> > Clements, John ; Deucher, Alexander
> > 
> > Cc: Zhang, Hawking 
> > Subject: [PATCH 1/3] drm/amdgpu: drop asd shared memory
> >
> > asd shared memory is not needed since drivers doesn't invoke any
> > further cmd to asd directly after the asd loading. trust application
> > is the one who needs to talk to asd after the initialization
> >
>
> Do we need to keep that memory around for the TAs or do they use some other 
> memory?
>
> Alex
>
> > Change-Id: I728afa4c7e8b67bc06678b10e92ac064ba10173e
> > Signed-off-by: Hawking Zhang 
> > ---
> >  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 44
> > +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 12
> > ---
> >  2 files changed, 18 insertions(+), 38 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> > index d8ef7098ffdf..bdc9e7ae4892 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> > @@ -309,35 +309,17 @@ static int psp_tmr_load(struct psp_context *psp)
> >   return ret;
> >  }
> >
> > -static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> > -  uint64_t asd_mc, uint64_t asd_mc_shared,
> > -  uint32_t size, uint32_t shared_size)
> > +static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> > + uint64_t asd_mc, uint32_t size)
> >  {
> >   cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
> >   cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
> >   cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
> >   cmd->cmd.cmd_load_ta.app_len = size;
> >
> > - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
> > lower_32_bits(asd_mc_shared);
> > - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
> > upper_32_bits(asd_mc_shared);
> > - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
> > -}
> > -
> > -static int psp_asd_init(struct psp_context *psp) -{
> > - int ret;
> > -
> > - /*
> > -  * Allocate 16k memory aligned to 4k from Frame Buffer (local
> > -  * physical) for shared ASD <-> Driver
> > -  */
> > - ret = amdgpu_bo_create_kernel(psp->adev,
> > PSP_ASD_SHARED_MEM_SIZE,
> > -   PAGE_SIZE,
> > AMDGPU_GEM_DOMAIN_VRAM,
> -   &psp->asd_shared_bo,
> -   &psp->asd_shared_mc_addr,
> -   &psp->asd_shared_buf);
> > -
> > - return ret;
> > + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
> > + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
> > + cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
> >  }
> >
> >  static int psp_asd_load(struct psp_context *psp) @@ -359,11 +341,15
> > @@ static int psp_asd_load(struct psp_context *psp)
> >   memset(psp->fw_pri_buf, 0, PSP_1_MEG);
> >   memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp-
> > >asd_ucode_size);
> >
> > - psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp-
> > >asd_shared_mc_addr,
> > -  psp->asd_ucode_size,
> > PSP_ASD_SHARED_MEM_SIZE);
> > + psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
> > +   psp->asd_ucode_size);
> >
> >   ret = psp_cmd_submit_buf(psp, NULL, cmd,
> >psp->fence_buf_mc_addr);
> > + if (!ret) {
> > + psp->asd_context.asd_initialized = true;
> > + psp->asd_context.session_id = cmd->resp.session_id;
> > + }
> >
> >   kfree(cmd);
> >
> > @@ -1198,12 +1184,6 @@ static int psp_hw_start(struct psp_context *psp)
> >   return ret;
> >   }
> >
> > - ret = psp_asd_init(psp);
> > - if (ret) {
> > - DRM_ERROR("PSP asd init failed!\n");
> > - return ret;
> > - }
> > -
> >   ret = psp_asd_load(psp);
> >   if (ret) {
> >   DRM_ERROR("PSP load asd failed!\n"); @@ -1611,8 +1591,6 @@ 
> > static
> > int 

RE: [PATCH 1/3] drm/amdgpu: drop asd shared memory

2019-12-02 Thread Zhang, Hawking
Correct the typo: gfx -> gfx driver in the first sentence.

-Original Message-
From: amd-gfx  On Behalf Of Zhang, 
Hawking
Sent: 2019年12月3日 10:57
To: Deucher, Alexander ; 
amd-gfx@lists.freedesktop.org; Min, Frank ; Clements, John 

Subject: RE: [PATCH 1/3] drm/amdgpu: drop asd shared memory

RE - Do we need to keep that memory around for the TAs or do they use some 
other memory?

Hi Alex,

Each TA owns its shared memory for the cmd that the gfx driver sends to the
TEE. The command could be different per TA and might be used simultaneously,
so the shared memory can't be shared among TAs.

Originally, we also created a shared buffer for the ASD driver, but since
the gfx driver is not privileged to talk to the ASD directly, the shared
buffer is actually never used.

Regards,
Hawking
-Original Message-
From: Deucher, Alexander 
Sent: 2019年12月3日 0:44
To: Zhang, Hawking ; amd-gfx@lists.freedesktop.org; Min, 
Frank ; Clements, John 
Cc: Zhang, Hawking 
Subject: RE: [PATCH 1/3] drm/amdgpu: drop asd shared memory

> -Original Message-
> From: Hawking Zhang 
> Sent: Monday, December 2, 2019 1:04 AM
> To: amd-gfx@lists.freedesktop.org; Min, Frank ; 
> Clements, John ; Deucher, Alexander 
> 
> Cc: Zhang, Hawking 
> Subject: [PATCH 1/3] drm/amdgpu: drop asd shared memory
> 
> asd shared memory is not needed since drivers doesn't invoke any 
> further cmd to asd directly after the asd loading. trust application 
> is the one who needs to talk to asd after the initialization
> 

Do we need to keep that memory around for the TAs or do they use some other 
memory?

Alex

> Change-Id: I728afa4c7e8b67bc06678b10e92ac064ba10173e
> Signed-off-by: Hawking Zhang 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 44
> +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 12
> ---
>  2 files changed, 18 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> index d8ef7098ffdf..bdc9e7ae4892 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> @@ -309,35 +309,17 @@ static int psp_tmr_load(struct psp_context *psp)
>   return ret;
>  }
> 
> -static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> -  uint64_t asd_mc, uint64_t asd_mc_shared,
> -  uint32_t size, uint32_t shared_size)
> +static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> + uint64_t asd_mc, uint32_t size)
>  {
>   cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
>   cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
>   cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
>   cmd->cmd.cmd_load_ta.app_len = size;
> 
> - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
> lower_32_bits(asd_mc_shared);
> - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
> upper_32_bits(asd_mc_shared);
> - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
> -}
> -
> -static int psp_asd_init(struct psp_context *psp) -{
> - int ret;
> -
> - /*
> -  * Allocate 16k memory aligned to 4k from Frame Buffer (local
> -  * physical) for shared ASD <-> Driver
> -  */
> - ret = amdgpu_bo_create_kernel(psp->adev,
> PSP_ASD_SHARED_MEM_SIZE,
> -   PAGE_SIZE,
> AMDGPU_GEM_DOMAIN_VRAM,
> -   &psp->asd_shared_bo,
> -   &psp->asd_shared_mc_addr,
> -   &psp->asd_shared_buf);
> -
> - return ret;
> + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
> + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
> + cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
>  }
> 
>  static int psp_asd_load(struct psp_context *psp) @@ -359,11 +341,15 
> @@ static int psp_asd_load(struct psp_context *psp)
>   memset(psp->fw_pri_buf, 0, PSP_1_MEG);
>   memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp-
> >asd_ucode_size);
> 
> - psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp-
> >asd_shared_mc_addr,
> -  psp->asd_ucode_size,
> PSP_ASD_SHARED_MEM_SIZE);
> + psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
> +   psp->asd_ucode_size);
> 
>   ret = psp_cmd_submit_buf(psp, NULL, cmd,
>psp->fence_buf_mc_addr);
> + if (!ret) {
> + psp->asd_context.asd_initialized = true;
> + psp->asd_context.session_id = cmd->resp.session_id;
> + }
> 
>   kfree(cmd);
> 
> @@ -1198,12 +1184,6 @@ static int psp_hw_start(struct psp_context *psp)
>   return ret;
>   }
> 
> - ret = psp_asd_init(psp);
> - if (ret) {
> - DRM_ERROR("PSP asd init failed!\n");
> - return ret;
> - }
> -
>   ret = psp_asd_load(psp);
>   if (ret) {
>   DRM_ERROR("PSP load asd failed!\n"); @@ -1611,8 +1591,6 @@ 
> static 
> int psp_hw_fini(void *handle)
>   

RE: [PATCH 1/3] drm/amdgpu: drop asd shared memory

2019-12-02 Thread Zhang, Hawking
RE - Do we need to keep that memory around for the TAs or do they use some 
other memory?

Hi Alex,

Each TA owns its shared memory for the cmd that gfx sends to the TEE. The
command could be different per TA and might be used simultaneously, so the
shared memory can't be shared among TAs.

Originally, we also created a shared buffer for the ASD driver, but since
the gfx driver is not privileged to talk to the ASD directly, the shared
buffer is actually never used.

Regards,
Hawking
-Original Message-
From: Deucher, Alexander  
Sent: 2019年12月3日 0:44
To: Zhang, Hawking ; amd-gfx@lists.freedesktop.org; Min, 
Frank ; Clements, John 
Cc: Zhang, Hawking 
Subject: RE: [PATCH 1/3] drm/amdgpu: drop asd shared memory

> -Original Message-
> From: Hawking Zhang 
> Sent: Monday, December 2, 2019 1:04 AM
> To: amd-gfx@lists.freedesktop.org; Min, Frank ; 
> Clements, John ; Deucher, Alexander 
> 
> Cc: Zhang, Hawking 
> Subject: [PATCH 1/3] drm/amdgpu: drop asd shared memory
> 
> asd shared memory is not needed since drivers doesn't invoke any 
> further cmd to asd directly after the asd loading. trust application 
> is the one who needs to talk to asd after the initialization
> 

Do we need to keep that memory around for the TAs or do they use some other 
memory?

Alex

> Change-Id: I728afa4c7e8b67bc06678b10e92ac064ba10173e
> Signed-off-by: Hawking Zhang 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 44 
> +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 12 
> ---
>  2 files changed, 18 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> index d8ef7098ffdf..bdc9e7ae4892 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> @@ -309,35 +309,17 @@ static int psp_tmr_load(struct psp_context *psp)
>   return ret;
>  }
> 
> -static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> -  uint64_t asd_mc, uint64_t asd_mc_shared,
> -  uint32_t size, uint32_t shared_size)
> +static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> + uint64_t asd_mc, uint32_t size)
>  {
>   cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
>   cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
>   cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
>   cmd->cmd.cmd_load_ta.app_len = size;
> 
> - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
> lower_32_bits(asd_mc_shared);
> - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
> upper_32_bits(asd_mc_shared);
> - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
> -}
> -
> -static int psp_asd_init(struct psp_context *psp) -{
> - int ret;
> -
> - /*
> -  * Allocate 16k memory aligned to 4k from Frame Buffer (local
> -  * physical) for shared ASD <-> Driver
> -  */
> - ret = amdgpu_bo_create_kernel(psp->adev,
> PSP_ASD_SHARED_MEM_SIZE,
> -   PAGE_SIZE,
> AMDGPU_GEM_DOMAIN_VRAM,
> -   &psp->asd_shared_bo,
> -   &psp->asd_shared_mc_addr,
> -   &psp->asd_shared_buf);
> -
> - return ret;
> + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
> + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
> + cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
>  }
> 
>  static int psp_asd_load(struct psp_context *psp) @@ -359,11 +341,15 
> @@ static int psp_asd_load(struct psp_context *psp)
>   memset(psp->fw_pri_buf, 0, PSP_1_MEG);
>   memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp-
> >asd_ucode_size);
> 
> - psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp-
> >asd_shared_mc_addr,
> -  psp->asd_ucode_size,
> PSP_ASD_SHARED_MEM_SIZE);
> + psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
> +   psp->asd_ucode_size);
> 
>   ret = psp_cmd_submit_buf(psp, NULL, cmd,
>psp->fence_buf_mc_addr);
> + if (!ret) {
> + psp->asd_context.asd_initialized = true;
> + psp->asd_context.session_id = cmd->resp.session_id;
> + }
> 
>   kfree(cmd);
> 
> @@ -1198,12 +1184,6 @@ static int psp_hw_start(struct psp_context *psp)
>   return ret;
>   }
> 
> - ret = psp_asd_init(psp);
> - if (ret) {
> - DRM_ERROR("PSP asd init failed!\n");
> - return ret;
> - }
> -
>   ret = psp_asd_load(psp);
>   if (ret) {
>   DRM_ERROR("PSP load asd failed!\n"); @@ -1611,8 +1591,6 @@ 
> static 
> int psp_hw_fini(void *handle)
> &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
>   amdgpu_bo_free_kernel(&psp->fence_buf_bo,
> &psp->fence_buf_mc_addr, &psp->fence_buf);
> - amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
> -   &psp->asd_shared_buf);
>   

RE: [PATCH] amd/amdgpu/sriov swSMU disable for sriov

2019-12-02 Thread Zhang, Jack (Jian)
Thanks Evan, I will add "{ }" before I check in the code.

Best,
Jack

-Original Message-
From: Quan, Evan  
Sent: Tuesday, December 3, 2019 10:45 AM
To: Zhang, Jack (Jian) ; amd-gfx@lists.freedesktop.org
Cc: Zhang, Jack (Jian) 
Subject: RE: [PATCH] amd/amdgpu/sriov swSMU disable for sriov



> -Original Message-
> From: amd-gfx  On Behalf Of 
> Jack Zhang
> Sent: Monday, December 2, 2019 7:05 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Zhang, Jack (Jian) 
> Subject: [PATCH] amd/amdgpu/sriov swSMU disable for sriov
> 
> For ARCTURUS and later boards under an SR-IOV platform, swSMU is not
> supported because the SMU IP block is commented out in the guest driver.
> 
> Generally for SR-IOV, initialization of the SMU is moved to the host driver.
> Thus, smu sw_init and hw_init will not be executed in the guest driver.
> 
> Without the sw structure being initialized in the guest driver, swSMU
> cannot declare itself supported.
> 
> Signed-off-by: Jack Zhang 
> ---
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 5 -
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index 36001a4..0b8a53b 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -531,7 +531,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
>   if (adev->asic_type == CHIP_VEGA20)
>   return (amdgpu_dpm == 2) ? true : false;
>   else if (adev->asic_type >= CHIP_ARCTURUS)
> - return true;
> + if (amdgpu_sriov_vf(adev))
> + return false;
> + else
> + return true;
[Quan, Evan] Are "{" and "}" missing around this code block? This seems a 
little weird.
>   else
>   return false;
>  }
> --
> 2.7.4
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/dp_mst: Correct the bug in drm_dp_update_payload_part1()

2019-12-02 Thread Liu, Zhan


> -Original Message-
> From: amd-gfx  On Behalf Of
> Wayne Lin
> Sent: 2019/December/01, Sunday 10:59 PM
> To: dri-de...@lists.freedesktop.org; amd-gfx@lists.freedesktop.org
> Cc: Zuo, Jerry ; Wentland, Harry
> ; Kazlauskas, Nicholas
> ; Lin, Wayne 
> Subject: [PATCH] drm/dp_mst: Correct the bug in
> drm_dp_update_payload_part1()
> 
> [Why]
> If the payload_state is DP_PAYLOAD_DELETE_LOCAL in series, current code
> doesn't delete the payload at current index and just move the index to next
> one after shuffling payloads.
> 
> [How]
> After shuffling payloads, decide whether to move on index or not according
> to payload_state of current payload.
> 
> Signed-off-by: Wayne Lin 


Reviewed-by: Zhan Liu 


> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 81e92b260d7a..8da5d461ea01 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -3176,7 +3176,8 @@ int drm_dp_update_payload_part1(struct
> drm_dp_mst_topology_mgr *mgr)
>   drm_dp_mst_topology_put_port(port);
>   }
> 
> - for (i = 0; i < mgr->max_payloads; i++) {
> + for (i = 0; i < mgr->max_payloads;
> + (mgr->payloads[i].payload_state ==
> DP_PAYLOAD_DELETE_LOCAL) ? i :
> +i++) {
>   if (mgr->payloads[i].payload_state !=
> DP_PAYLOAD_DELETE_LOCAL)
>   continue;
> 
> --
> 2.17.1
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 12/51] drm/amd/display: Return DMUB_STATUS_OK when autoload unsupported

2019-12-02 Thread sunpeng . li
From: Nicholas Kazlauskas 

[Why]
Not having support for autoload isn't an error. If the DMUB firmware
doesn't support it then don't return DMUB_STATUS_INVALID.

[How]
Return DMUB_STATUS_OK when ->is_auto_load_done is NULL.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 3ec26f6af2e1..70c7a4be9ccc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -379,9 +379,12 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct 
dmub_srv *dmub,
 {
uint32_t i;
 
-   if (!dmub->hw_init || !dmub->hw_funcs.is_auto_load_done)
+   if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
 
+   if (!dmub->hw_funcs.is_auto_load_done)
+   return DMUB_STATUS_OK;
+
for (i = 0; i <= timeout_us; i += 100) {
if (dmub->hw_funcs.is_auto_load_done(dmub))
return DMUB_STATUS_OK;
@@ -397,9 +400,12 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct 
dmub_srv *dmub,
 {
uint32_t i = 0;
 
-   if (!dmub->hw_init || !dmub->hw_funcs.is_phy_init)
+   if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
 
+   if (!dmub->hw_funcs.is_phy_init)
+   return DMUB_STATUS_OK;
+
for (i = 0; i <= timeout_us; i += 10) {
if (dmub->hw_funcs.is_phy_init(dmub))
return DMUB_STATUS_OK;
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 01/51] drm/amd/display: update sr and pstate latencies for Renoir

2019-12-02 Thread sunpeng . li
From: Eric Yang 

[Why]
DF team has produced more optimized latency numbers.

[How]
Add sr latencies to the wm table, use different latencies
for different wm sets.
Also fix the bb override from the registry key for these latencies.

Signed-off-by: Eric Yang 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c| 16 
 .../drm/amd/display/dc/dcn21/dcn21_resource.c| 15 ---
 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h  |  2 ++
 3 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 790a2d211bd6..841095d09d3c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -523,25 +523,33 @@ struct clk_bw_params rn_bw_params = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 6.09,
+   .sr_enter_plus_exit_time_us = 7.14,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 10.12,
+   .sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 10.12,
+   .sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 10.12,
+   .sr_enter_plus_exit_time_us = 11.48,
.valid = true,
},
},
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index dd3bc37d4eb9..818c7a629484 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -972,6 +972,8 @@ static void calculate_wm_set_for_vlevel(
pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
 
dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+   dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+   dml->soc.sr_enter_plus_exit_time_us = 
table_entry->sr_enter_plus_exit_time_us;
 
wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.cstate_enter_plus_exit_ns = 
get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
@@ -987,14 +989,21 @@ static void calculate_wm_set_for_vlevel(
 
 static void patch_bounding_box(struct dc *dc, struct 
_vcs_dpi_soc_bounding_box_st *bb)
 {
+   int i;
+
kernel_fpu_begin();
if (dc->bb_overrides.sr_exit_time_ns) {
-   bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+   for (i = 0; i < WM_SET_COUNT; i++) {
+ 
dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
+ dc->bb_overrides.sr_exit_time_ns / 
1000.0;
+   }
}
 
if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
-   bb->sr_enter_plus_exit_time_us =
-   dc->bb_overrides.sr_enter_plus_exit_time_ns / 
1000.0;
+   for (i = 0; i < WM_SET_COUNT; i++) {
+ 
dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us =
+ 
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+   }
}
 
if (dc->bb_overrides.urgent_latency_ns) {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index f55203e427de..4aa09fe954c5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -66,6 

[PATCH 05/51] drm/amd/display: Map DSC resources 1-to-1 if numbers of OPPs and DSCs are equal

2019-12-02 Thread sunpeng . li
From: Nikola Cornij 

[why]
On ASICs where the number of DSCs is the same as the number of OPPs
there's no need for DSC resource management. Mapping 1-to-1 fixes
mode-set- or S3-related issues for such platforms.

[how]
Map DSC resources 1-to-1 to pipes only if the number of OPPs is the
same as the number of DSCs. This still keeps other ASICs working.
A follow-up patch will be required to fix mode-set issues on those
ASICs if testing shows any.
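
A minimal standalone sketch of the acquisition policy described above; the types are simplified stand-ins for the driver's resource_context and resource_pool, and only the selection logic is shown.

#include <stdbool.h>
#include <stddef.h>

#define MAX_PIPES_SKETCH 6

struct dsc_sketch;

struct pool_sketch {
        int num_dsc;
        int num_opp;
        struct dsc_sketch *dscs[MAX_PIPES_SKETCH];
};

struct res_ctx_sketch {
        bool is_dsc_acquired[MAX_PIPES_SKETCH];
};

/* 1-to-1 mapping when DSC count equals OPP count; otherwise fall back
 * to the existing first-free allocation. */
static struct dsc_sketch *acquire_dsc_sketch(struct res_ctx_sketch *res,
                                             const struct pool_sketch *pool,
                                             int pipe_idx)
{
        int i;

        if (pool->num_dsc == pool->num_opp) {
                res->is_dsc_acquired[pipe_idx] = true;
                return pool->dscs[pipe_idx];
        }

        for (i = 0; i < pool->num_dsc; i++) {
                if (!res->is_dsc_acquired[i]) {
                        res->is_dsc_acquired[i] = true;
                        return pool->dscs[i];
                }
        }

        return NULL; /* no DSC available */
}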

Signed-off-by: Nikola Cornij 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c   | 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index da7a92fc0909..2aa6c0be45b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1458,13 +1458,20 @@ enum dc_status dcn20_build_mapped_resource(const struct 
dc *dc, struct dc_state
 
 static void acquire_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
-   struct display_stream_compressor **dsc)
+   struct display_stream_compressor **dsc,
+   int pipe_idx)
 {
int i;
 
ASSERT(*dsc == NULL);
*dsc = NULL;
 
+   if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+   *dsc = pool->dscs[pipe_idx];
+   res_ctx->is_dsc_acquired[pipe_idx] = true;
+   return;
+   }
+
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
@@ -1505,7 +1512,7 @@ static enum dc_status add_dsc_to_stream_resource(struct 
dc *dc,
if (pipe_ctx->stream != dc_stream)
continue;
 
-   acquire_dsc(_ctx->res_ctx, pool, _ctx->stream_res.dsc);
+   acquire_dsc(_ctx->res_ctx, pool, _ctx->stream_res.dsc, i);
 
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
@@ -1697,7 +1704,7 @@ bool dcn20_split_stream_for_odm(
}
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
-   acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc);
+   acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
-- 
2.24.0


[PATCH 04/51] drm/amd/display: Fix Dali clk mgr construct

2019-12-02 Thread sunpeng . li
From: Michael Strauss 

[WHY]
Dali is currently being misidentified as Renoir and, as a result,
uses the wrong clk mgr constructor.

[HOW]
Add a check to init Dali as Raven2 before it can be misidentified.
Clean up and fix the Raven2 and Dali ASIC checks.
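
A small standalone sketch of why the ordering matters when one revision window sits inside another; the constants and the over-wide check below are illustrative, not the real dal_asic_id macros.

/* Dali's IDs fall inside the wider range that a naive "is Renoir" check
 * also matches, so the narrower check has to run first. */
enum { DALI_REV_E3 = 0xE3, DALI_REV_E4 = 0xE4 };

static int is_dali(unsigned int rev)
{
        return rev == DALI_REV_E3 || rev == DALI_REV_E4;
}

static int is_renoir_too_broad(unsigned int rev)
{
        return rev >= 0x80 && rev < 0xFF; /* hypothetical over-wide window */
}

static const char *pick_clk_mgr_ctor(unsigned int rev)
{
        if (is_dali(rev))               /* narrower check first */
                return "rv2_clk_mgr_construct";
        if (is_renoir_too_broad(rev))
                return "rn_clk_mgr_construct";
        return "other";
}
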
Signed-off-by: Michael Strauss 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  |  7 +++
 drivers/gpu/drm/amd/display/include/dal_asic_id.h | 12 +---
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a7c4c1d1fc59..6d60ef822619 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -134,6 +134,13 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, 
struct pp_smu_funcs *p
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
+   if (ASICREV_IS_DALI(asic_id.hw_internal_rev)) {
+   /* TEMP: this check has to come before 
ASICREV_IS_RENOIR */
+   /* which also incorrectly returns true for Dali */
+   rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
+   break;
+   }
+
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h 
b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 6f56208a9471..72b659c63aea 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,19 +134,17 @@
 #define PICASSO_A0 0x41
 /* DCN1_01 */
 #define RAVEN2_A0 0x81
+#define RAVEN2_15D8_REV_E3 0xE3
+#define RAVEN2_15D8_REV_E4 0xE4
 #define RAVEN1_F0 0xF0
 #define RAVEN_UNKNOWN 0xFF
 
-#define PICASSO_15D8_REV_E3 0xE3
-#define PICASSO_15D8_REV_E4 0xE4
-
 #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < 
RAVEN_UNKNOWN)
 #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < 
RAVEN2_A0))
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 
PICASSO_15D8_REV_E3))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev >= PICASSO_15D8_REV_E3) && 
(eChipRev < RAVEN1_F0))
-
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 
RAVEN1_F0))
 #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < 
RAVEN_UNKNOWN))
-
+#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
+   || (eChipRev == RAVEN2_15D8_REV_E4))
 
 #define FAMILY_RV 142 /* DCN 1*/
 
-- 
2.24.0


[PATCH 03/51] drm/amd/display: add separate of private hwss functions

2019-12-02 Thread sunpeng . li
From: Anthony Koo 

[Why]
Some function pointers in the hwss function pointer table are
meant to be hw sequencer entry points to be called from dc.

However, some of those function pointers are not meant to be entry
points; instead they are used as a code reuse/inheritance tool,
called directly by other hwss functions rather than by dc.

Therefore, we want a clearer separation between the functions we
consider interface functions and the functions we only use within
hwss.

[How]
DC interface functions will be stored in:
struct hw_sequencer_funcs
Functions used within HWSS will be stored in:
struct hwseq_private_funcs
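
A reduced sketch of the split; the member sets below are only examples to show the intent (the real structs carry many more hooks, and the private example member is illustrative).

struct dc;
struct pipe_ctx;

/* Entry points DC core is allowed to call. */
struct hw_sequencer_funcs_sketch {
        void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
        void (*set_dmdata_attributes)(struct pipe_ctx *pipe_ctx);
};

/* Helpers shared between hwss implementations; never called from DC core. */
struct hwseq_private_funcs_sketch {
        void (*internal_pipe_helper)(const struct dc *dc, struct pipe_ctx *pipe_ctx);
};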

Signed-off-by: Anthony Koo 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  |   6 +
 .../gpu/drm/amd/display/dc/core/dc_debug.c|   1 -
 .../gpu/drm/amd/display/dc/core/dc_stream.c   |   3 -
 .../gpu/drm/amd/display/dc/dce/dce_hwseq.c|   2 +-
 .../gpu/drm/amd/display/dc/dce/dce_hwseq.h|   6 +-
 .../display/dc/dce100/dce100_hw_sequencer.c   |   3 +-
 .../display/dc/dce100/dce100_hw_sequencer.h   |   1 +
 .../display/dc/dce110/dce110_hw_sequencer.c   |  77 ++--
 .../display/dc/dce110/dce110_hw_sequencer.h   |   1 +
 .../amd/display/dc/dce110/dce110_resource.c   |   3 +-
 .../display/dc/dce112/dce112_hw_sequencer.c   |   2 +-
 .../display/dc/dce112/dce112_hw_sequencer.h   |   1 +
 .../display/dc/dce120/dce120_hw_sequencer.c   |   2 +-
 .../display/dc/dce120/dce120_hw_sequencer.h   |   1 +
 .../amd/display/dc/dce80/dce80_hw_sequencer.c |   2 +-
 .../amd/display/dc/dce80/dce80_hw_sequencer.h |   1 +
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 124 +++---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.h |   1 +
 .../gpu/drm/amd/display/dc/dcn10/dcn10_init.c |  38 +-
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c|  71 ++--
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.h|   3 +
 .../gpu/drm/amd/display/dc/dcn20/dcn20_init.c |  54 +--
 .../drm/amd/display/dc/dcn21/dcn21_hwseq.c|   1 +
 .../drm/amd/display/dc/dcn21/dcn21_hwseq.h|   2 +
 .../gpu/drm/amd/display/dc/dcn21/dcn21_init.c |  63 +--
 .../gpu/drm/amd/display/dc/inc/hw_sequencer.h | 370 +-
 .../amd/display/dc/inc/hw_sequencer_private.h | 156 
 27 files changed, 525 insertions(+), 470 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2645d20e8c4c..e384c143bb58 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2004,6 +2004,12 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.update_info_frame(pipe_ctx);
}
 
+   if (stream_update->hdr_static_metadata &&
+   stream->use_dynamic_meta &&
+   dc->hwss.set_dmdata_attributes &&
+   
pipe_ctx->stream->dmdata_address.quad_part != 0)
+   dc->hwss.set_dmdata_attributes(pipe_ctx);
+
if (stream_update->gamut_remap)
dc_stream_set_gamut_remap(dc, stream);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index bf13cffed703..502ed3c7959d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -33,7 +33,6 @@
 
 #include "core_status.h"
 #include "core_types.h"
-#include "hw_sequencer.h"
 
 #include "resource.h"
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 70b7c1eb8a8f..b43a4b115fd8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,9 +33,6 @@
 #include "resource.h"
 #include "ipp.h"
 #include "timing_generator.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-#include "dcn10/dcn10_hw_sequencer.h"
-#endif
 
 #define DC_LOGGER dc->ctx->logger
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
index 0275d6d60da4..e1c5839a80dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -25,7 +25,7 @@
 
 #include "dce_hwseq.h"
 #include "reg_helper.h"
-#include "hw_sequencer.h"
+#include "hw_sequencer_private.h"
 #include "core_types.h"
 
 #define CTX \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index bff03a68aa01..c5aa1f48593a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -25,7 +25,7 @@
 #ifndef __DCE_HWSEQ_H__
 #define __DCE_HWSEQ_H__
 
-#include "hw_sequencer.h"
+#include "dc_types.h"
 
 #define BL_REG_LIST()\
SR(LVTMA_PWRSEQ_CNTL), \
@@ 

[PATCH 02/51] drm/amd/display: rename core_dc to dc

2019-12-02 Thread sunpeng . li
From: Anthony Koo 

[Why]
First, to make the code more consistent.
Second, to get rid of those scenarios where we create a second
local pointer to dc when it's already passed in.

[How]
Rename core_dc to dc
Remove duplicate local pointers to dc

Signed-off-by: Anthony Koo 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 .../dc/clk_mgr/dce112/dce112_clk_mgr.c| 12 ++--
 .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c  |  6 +-
 .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c   |  6 +-
 .../gpu/drm/amd/display/dc/core/dc_debug.c|  7 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 65 +--
 .../drm/amd/display/dc/core/dc_link_hwss.c| 26 
 .../gpu/drm/amd/display/dc/core/dc_resource.c |  3 +-
 .../gpu/drm/amd/display/dc/core/dc_stream.c   | 40 ++--
 .../gpu/drm/amd/display/dc/core/dc_surface.c  | 22 +++
 .../display/dc/dce110/dce110_hw_sequencer.c   |  8 +--
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 10 +--
 .../dc/irq/dce110/irq_service_dce110.c|  4 +-
 12 files changed, 102 insertions(+), 107 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index a6c46e903ff9..d031bd3d3072 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -72,8 +72,8 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int 
requested_clk_khz)
struct clk_mgr_internal *clk_mgr_dce = 
TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr_base->ctx->dc_bios;
-   struct dc *core_dc = clk_mgr_base->ctx->dc;
-   struct dmcu *dmcu = core_dc->res_pool->dmcu;
+   struct dc *dc = clk_mgr_base->ctx->dc;
+   struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -110,7 +110,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int 
requested_clk_khz)
 
bp->funcs->set_dce_clock(bp, &dce_clk_params);
 
-   if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+   if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
@@ -126,8 +126,8 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, 
int requested_clk_khz)
 {
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk_mgr->base.ctx->dc_bios;
-   struct dc *core_dc = clk_mgr->base.ctx->dc;
-   struct dmcu *dmcu = core_dc->res_pool->dmcu;
+   struct dc *dc = clk_mgr->base.ctx->dc;
+   struct dmcu *dmcu = dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
memset(&dce_clk_params, 0, sizeof(dce_clk_params));
@@ -152,7 +152,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, 
int requested_clk_khz)
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
 
 
-   if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+   if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 1897e91c8ccb..97b7f32294fd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -88,8 +88,8 @@ int rv1_vbios_smu_send_msg_with_param(struct clk_mgr_internal 
*clk_mgr, unsigned
 int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int 
requested_dispclk_khz)
 {
int actual_dispclk_set_mhz = -1;
-   struct dc *core_dc = clk_mgr->base.ctx->dc;
-   struct dmcu *dmcu = core_dc->res_pool->dmcu;
+   struct dc *dc = clk_mgr->base.ctx->dc;
+   struct dmcu *dmcu = dc->res_pool->dmcu;
 
/*  Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rv1_vbios_smu_send_msg_with_param(
@@ -100,7 +100,7 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal 
*clk_mgr, int requested_di
/* Actual dispclk set is returned in the parameter register */
actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
 
-   if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+   if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if 

[PATCH 23/51] drm/amd/display: fix dml20 min_dst_y_next_start calculation

2019-12-02 Thread sunpeng . li
From: Dmytro Laktyushkin 

Bring this calculation in line with HW programming guide.
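
In effect the register value drops the TTU term and keeps only the vblank start, still scaled by the same factor of four (dml_pow(2, 2)). A standalone sketch of the before/after calculation:

/* Before: the TTU vblank term was folded into the register value. */
static unsigned int min_dst_y_next_start_old(double dlg_vblank_start, double min_dst_y_ttu_vblank)
{
        return (unsigned int)((dlg_vblank_start + min_dst_y_ttu_vblank) * 4.0);
}

/* After: only the vblank start, per the HW programming guide. */
static unsigned int min_dst_y_next_start_new(double dlg_vblank_start)
{
        return (unsigned int)(dlg_vblank_start * 4.0);
}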

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 2c7455e22a65..9df24ececcec 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -929,8 +929,7 @@ static void dml20_rq_dlg_get_dlg_params(struct 
display_mode_lib *mode_lib,
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) 
htotal;
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
 
-   disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) 
dlg_vblank_start
-   + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+   disp_dlg_regs->min_dst_y_next_start = (unsigned int) ((double) 
dlg_vblank_start * dml_pow(2, 2));
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 
18));
 
dml_print("DML_DLG: %s: min_dcfclk_mhz = 
%3.2f\n",
-- 
2.24.0


[PATCH 09/51] drm/amd/display: Disable link before reenable

2019-12-02 Thread sunpeng . li
From: Lucy Li 

[Why]
A black screen is seen after the display is disabled and then
re-enabled. This is caused by a difference in link settings when
switching between different resolutions.

[How]
In the PnP case, or whenever the display is still enabled but the
driver has been unloaded, disable the link before re-enabling it
with the new link settings.
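
A minimal sketch of the guard this adds in the enable path, with simplified types; in the real code enable_link() calls disable_link() when link_status.link_active is still set.

#include <stdbool.h>

struct link_sketch {
        bool link_active;
};

static void disable_link_sketch(struct link_sketch *link)
{
        /* ... tear down the PHY/stream programmed with the old settings ... */
        link->link_active = false;
}

static void enable_link_sketch(struct link_sketch *link)
{
        /* If the display was left enabled (PnP, driver reload), the old link
         * settings may not match the new ones; disable first. */
        if (link->link_active)
                disable_link_sketch(link);

        /* ... program and enable the link with the new settings ... */
        link->link_active = true;
}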

Signed-off-by: Lucy Li 
Reviewed-by: Anthony Koo 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 99 ++-
 1 file changed, 52 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 5a35395e6060..4681ca20f683 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1511,15 +1511,6 @@ static enum dc_status enable_link_dp(
decide_link_settings(stream, &link_settings);
 
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
-   /* If link settings are different than current and link already 
enabled
-* then need to disable before programming to new rate.
-*/
-   if (link->link_status.link_active &&
-   (link->cur_link_settings.lane_count != 
link_settings.lane_count ||
-link->cur_link_settings.link_rate != 
link_settings.link_rate)) {
-   dp_disable_link_phy(link, pipe_ctx->stream->signal);
-   }
-
/*in case it is not on*/
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -2039,6 +2030,45 @@ static void write_i2c_redriver_setting(
ASSERT(i2c_success);
 }
 
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+   /*
+* TODO: implement call for dp_set_hw_test_pattern
+* it is needed for compliance testing
+*/
+
+   /* Here we need to specify that encoder output settings
+* need to be calculated as for the set mode,
+* it will lead to querying dynamic link capabilities
+* which should be done before enable output
+*/
+
+   if (dc_is_dp_signal(signal)) {
+   /* SST DP, eDP */
+   if (dc_is_dp_sst_signal(signal))
+   dp_disable_link_phy(link, signal);
+   else
+   dp_disable_link_phy_mst(link, signal);
+
+   if (dc_is_dp_sst_signal(signal) ||
+   link->mst_stream_alloc_table.stream_count == 0) 
{
+   dp_set_fec_enable(link, false);
+   dp_set_fec_ready(link, false);
+   }
+   } else {
+   if (signal != SIGNAL_TYPE_VIRTUAL)
+   link->link_enc->funcs->disable_output(link->link_enc, 
signal);
+   }
+
+   if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+   /* MST disable link only when no stream use the link */
+   if (link->mst_stream_alloc_table.stream_count <= 0)
+   link->link_status.link_active = false;
+   } else {
+   link->link_status.link_active = false;
+   }
+}
+
 static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
 {
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -2123,6 +2153,19 @@ static enum dc_status enable_link(
struct pipe_ctx *pipe_ctx)
 {
enum dc_status status = DC_ERROR_UNEXPECTED;
+   struct dc_stream_state *stream = pipe_ctx->stream;
+   struct dc_link *link = stream->link;
+
+   /* There's some scenarios where driver is unloaded with display
+* still enabled. When driver is reloaded, it may cause a display
+* to not light up if there is a mismatch between old and new
+* link settings. Need to call disable first before enabling at
+* new link settings.
+*/
+   if (link->link_status.link_active) {
+   disable_link(link, pipe_ctx->stream->signal);
+   }
+
switch (pipe_ctx->stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
status = enable_link_dp(state, pipe_ctx);
@@ -2157,44 +2200,6 @@ static enum dc_status enable_link(
return status;
 }
 
-static void disable_link(struct dc_link *link, enum signal_type signal)
-{
-   /*
-* TODO: implement call for dp_set_hw_test_pattern
-* it is needed for compliance testing
-*/
-
-   /* here we need to specify that encoder output settings
-* need to be calculated as for the set mode,
-* it will lead to querying dynamic link capabilities
-* which should be done before enable output */
-
-   if (dc_is_dp_signal(signal)) {
-   /* SST DP, eDP */
-   if (dc_is_dp_sst_signal(signal))
-   dp_disable_link_phy(link, signal);
-   else
-   dp_disable_link_phy_mst(link, signal);
-
-   

[PATCH 11/51] drm/amd/display: Only wait for DMUB phy init on dcn21

2019-12-02 Thread sunpeng . li
From: Nicholas Kazlauskas 

[Why]
The wait for PHY init won't finish if the firmware doesn't support it.

[How]
Only hook this functionality up on DCN21 and move it out of DCN20.

For ASICs without support, this should return OK so we don't hang
while waiting in DC.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 5 -
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 2 --
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 5 +
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h | 2 ++
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c   | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index e2b2cf2e01fd..6b7d54572aa3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -135,8 +135,3 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
 
return supported;
 }
-
-bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub)
-{
-   return REG_READ(DMCUB_SCRATCH10) == 0;
-}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index e1ba748ca594..ca7db03b94f7 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -59,6 +59,4 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
 
 bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
 
-bool dmub_dcn20_is_phy_init(struct dmub_srv *dmub);
-
 #endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index d40a808112e7..b9dc2dd645eb 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -124,3 +124,8 @@ bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub)
 {
return (REG_READ(DMCUB_SCRATCH0) == 3);
 }
+
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub)
+{
+   return REG_READ(DMCUB_SCRATCH10) == 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
index f57969d8d56f..9e5f195e288f 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -42,4 +42,6 @@ void dmub_dcn21_setup_windows(struct dmub_srv *dmub,
 
 bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub);
 
+bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub);
+
 #endif /* _DMUB_DCN21_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 60c574a39c6a..3ec26f6af2e1 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -76,13 +76,13 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum 
dmub_asic asic)
funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
-   funcs->is_phy_init = dmub_dcn20_is_phy_init;
funcs->is_hw_init = dmub_dcn20_is_hw_init;
 
if (asic == DMUB_ASIC_DCN21) {
funcs->backdoor_load = dmub_dcn21_backdoor_load;
funcs->setup_windows = dmub_dcn21_setup_windows;
funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done;
+   funcs->is_phy_init = dmub_dcn21_is_phy_init;
}
break;
 
-- 
2.24.0


[PATCH 24/51] drm/amd/display: Reset steer fifo before unblanking the stream

2019-12-02 Thread sunpeng . li
From: Nikola Cornij 

[why]
During a mode transition the steer fifo can overflow. Quite often it
recovers by itself, but sometimes it doesn't.

[how]
Add a steer fifo reset before unblanking the stream. Also add a short
delay when resetting the dig resync fifo to make sure register writes
don't end up back-to-back, in which case the HW might miss the reset
request.
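
The resulting unblank ordering, as a commented standalone sketch; the helpers below are placeholders standing in for the driver's REG_UPDATE/REG_WAIT macros and udelay.

static void reg_write(const char *field, int val) { (void)field; (void)val; }
static void reg_poll(const char *field, int val)  { (void)field; (void)val; }
static void delay_us(int us)                      { (void)us; }

static void dp_unblank_sequence_sketch(void)
{
        /* 1. Make sure the stream is disabled before touching the fifos. */
        reg_write("DP_VID_STREAM_ENABLE", 0);
        reg_poll("DP_VID_STREAM_STATUS", 0);

        /* 2. Pulse DIG_START to reset the resync fifo; the short delay keeps
         *    the two writes from landing back-to-back. */
        reg_write("DIG_START", 1);
        delay_us(1);
        reg_write("DIG_START", 0);

        /* 3. Pulse the steer fifo reset before handing the encoder CRTC data,
         *    so a mode-transition overflow cannot stick. */
        reg_write("DP_STEER_FIFO_RESET", 1);
        delay_us(10);
        reg_write("DP_STEER_FIFO_RESET", 0);
}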

Signed-off-by: Nikola Cornij 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dcn20/dcn20_stream_encoder.c  | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index be0978401476..9b70a1e7b962 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -488,15 +488,23 @@ void enc2_stream_encoder_dp_unblank(
DP_VID_N_MUL, n_multiply);
}
 
-   /* set DIG_START to 0x1 to reset FIFO */
+   /* make sure stream is disabled before resetting steer fifo */
+   REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
+   REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
 
+   /* set DIG_START to 0x1 to reset FIFO */
REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+   udelay(1);
 
/* write 0 to take the FIFO out of reset */
 
REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
 
-   /* switch DP encoder to CRTC data */
+   /* switch DP encoder to CRTC data, but reset it the fifo first. It may 
happen
+* that it overflows during mode transition, and sometimes doesn't 
recover.
+*/
+   REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
+   udelay(10);
 
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
 
-- 
2.24.0


[PATCH 13/51] drm/amd/display: Program CW5 for tracebuffer for dcn20

2019-12-02 Thread sunpeng . li
From: Nicholas Kazlauskas 

[Why]
On dcn21, CW5 is programmed for tracebuffer support, but it isn't
being programmed on dcn20.

DMCUB execution hits an undefined address 6500 on tracebuffer
access.

[How]
Program CW5.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c 
b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 6b7d54572aa3..302dd3d4b77d 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -99,6 +99,13 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS,
  cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE,
  1);
+
+   REG_WRITE(DMCUB_REGION3_CW5_OFFSET, cw5->offset.u.low_part);
+   REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, cw5->offset.u.high_part);
+   REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
+   REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
+ DMCUB_REGION3_CW5_ENABLE, 1);
 }
 
 void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
-- 
2.24.0


[PATCH 08/51] drm/amd/display: Reset PHY in link re-training

2019-12-02 Thread sunpeng . li
From: Paul Hsieh 

[Why]
Link training randomly fails when plugging a USB-C display in/out.

[How]
If link training fails, reset the PHY before re-training the link.
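
A simplified sketch of the retry structure after this change, where every attempt re-enables (and thereby resets) the PHY before training; the names are stand-ins for perform_link_training_with_retries and its callees.

#include <stdbool.h>

#define ATTEMPTS_SKETCH 4

struct link_s;
struct link_settings_s;

static void enable_phy(struct link_s *l, const struct link_settings_s *s) { (void)l; (void)s; }
static void disable_phy(struct link_s *l) { (void)l; }
static bool train_once(struct link_s *l, const struct link_settings_s *s) { (void)l; (void)s; return false; }

static bool train_with_retries_sketch(struct link_s *link, const struct link_settings_s *settings)
{
        int attempt;

        for (attempt = 0; attempt < ATTEMPTS_SKETCH; attempt++) {
                enable_phy(link, settings);     /* PHY is reset on every attempt */

                if (train_once(link, settings))
                        return true;

                disable_phy(link);              /* tear down before retrying */
        }

        return false;
}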

Signed-off-by: Paul Hsieh 
Reviewed-by: Wenjing Liu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 32 ++---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 68 +++
 .../drm/amd/display/dc/core/dc_link_hwss.c| 14 +---
 .../gpu/drm/amd/display/dc/inc/dc_link_dp.h   |  5 +-
 4 files changed, 66 insertions(+), 53 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 093f6c808876..5a35395e6060 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1495,7 +1495,6 @@ static enum dc_status enable_link_dp(
bool skip_video_pattern;
struct dc_link *link = stream->link;
struct dc_link_settings link_settings = {0};
-   enum dp_panel_mode panel_mode;
bool fec_enable;
int i;
bool apply_seamless_boot_optimization = false;
@@ -1531,40 +1530,17 @@ static enum dc_status enable_link_dp(
if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, 
false);
 
-   dp_enable_link_phy(
-   link,
-   pipe_ctx->stream->signal,
-   pipe_ctx->clock_source->id,
-   &link_settings);
-
-   if (stream->sink_patches.dppowerup_delay > 0) {
-   int delay_dp_power_up_in_ms = 
stream->sink_patches.dppowerup_delay;
-
-   msleep(delay_dp_power_up_in_ms);
-   }
-
-   panel_mode = dp_get_panel_mode(link);
-   dp_set_panel_mode(link, panel_mode);
-
-   /* We need to do this before the link training to ensure the idle 
pattern in SST
-* mode will be sent right after the link training */
-   link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
-   
pipe_ctx->stream_res.stream_enc->id, true);
skip_video_pattern = true;
 
if (link_settings.link_rate == LINK_RATE_LOW)
skip_video_pattern = false;
 
-   if (link->aux_access_disabled) {
-   dc_link_dp_perform_link_training_skip_aux(link, &link_settings);
-
-   link->cur_link_settings = link_settings;
-   status = DC_OK;
-   } else if (perform_link_training_with_retries(
-   link,
+   if (perform_link_training_with_retries(
&link_settings,
skip_video_pattern,
-   LINK_TRAINING_ATTEMPTS)) {
+   LINK_TRAINING_ATTEMPTS,
+   pipe_ctx,
+   pipe_ctx->stream->signal)) {
link->cur_link_settings = link_settings;
status = DC_OK;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 272261192e82..537b4dee8f22 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1433,23 +1433,58 @@ enum link_training_result 
dc_link_dp_perform_link_training(
 }
 
 bool perform_link_training_with_retries(
-   struct dc_link *link,
const struct dc_link_settings *link_setting,
bool skip_video_pattern,
-   int attempts)
+   int attempts,
+   struct pipe_ctx *pipe_ctx,
+   enum signal_type signal)
 {
uint8_t j;
uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+   struct dc_stream_state *stream = pipe_ctx->stream;
+   struct dc_link *link = stream->link;
+   enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
 
for (j = 0; j < attempts; ++j) {
 
-   if (dc_link_dp_perform_link_training(
+   dp_enable_link_phy(
+   link,
+   signal,
+   pipe_ctx->clock_source->id,
+   link_setting);
+
+   if (stream->sink_patches.dppowerup_delay > 0) {
+   int delay_dp_power_up_in_ms = 
stream->sink_patches.dppowerup_delay;
+
+   msleep(delay_dp_power_up_in_ms);
+   }
+
+   dp_set_panel_mode(link, panel_mode);
+
+   /* We need to do this before the link training to ensure the 
idle pattern in SST
+* mode will be sent right after the link training
+*/
+   link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+   
pipe_ctx->stream_res.stream_enc->id, true);
+
+   if (link->aux_access_disabled) {
+   dc_link_dp_perform_link_training_skip_aux(link, 
link_setting);
+   return true;
+   } else if 

[PATCH 22/51] drm/amd/display: Disable chroma viewport w/a when rotated 180 degrees

2019-12-02 Thread sunpeng . li
From: Michael Strauss 

[WHY]
The previous Renoir chroma viewport workaround fixed an MPO flicker by
increasing the chroma viewport size. However, when the MPO plane is
rotated 180 degrees, the viewport is read in reverse. Since the
workaround increases the viewport size, reading it in reverse causes a
vertical chroma offset.

[HOW]
Pass the rotation value to the viewport set functions.
Temporarily disable the chroma viewport w/a when the hubp is rotated
180 degrees.
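
A small sketch of the guard, using an illustrative rotation enum; the real code checks the dc_rotation_angle passed down to hubp21_set_viewport before applying the chroma viewport padding.

enum rotation_sketch { ROT_0, ROT_90, ROT_180, ROT_270 };

/* Apply the NV12 chroma viewport height padding only when the viewport will
 * not be read back in reverse (i.e. not when rotated 180 degrees). */
static int patched_chroma_height(int viewport_c_height, int padding, enum rotation_sketch rot)
{
        if (rot == ROT_180)
                return viewport_c_height;           /* w/a disabled: avoids the offset */

        return viewport_c_height + padding;         /* w/a enabled */
}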

Signed-off-by: Michael Strauss 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 ++-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 4 +++-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 ++-
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c| 3 ++-
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 7 +--
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h  | 4 +++-
 6 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 31b64733d693..4d1301e5eaf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -810,7 +810,8 @@ static void hubp1_set_vm_context0_settings(struct hubp 
*hubp,
 void min_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
-   const struct rect *viewport_c)
+   const struct rect *viewport_c,
+   enum dc_rotation_angle rotation)
 {
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 780af5b3c16f..e44eaae5033b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -749,7 +749,9 @@ void hubp1_set_blank(struct hubp *hubp, bool blank);
 
 void min_set_viewport(struct hubp *hubp,
const struct rect *viewport,
-   const struct rect *viewport_c);
+   const struct rect *viewport_c,
+   enum dc_rotation_angle rotation);
+/* rotation angle added for use by hubp21_set_viewport */
 
 void hubp1_clk_cntl(struct hubp *hubp, bool enable);
 void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 2b3081ee0e07..2440e28493e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2286,7 +2286,8 @@ static void dcn10_update_dchubp_dpp(
hubp->funcs->mem_program_viewport(
hubp,
_ctx->plane_res.scl_data.viewport,
-   _ctx->plane_res.scl_data.viewport_c);
+   _ctx->plane_res.scl_data.viewport_c,
+   plane_state->rotation);
}
 
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 8091c7c1e0d0..ece0817708f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1363,7 +1363,8 @@ static void dcn20_update_dchubp_dpp(
hubp->funcs->mem_program_viewport(
hubp,
_ctx->plane_res.scl_data.viewport,
-   _ctx->plane_res.scl_data.viewport_c);
+   _ctx->plane_res.scl_data.viewport_c,
+   plane_state->rotation);
 
/* Any updates are handled in dc interface, just need to apply existing 
for plane enable */
if ((pipe_ctx->update_flags.bits.enable || 
pipe_ctx->update_flags.bits.opp_changed)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 4408aed5087b..38661b9c61f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -169,7 +169,8 @@ static void hubp21_setup(
 void hubp21_set_viewport(
struct hubp *hubp,
const struct rect *viewport,
-   const struct rect *viewport_c)
+   const struct rect *viewport_c,
+   enum dc_rotation_angle rotation)
 {
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
int patched_viewport_height = 0;
@@ -196,9 +197,11 @@ void hubp21_set_viewport(
 *  Work around for underflow issue with NV12 + rIOMMU translation
 *  + immediate flip. This will cause hubp underflow, but will not
 *  be user visible since underflow is in blank region
+*  Disable w/a when rotated 180 degrees, causes vertical chroma 
offset
 */
patched_viewport_height = 

[PATCH 00/51] DC Patches - 2 Dec 2019

2019-12-02 Thread sunpeng . li
From: Leo Li 

Summary of changes:

* More DMCUB updates for Renoir
* Cleanup and refactor of DC hardware sequencer interface

Amanda Liu (1):
  drm/amd/display: Fix screen tearing on vrr tests

Anthony Koo (4):
  drm/amd/display: rename core_dc to dc
  drm/amd/display: add separate of private hwss functions
  drm/amd/display: add DP protocol version
  drm/amd/display: Limit NV12 chroma workaround

Aric Cyr (3):
  drm/amd/display: 3.2.61
  drm/amd/display: fix cursor positioning for multiplane cases
  drm/amd/display: 3.2.62

Brandon Syu (1):
  drm/amd/display: fixed that I2C over AUX didn't read data issue

David Galiffi (1):
  drm/amd/display: Fixed kernel panic when booting with DP-to-HDMI
dongle

Dmytro Laktyushkin (2):
  drm/amd/display: fix dml20 min_dst_y_next_start calculation
  drm/amd/display: update dml related structs

Eric Yang (3):
  drm/amd/display: update sr and pstate latencies for Renoir
  drm/amd/display: fix dprefclk and ss percentage reading on RN
  drm/amd/display: update dispclk and dppclk vco frequency

George Shen (1):
  drm/amd/display: Increase the number of retries after AUX DEFER

Hugo Hu (1):
  drm/amd/display: Save/restore link setting for disable phy when link
retraining

Jaehyun Chung (1):
  drm/amd/display: Wrong ifdef guards were used around DML validation

Joseph Gravenor (5):
  drm/amd/display: fix DalDramClockChangeLatencyNs override
  drm/amd/display: populate bios integrated info for renoir
  drm/amd/display: have two different sr and pstate latency tables for
renoir
  drm/amd/display: update p-state latency for renoir when using lpddr4
  drm/amd/display: update sr latency for renoir when using lpddr4

Krunoslav Kovac (1):
  drm/amd/display: Change HDR_MULT check

Leo (Hanghong) Ma (1):
  drm/amd/display: Change the delay time before enabling FEC

Lucy Li (1):
  drm/amd/display: Disable link before reenable

Michael Strauss (2):
  drm/amd/display: Fix Dali clk mgr construct
  drm/amd/display: Disable chroma viewport w/a when rotated 180 degrees

Mikita Lipski (1):
  drm/amd/display: Return a correct error value

Nicholas Kazlauskas (6):
  drm/amd/display: Only wait for DMUB phy init on dcn21
  drm/amd/display: Return DMUB_STATUS_OK when autoload unsupported
  drm/amd/display: Program CW5 for tracebuffer for dcn20
  drm/amd/display: Split DMUB cmd type into type/subtype
  drm/amd/display: Add shared DMCUB/driver firmware state cache window
  drm/amd/display: Extend DMCUB offload testing into dcn20/21

Nikola Cornij (2):
  drm/amd/display: Map DSC resources 1-to-1 if numbers of OPPs and DSCs
are equal
  drm/amd/display: Reset steer fifo before unblanking the stream

Noah Abradjian (3):
  drm/amd/display: Remove flag check in mpcc update
  drm/amd/display: Modify logic for when to wait for mpcc idle
  drm/amd/display: Remove redundant call

Paul Hsieh (1):
  drm/amd/display: Reset PHY in link re-training

Reza Amini (2):
  drm/amd/display: Implement DePQ for DCN1
  drm/amd/display: Implement DePQ for DCN2

Wenjing Liu (3):
  drm/amd/display: add dc dsc functions to return bpp range for pixel
encoding
  drm/amd/display: remove spam DSC log
  drm/amd/display: add dsc policy getter

Yongqiang Sun (2):
  drm/amd/display: Add DMCUB__PG_DONE trace code enum
  drm/amd/display: Compare clock state member to determine optimization.

abdoulaye berthe (3):
  drm/amd/display: add log for lttpr
  drm/amd/display: check for repeater when setting aux_rd_interval.
  drm/amd/display: correct log message for lttpr

 .../drm/amd/display/dc/bios/bios_parser2.c|   2 +
 .../drm/amd/display/dc/bios/command_table2.c  |  13 +-
 .../gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  |   7 +
 .../dc/clk_mgr/dce112/dce112_clk_mgr.c|  12 +-
 .../dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c  |   6 +-
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 146 +--
 .../dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c   |   6 +-
 drivers/gpu/drm/amd/display/dc/core/dc.c  |  12 +-
 .../gpu/drm/amd/display/dc/core/dc_debug.c|   8 +-
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 198 +-
 .../gpu/drm/amd/display/dc/core/dc_link_ddc.c |   2 +-
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 209 +++---
 .../drm/amd/display/dc/core/dc_link_hwss.c|  40 +-
 .../gpu/drm/amd/display/dc/core/dc_resource.c |   9 +-
 .../gpu/drm/amd/display/dc/core/dc_stream.c   |  43 +-
 .../gpu/drm/amd/display/dc/core/dc_surface.c  |  22 +-
 drivers/gpu/drm/amd/display/dc/dc.h   |   7 +-
 drivers/gpu/drm/amd/display/dc/dc_dsc.h   |  16 +-
 drivers/gpu/drm/amd/display/dc/dc_helper.c|   3 +
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c  |  32 +-
 .../gpu/drm/amd/display/dc/dce/dce_hwseq.c|   2 +-
 .../gpu/drm/amd/display/dc/dce/dce_hwseq.h|   6 +-
 .../display/dc/dce100/dce100_hw_sequencer.c   |   3 +-
 .../display/dc/dce100/dce100_hw_sequencer.h   |   1 +
 .../display/dc/dce110/dce110_hw_sequencer.c   |  85 ++--
 .../display/dc/dce110/dce110_hw_sequencer.h   |   1 +
 

[PATCH 15/51] drm/amd/display: Fixed kernel panic when booting with DP-to-HDMI dongle

2019-12-02 Thread sunpeng . li
From: David Galiffi 

[Why]
In dc_link_is_dp_sink_present, if dal_ddc_open fails, then
dal_gpio_destroy_ddc is called, destroying pin_data and pin_clock.
They are created only in dc_construct, so the next aux access will
cause a panic.

[How]
Instead of calling dal_gpio_destroy_ddc, call dal_ddc_close.
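
A condensed sketch of the corrected error path, with stubbed helpers; the point is only that a failed open is answered with a close, never by destroying the GPIO pins created at construction.

#include <stdbool.h>

struct ddc_sketch;

static bool ddc_open(struct ddc_sketch *ddc)  { (void)ddc; return false; } /* stub: open fails */
static void ddc_close(struct ddc_sketch *ddc) { (void)ddc; }

static bool probe_dp_sink_sketch(struct ddc_sketch *ddc)
{
        if (!ddc_open(ddc)) {
                /* Close the failed session; do not destroy the GPIO pins,
                 * they are created once and must survive for later aux access. */
                ddc_close(ddc);
                return false;
        }

        /* ... read the sink-present GPIO ... */
        ddc_close(ddc);
        return true;
}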

Signed-off-by: David Galiffi 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4681ca20f683..cef8c1ba9797 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -372,7 +372,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
 
if (GPIO_RESULT_OK != dal_ddc_open(
ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
-   dal_gpio_destroy_ddc(&ddc);
+   dal_ddc_close(ddc);
 
return present;
}
-- 
2.24.0


[PATCH 07/51] drm/amd/display: Wrong ifdef guards were used around DML validation

2019-12-02 Thread sunpeng . li
From: Jaehyun Chung 

[Why]
Wrong guards were causing the debug option not to run.

[How]
Changed the guard to the correct one, matching the rq, ttu, dlg regs struct
members that need to be guarded. Also log a message when validation starts.

Signed-off-by: Jaehyun Chung 
Reviewed-by: Alvin Lee 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c  | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 1 +
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e384c143bb58..061e8adf7476 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2187,7 +2187,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (dc->hwss.program_front_end_for_ctx && update_type != 
UPDATE_TYPE_FAST) {
dc->hwss.program_front_end_for_ctx(dc, context);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_DRM_AMD_DC_DCN
if (dc->debug.validate_dml_output) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx cur_pipe = 
context->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 2823be75b071..84d7ac5dd206 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1257,6 +1257,7 @@ void hubp2_validate_dml_output(struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
DC_LOGGER_INIT(ctx->logger);
+   DC_LOG_DEBUG("DML Validation | Running Validation");
 
/* Requestor Regs */
REG_GET(HUBPRET_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 0be1c917b242..4408aed5087b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -267,6 +267,7 @@ void hubp21_validate_dml_output(struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st dlg_attr = {0};
struct _vcs_dpi_display_ttu_regs_st ttu_attr = {0};
DC_LOGGER_INIT(ctx->logger);
+   DC_LOG_DEBUG("DML Validation | Running Validation");
 
/* Requester - Per hubp */
REG_GET(HUBPRET_CONTROL,
-- 
2.24.0


[PATCH 06/51] drm/amd/display: fix DalDramClockChangeLatencyNs override

2019-12-02 Thread sunpeng . li
From: Joseph Gravenor 

[why]
pstate_latency_us never gets updated from the hard coded value
in rn_clk_mgr.c

[how]
update the wm table's values before we do calculations with them

Signed-off-by: Joseph Gravenor 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 818c7a629484..fef11d57d2b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1011,9 +1011,12 @@ static void patch_bounding_box(struct dc *dc, struct 
_vcs_dpi_soc_bounding_box_s
}
 
if (dc->bb_overrides.dram_clock_change_latency_ns) {
-   bb->dram_clock_change_latency_us =
+   for (i = 0; i < WM_SET_COUNT; i++) {
+   
dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 
1000.0;
+   }
}
+
kernel_fpu_end();
 }
 
-- 
2.24.0


[PATCH 14/51] drm/amd/display: populate bios integrated info for renoir

2019-12-02 Thread sunpeng . li
From: Joseph Gravenor 

[Why]
When video_memory_type bw_params->vram_type is assigned, we don't
distinguish between Ddr4MemType and LpDdr4MemType. Because of this
we will never report that we are using LpDdr4MemType and never
re-purpose WM set D.

[How]
populate the bios integrated info for renoir by adding the
revision number for renoir, and use that integrated info
table instead of asic_id to get the vram type
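
A minimal sketch of the selection change with placeholder types; the real code copies memory_type and ma_channel_number from the parsed integrated info instead of deriving them from asic_id.

enum mem_type_sketch { DDR4_SKETCH, LPDDR4_SKETCH };

struct integrated_info_sketch {
        enum mem_type_sketch memory_type;
        int ma_channel_number;
};

struct bw_params_sketch {
        enum mem_type_sketch vram_type;
        int num_channels;
};

/* Take the vram type from the BIOS integrated info so LPDDR4 can actually
 * be reported and WM set D re-purposed. */
static void populate_vram_info_sketch(struct bw_params_sketch *bw,
                                      const struct integrated_info_sketch *info)
{
        bw->vram_type = info->memory_type;
        bw->num_channels = info->ma_channel_number;
}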

Signed-off-by: Joseph Gravenor 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c |  1 +
 .../gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  | 10 ++
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index eb06ee765c78..72795ae81dd0 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1638,6 +1638,7 @@ static enum bp_result construct_integrated_info(
/* Don't need to check major revision as they are all 1 */
switch (revision.minor) {
case 11:
+   case 12:
result = get_integrated_info_v11(bp, info);
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 841095d09d3c..9f0381c68844 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -569,7 +569,7 @@ static unsigned int find_dcfclk_for_voltage(struct 
dpm_clocks *clock_table, unsi
return 0;
 }
 
-static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params 
*bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params 
*bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
 {
int i, j = 0;
 
@@ -601,8 +601,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct 
clk_bw_params *bw_params
bw_params->clk_table.entries[i].dcfclk_mhz = 
find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
}
 
-   bw_params->vram_type = asic_id->vram_type;
-   bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
+   bw_params->vram_type = bios_info->memory_type;
+   bw_params->num_channels = bios_info->ma_channel_number;
 
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -685,7 +685,9 @@ void rn_clk_mgr_construct(
 
if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
-   rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+   if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+   rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+   }
}
 
if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 
0x00371500) {
-- 
2.24.0


[PATCH 17/51] drm/amd/display: fix dprefclk and ss percentage reading on RN

2019-12-02 Thread sunpeng . li
From: Eric Yang 

[Why]
Before, the HW counter value was used to determine dprefclk. That
takes ss into account but has large variation, which is not good
enough for generating the audio dto. Also, the bios parser code to
get the ss percentage was not working.

[How]
After this change, dprefclk is hard-coded, the same as on RV; we
don't expect this to change on Renoir. Also modify the bios parser
code to get the right ss percentage.

Signed-off-by: Eric Yang 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/bios/bios_parser2.c   |  1 +
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c| 16 +++-
 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h  |  1 +
 3 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 72795ae81dd0..da29fd62f56a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -834,6 +834,7 @@ static enum bp_result bios_parser_get_spread_spectrum_info(
case 1:
return get_ss_info_v4_1(bp, signal, index, ss_info);
case 2:
+   case 3:
return get_ss_info_v4_2(bp, signal, index, ss_info);
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 89ed230cdb26..307c8540e36f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -675,7 +675,6 @@ void rn_clk_mgr_construct(
 {
struct dc_debug_options *debug = &ctx->dc->debug;
struct dpm_clocks clock_table = { 0 };
-   struct clk_state_registers_and_bypass s = { 0 };
 
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn21_funcs;
@@ -695,7 +694,6 @@ void rn_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
clk_mgr->base.dentist_vco_freq_khz = 360;
-   clk_mgr->base.dprefclk_khz = 60;
} else {
struct clk_log_info log_info = {0};
 
@@ -706,24 +704,16 @@ void rn_clk_mgr_construct(
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 360;
 
-   rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
-   /* Convert dprefclk units from MHz to KHz */
-   /* Value already divided by 10, some resolution lost */
-   clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
-
-   /* in case we don't get a value from the register, use default 
*/
-   if (clk_mgr->base.dprefclk_khz == 0) {
-   ASSERT(clk_mgr->base.dprefclk_khz == 60);
-   clk_mgr->base.dprefclk_khz = 60;
-   }
-
if (ctx->dc_bios->integrated_info->memory_type == 
LpDdr4MemType) {
rn_bw_params.wm_table = lpddr4_wm_table;
} else {
rn_bw_params.wm_table = ddr4_wm_table;
}
+   /* Saved clocks configured at boot for debug purposes */
+   rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
}
 
+   clk_mgr->base.dprefclk_khz = 60;
dce_clock_read_ss_info(clk_mgr);
 
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 4aa09fe954c5..ac530c057ddd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -191,6 +191,7 @@ struct clk_mgr {
bool psr_allow_active_cache;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, 
need to figureout where this goes
int dentist_vco_freq_khz;
+   struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
 };
 
-- 
2.24.0


[PATCH 16/51] drm/amd/display: have two different sr and pstate latency tables for renoir

2019-12-02 Thread sunpeng . li
From: Joseph Gravenor 

[Why]
The new sr and pstate latencies are optimized for the case where we
are not using lpddr4 memory.

[How]
Have two different wm tables, one for the lpddr4 case and one for
the non-lpddr4 case.

Signed-off-by: Joseph Gravenor 
Reviewed-by: Eric Yang 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 114 --
 1 file changed, 80 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 9f0381c68844..89ed230cdb26 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -518,44 +518,83 @@ struct clk_bw_params rn_bw_params = {
.num_entries = 4,
},
 
-   .wm_table = {
-   .entries = {
-   {
-   .wm_inst = WM_A,
-   .wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 11.72,
-   .sr_exit_time_us = 6.09,
-   .sr_enter_plus_exit_time_us = 7.14,
-   .valid = true,
-   },
-   {
-   .wm_inst = WM_B,
-   .wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 11.72,
-   .sr_exit_time_us = 10.12,
-   .sr_enter_plus_exit_time_us = 11.48,
-   .valid = true,
-   },
-   {
-   .wm_inst = WM_C,
-   .wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 11.72,
-   .sr_exit_time_us = 10.12,
-   .sr_enter_plus_exit_time_us = 11.48,
-   .valid = true,
-   },
-   {
-   .wm_inst = WM_D,
-   .wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 11.72,
-   .sr_exit_time_us = 10.12,
-   .sr_enter_plus_exit_time_us = 11.48,
-   .valid = true,
-   },
+};
+
+struct wm_table ddr4_wm_table = {
+   .entries = {
+   {
+   .wm_inst = WM_A,
+   .wm_type = WM_TYPE_PSTATE_CHG,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 6.09,
+   .sr_enter_plus_exit_time_us = 7.14,
+   .valid = true,
+   },
+   {
+   .wm_inst = WM_B,
+   .wm_type = WM_TYPE_PSTATE_CHG,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 10.12,
+   .sr_enter_plus_exit_time_us = 11.48,
+   .valid = true,
+   },
+   {
+   .wm_inst = WM_C,
+   .wm_type = WM_TYPE_PSTATE_CHG,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 10.12,
+   .sr_enter_plus_exit_time_us = 11.48,
+   .valid = true,
+   },
+   {
+   .wm_inst = WM_D,
+   .wm_type = WM_TYPE_PSTATE_CHG,
+   .pstate_latency_us = 11.72,
+   .sr_exit_time_us = 10.12,
+   .sr_enter_plus_exit_time_us = 11.48,
+   .valid = true,
},
}
 };
 
+struct wm_table lpddr4_wm_table = {
+   .entries = {
+   {
+   .wm_inst = WM_A,
+   .wm_type = WM_TYPE_PSTATE_CHG,
+   .pstate_latency_us = 23.84,
+   .sr_exit_time_us = 12.5,
+   .sr_enter_plus_exit_time_us = 17.0,
+   .valid = true,
+   },
+   {
+   .wm_inst = WM_B,
+   .wm_type = WM_TYPE_PSTATE_CHG,
+   .pstate_latency_us = 23.84,
+   .sr_exit_time_us = 12.5,
+   .sr_enter_plus_exit_time_us = 17.0,
+   .valid = true,
+   },
+   {
+   .wm_inst = WM_C,
+   .wm_type = WM_TYPE_PSTATE_CHG,
+   .pstate_latency_us = 23.84,
+   .sr_exit_time_us = 12.5,
+   .sr_enter_plus_exit_time_us = 17.0,
+   .valid = true,
+   },
+   {
+   .wm_inst = WM_D,
+   

[PATCH 20/51] drm/amd/display: fixed that I2C over AUX didn't read data issue

2019-12-02 Thread sunpeng . li
From: Brandon Syu 

[Why]
The aux payload length variables are declared as uint8_t, which does
not match the 32-bit values assigned to them and truncates the length.

[How]
Use uint32_t for them instead.
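
For illustration, the kind of silent truncation a uint8_t length invites; this is a standalone example, not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t requested = 256;       /* e.g. a longer I2C-over-AUX transfer */
        uint8_t  length8   = requested; /* silently wraps to 0 */

        printf("requested=%u stored_in_uint8=%u\n", (unsigned)requested, (unsigned)length8);
        return 0;
}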

Signed-off-by: Brandon Syu 
Reviewed-by: Charlene Liu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c  | 2 +-
 drivers/gpu/drm/amd/display/include/i2caux_interface.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 3fc9752edfe0..c2c136b12184 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -589,7 +589,7 @@ bool dal_ddc_service_query_ddc_data(
 bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
struct aux_payload *payload)
 {
-   uint8_t retrieved = 0;
+   uint32_t retrieved = 0;
bool ret = 0;
 
if (!ddc)
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h 
b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
index bb012cb1a9f5..c7fbb9c3ad6b 100644
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -42,7 +42,7 @@ struct aux_payload {
bool write;
bool mot;
uint32_t address;
-   uint8_t length;
+   uint32_t length;
uint8_t *data;
/*
 * used to return the reply type of the transaction
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 18/51] drm/amd/display: 3.2.61

2019-12-02 Thread sunpeng . li
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 3e6133f8cdc4..34b824270c84 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.60"
+#define DC_VER "3.2.61"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 26/51] drm/amd/display: update p-state latency for renoir when using lpddr4

2019-12-02 Thread sunpeng . li
From: Joseph Gravenor 

[Why]
The DF team has produced more optimized latency numbers for LPDDR4.

[How]
Change the p-state latency in the LPDDR4 wm table to the new latency
number.

Signed-off-by: Joseph Gravenor 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 307c8540e36f..901e7035bf8e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -562,7 +562,7 @@ struct wm_table lpddr4_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.65333,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 17.0,
.valid = true,
@@ -570,7 +570,7 @@ struct wm_table lpddr4_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.65333,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 17.0,
.valid = true,
@@ -578,7 +578,7 @@ struct wm_table lpddr4_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.65333,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 17.0,
.valid = true,
@@ -586,7 +586,7 @@ struct wm_table lpddr4_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
-   .pstate_latency_us = 23.84,
+   .pstate_latency_us = 11.65333,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 17.0,
.valid = true,
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 25/51] drm/amd/display: Implement DePQ for DCN1

2019-12-02 Thread sunpeng . li
From: Reza Amini 

[Why]
Need support for more color management on 10-bit surfaces.

[How]
Provide DePQ (PQ degamma) support for 10-bit surfaces.
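
For reference, compute_de_pq() in color_gamma.c evaluates, in fixed
point, what is essentially the SMPTE ST 2084 inverse EOTF. With the
standard PQ constants (m1 = 2610/16384, m2 = 128 * 2523/4096,
c1 = 3424/4096, c2 = 32 * 2413/4096, c3 = 32 * 2392/4096, quoted from
the spec rather than from this patch), a PQ-coded value N in [0, 1]
maps to normalized linear light L as:

	L = \left(\frac{\max\left(N^{1/m_2} - c_1,\ 0\right)}
	               {c_2 - c_3\, N^{1/m_2}}\right)^{1/m_1}

The change below replaces the clamp-at-zero on the numerator with an
absolute value on the ratio, so the fractional power never sees a
negative base.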

Signed-off-by: Reza Amini 
Reviewed-by: Krunoslav Kovac 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/dcn10/dcn10_dpp_cm.c   |  3 ++
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c |  5 +++
 .../amd/display/modules/color/color_gamma.c   | 39 ++-
 3 files changed, 38 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 6b7593dd0c77..935c892622a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -628,6 +628,9 @@ void dpp1_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+   case IPP_DEGAMMA_MODE_USER_PWL:
+   REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+   break;
default:
BREAK_TO_DEBUGGER();
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 2440e28493e7..9551fefb9d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1465,6 +1465,11 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct 
pipe_ctx *pipe_ctx,
dpp_base->funcs->dpp_set_degamma(dpp_base, 
IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
+   dpp_base->funcs->dpp_set_degamma(dpp_base, 
IPP_DEGAMMA_MODE_USER_PWL);
+   cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+   dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+   result = true;
+   break;
default:
result = false;
break;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c 
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 9b121b08c806..b52c4d379651 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -154,6 +154,7 @@ static void compute_de_pq(struct fixed31_32 in_x, struct 
fixed31_32 *out_y)
 
struct fixed31_32 l_pow_m1;
struct fixed31_32 base, div;
+   struct fixed31_32 base2;
 
 
if (dc_fixpt_lt(in_x, dc_fixpt_zero))
@@ -163,13 +164,15 @@ static void compute_de_pq(struct fixed31_32 in_x, struct 
fixed31_32 *out_y)
dc_fixpt_div(dc_fixpt_one, m2));
base = dc_fixpt_sub(l_pow_m1, c1);
 
-   if (dc_fixpt_lt(base, dc_fixpt_zero))
-   base = dc_fixpt_zero;
-
div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1));
 
-   *out_y = dc_fixpt_pow(dc_fixpt_div(base, div),
-   dc_fixpt_div(dc_fixpt_one, m1));
+   base2 = dc_fixpt_div(base, div);
+   //avoid complex numbers
+   if (dc_fixpt_lt(base2, dc_fixpt_zero))
+   base2 = dc_fixpt_sub(dc_fixpt_zero, base2);
+
+
+   *out_y = dc_fixpt_pow(base2, dc_fixpt_div(dc_fixpt_one, m1));
 
 }
 
@@ -1998,10 +2001,28 @@ bool mod_color_calculate_degamma_params(struct 
dc_transfer_func *input_tf,
tf_pts->x_point_at_y1_green = 1;
tf_pts->x_point_at_y1_blue = 1;
 
-   map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
-   coordinates_x, axis_x, curve,
-   MAX_HW_POINTS, tf_pts,
-   mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+   if (input_tf->tf == TRANSFER_FUNCTION_PQ) {
+   /* just copy current rgb_regamma into  tf_pts */
+   struct pwl_float_data_ex *curvePt = curve;
+   int i = 0;
+
+   while (i <= MAX_HW_POINTS) {
+   tf_pts->red[i]   = curvePt->r;
+   tf_pts->green[i] = curvePt->g;
+   tf_pts->blue[i]  = curvePt->b;
+   ++curvePt;
+   ++i;
+   }
+   } else {
+   //clamps to 0-1
+   map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
+   coordinates_x, axis_x, curve,
+   MAX_HW_POINTS, tf_pts,
+   mapUserRamp && ramp && ramp->type == 
GAMMA_RGB_256);
+   }
+
+
+
if (ramp->type == GAMMA_CUSTOM)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 21/51] drm/amd/display: add log for lttpr

2019-12-02 Thread sunpeng . li
From: abdoulaye berthe 

Signed-off-by: abdoulaye berthe 
Reviewed-by: Wenjing Liu 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c  | 125 +-
 1 file changed, 93 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index b10019106030..486c14e0cd41 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -255,11 +255,18 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
 
-   DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
-   __func__,
-   dpcd_base_lt_offset,
-   dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-
+   if (is_repeater(link, offset)) {
+   DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X 
pattern = %x\n",
+   __func__,
+   offset,
+   dpcd_base_lt_offset,
+   dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+   } else {
+   DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+   __func__,
+   dpcd_base_lt_offset,
+   dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+   }
/*
* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
*/
@@ -289,14 +296,25 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lane,
size_in_bytes);
 
-   DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x max VS 
Reached = %x  max PE Reached = %x\n",
-   __func__,
-   dpcd_base_lt_offset,
-   dpcd_lane[0].bits.VOLTAGE_SWING_SET,
-   dpcd_lane[0].bits.PRE_EMPHASIS_SET,
-   dpcd_lane[0].bits.MAX_SWING_REACHED,
-   dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
-
+   if (is_repeater(link, offset)) {
+   DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+   " 0x%X VS set = %x PE set = %x max VS Reached = 
%x  max PE Reached = %x\n",
+   __func__,
+   offset,
+   dpcd_base_lt_offset,
+   dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+   dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+   dpcd_lane[0].bits.MAX_SWING_REACHED,
+   dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+   } else {
+   DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x 
max VS Reached = %x  max PE Reached = %x\n",
+   __func__,
+   dpcd_base_lt_offset,
+   dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+   dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+   dpcd_lane[0].bits.MAX_SWING_REACHED,
+   dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+   }
if (edp_workaround) {
/* for eDP write in 2 parts because the 5-byte burst is
* causing issues on some eDP panels (EPR#366724)
@@ -544,23 +562,42 @@ static void get_lane_status_and_drive_settings(
 
ln_status_updated->raw = dpcd_buf[2];
 
-   DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X 
Lane23Status = %x\n ",
-   __func__,
-   lane01_status_address, dpcd_buf[0],
-   lane01_status_address + 1, dpcd_buf[1]);
-
+   if (is_repeater(link, offset)) {
+   DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+   " 0x%X Lane01Status = %x\n 0x%X Lane23Status = 
%x\n ",
+   __func__,
+   offset,
+   lane01_status_address, dpcd_buf[0],
+   lane01_status_address + 1, dpcd_buf[1]);
+   } else {
+   DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X 
Lane23Status = %x\n ",
+   __func__,
+   lane01_status_address, dpcd_buf[0],
+   lane01_status_address + 1, dpcd_buf[1]);
+   }
lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
 
if (is_repeater(link, offset))
lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 
+
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * 
(offset - 1));
 
-   DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X 
Lane23AdjustRequest = %x\n",
-   __func__,
-   lane01_adjust_address,
-   dpcd_buf[lane_adjust_offset],
-   lane01_adjust_address + 1,
-   dpcd_buf[lane_adjust_offset + 1]);

[PATCH 29/51] drm/amd/display: Return a correct error value

2019-12-02 Thread sunpeng . li
From: Mikita Lipski 

[why]
The function is expected to return the instance of the timing generator,
so we shouldn't be returning a boolean from an integer function. We also
shouldn't return zero on failure, since zero is a valid instance, so
change the error return to -1.
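
A minimal illustration of the pitfall (generic sketch, not the driver
code): when an int return value doubles as an instance/index, 0 (which
is also "false") is a legitimate result, so -1 is the unambiguous
"not found" sentinel.

	/* Sketch: 0 is a valid instance, so use -1 for failure. */
	static int find_tg_inst(const int *ids, int count, int wanted)
	{
		int i;

		for (i = 0; i < count; i++)
			if (ids[i] == wanted)
				return i;   /* may legitimately be 0 */

		return -1;                  /* unambiguous error */
	}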

Signed-off-by: Mikita Lipski 
Reviewed-by: Martin Leung 
Acked-by: Anthony Koo 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a9412720c860..0c19de678339 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1866,7 +1866,7 @@ static int acquire_resource_from_hw_enabled_state(
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
 
if (inst == ENGINE_ID_UNKNOWN)
-   return false;
+   return -1;
 
for (i = 0; i < pool->stream_enc_count; i++) {
if (pool->stream_enc[i]->id == inst) {
@@ -1878,10 +1878,10 @@ static int acquire_resource_from_hw_enabled_state(
 
// tg_inst not found
if (i == pool->stream_enc_count)
-   return false;
+   return -1;
 
if (tg_inst >= pool->timing_generator_count)
-   return false;
+   return -1;
 
if (!res_ctx->pipe_ctx[tg_inst].stream) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 10/51] drm/amd/display: Add DMCUB__PG_DONE trace code enum

2019-12-02 Thread sunpeng . li
From: Yongqiang Sun 

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
index b0ee099d8a6e..6b3ee42db350 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h
@@ -45,6 +45,7 @@ enum dmucb_trace_code {
DMCUB__DMCU_ISR_LOAD_END,
DMCUB__MAIN_IDLE,
DMCUB__PERF_TRACE,
+   DMCUB__PG_DONE,
 };
 
 struct dmcub_trace_buf_entry {
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 27/51] drm/amd/display: add DP protocol version

2019-12-02 Thread sunpeng . li
From: Anthony Koo 

[Why]
We want to know the DP protocol version.

[How]
In DC create, initialize a cap to indicate the maximum DP protocol
version supported.

Signed-off-by: Anthony Koo 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 ++
 drivers/gpu/drm/amd/display/dc/dc.h  | 5 +
 2 files changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 061e8adf7476..55f22a1c0aa5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -809,6 +809,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->caps.max_audios = dc->res_pool->audio_count;
dc->caps.linear_pitch_alignment = 64;
 
+   dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+
/* Populate versioning information */
dc->versions.dc_ver = DC_VER;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 34b824270c84..4c7a2882a512 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -54,6 +54,10 @@ struct dc_versions {
struct dmcu_version dmcu_version;
 };
 
+enum dp_protocol_version {
+   DP_VERSION_1_4,
+};
+
 enum dc_plane_type {
DC_PLANE_TYPE_INVALID,
DC_PLANE_TYPE_DCE_RGB,
@@ -114,6 +118,7 @@ struct dc_caps {
bool extended_aux_timeout_support;
bool dmcub_support;
bool hw_3d_lut;
+   enum dp_protocol_version max_dp_protocol_version;
struct dc_plane_cap planes[MAX_PLANES];
 };
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 28/51] drm/amd/display: Save/restore link setting for disable phy when link retraining

2019-12-02 Thread sunpeng . li
From: Hugo Hu 

[Why]
The link settings are modified after the PHY is disabled, which causes
DP compliance failures.

[How]
Save and restore the link settings around disabling the link PHY when
retraining the link.

Signed-off-by: Hugo Hu 
Reviewed-by: Wenjing Liu 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 486c14e0cd41..015fa0c52746 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2788,9 +2788,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, 
union hpd_irq_data *out_hpd
union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
union device_service_irq device_service_clear = { { 0 } };
enum dc_status result;
-
bool status = false;
struct pipe_ctx *pipe_ctx;
+   struct dc_link_settings previous_link_settings;
int i;
 
if (out_link_loss)
@@ -2873,9 +2873,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, 
union hpd_irq_data *out_hpd
if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
return false;
 
+   previous_link_settings = link->cur_link_settings;
dp_disable_link_phy(link, pipe_ctx->stream->signal);
 
-   perform_link_training_with_retries(&link->cur_link_settings,
+   perform_link_training_with_retries(&previous_link_settings,
true, LINK_TRAINING_ATTEMPTS,
pipe_ctx,
pipe_ctx->stream->signal);
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 19/51] drm/amd/display: Change the delay time before enabling FEC

2019-12-02 Thread sunpeng . li
From: "Leo (Hanghong) Ma" 

[why]
The DP spec requires a 1000-symbol delay between the end of link training
and enabling FEC in the stream. Currently we are using a 1 millisecond
delay, which is not accurate.

[how]
One lane at RBR gives the maximum time for transmitting 1000 LL codes,
which is 6.173 us. So use a 7 microsecond delay instead of
1 millisecond.
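
A quick sanity check of the 6.173 us figure (back-of-the-envelope sketch;
the 1.62 Gb/s RBR rate and 10-bit 8b/10b link symbols are standard DP
values, not stated in this patch):

	/* 1000 link-layer codes * 10 bits each, on one lane at RBR (1.62 Gb/s):
	 * 10000 bits / 1.62e9 bit/s ~= 6.17 us, rounded up to 7 us of margin. */
	static unsigned int fec_enable_delay_us(void)
	{
		const unsigned long long bits = 1000ULL * 10;
		const unsigned long long rate_bps = 1620000000ULL;

		return (unsigned int)((bits * 1000000ULL + rate_bps - 1) / rate_bps);
	}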

Signed-off-by: Leo (Hanghong) Ma 
Reviewed-by: Harry Wentland 
Reviewed-by: Nikola Cornij 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 537b4dee8f22..b10019106030 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3951,7 +3951,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
if (link_enc->funcs->fec_set_enable &&
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
if (link->fec_state == dc_link_fec_ready && enable) {
-   msleep(1);
+   /* Accord to DP spec, FEC enable sequence can first
+* be transmitted anytime after 1000 LL codes have
+* been transmitted on the link after link training
+* completion. Using 1 lane RBR should have the maximum
+* time for transmitting 1000 LL codes which is 6.173 
us.
+* So use 7 microseconds delay instead.
+*/
+   udelay(7);
link_enc->funcs->fec_set_enable(link_enc, true);
link->fec_state = dc_link_fec_enabled;
} else if (link->fec_state == dc_link_fec_enabled && !enable) {
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amdgpu/sriov: No need the event 3 and 4 now

2019-12-02 Thread Liu, Zhan


> -Original Message-
> From: amd-gfx  On Behalf Of
> Emily Deng
> Sent: 2019/November/30, Saturday 5:42 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Deng, Emily 
> Subject: [PATCH] drm/amdgpu/sriov: No need the event 3 and 4 now
> 
> Since unload kms is called when initialization fails, and unload kms will
> send events 3 and 4, there is no need to send events 3 and 4 again in
> device init.
> 
> Signed-off-by: Emily Deng 

Reviewed-by: Zhan Liu 

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 --
>  1 file changed, 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index d1d573d..0393e35 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -3036,8 +3036,6 @@ int amdgpu_device_init(struct amdgpu_device
> *adev,
>   }
>   dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
>   amdgpu_vf_error_put(adev,
> AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
> - if (amdgpu_virt_request_full_gpu(adev, false))
> - amdgpu_virt_release_full_gpu(adev, false);
>   goto failed;
>   }
> 
> --
> 2.7.4
> 
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH 3/3] drm/amdgpu: load np fw prior before loading the TAs

2019-12-02 Thread Deucher, Alexander
> -Original Message-
> From: Hawking Zhang 
> Sent: Monday, December 2, 2019 1:04 AM
> To: amd-gfx@lists.freedesktop.org; Min, Frank ;
> Clements, John ; Deucher, Alexander
> 
> Cc: Zhang, Hawking 
> Subject: [PATCH 3/3] drm/amdgpu: load np fw prior before loading the TAs
> 
> Platform TAs will independently toggle DF Cstate; for instance, get/set
> topology from the XGMI TA, or do error injection from the RAS TA. In
> such cases, PMFW needs to be loaded before the TAs so that all subsequent
> Cstate calls received by PSP FW can be routed to PMFW.
> 
> Change-Id: I83db1a22577a84ae647e7e570c200057650096c5
> Signed-off-by: Hawking Zhang 


Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 66 --
> ---
>  1 file changed, 33 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> index 0e8907179e07..ceea8314d88d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> @@ -1218,39 +1218,6 @@ static int psp_hw_start(struct psp_context *psp)
>   return ret;
>   }
> 
> - ret = psp_asd_load(psp);
> - if (ret) {
> - DRM_ERROR("PSP load asd failed!\n");
> - return ret;
> - }
> -
> - if (adev->gmc.xgmi.num_physical_nodes > 1) {
> - ret = psp_xgmi_initialize(psp);
> - /* Warning the XGMI seesion initialize failure
> -  * Instead of stop driver initialization
> -  */
> - if (ret)
> - dev_err(psp->adev->dev,
> - "XGMI: Failed to initialize XGMI session\n");
> - }
> -
> - if (psp->adev->psp.ta_fw) {
> - ret = psp_ras_initialize(psp);
> - if (ret)
> - dev_err(psp->adev->dev,
> - "RAS: Failed to initialize RAS\n");
> -
> - ret = psp_hdcp_initialize(psp);
> - if (ret)
> - dev_err(psp->adev->dev,
> - "HDCP: Failed to initialize HDCP\n");
> -
> - ret = psp_dtm_initialize(psp);
> - if (ret)
> - dev_err(psp->adev->dev,
> - "DTM: Failed to initialize DTM\n");
> - }
> -
>   return 0;
>  }
> 
> @@ -1560,6 +1527,39 @@ static int psp_load_fw(struct amdgpu_device
> *adev)
>   if (ret)
>   goto failed;
> 
> + ret = psp_asd_load(psp);
> + if (ret) {
> + DRM_ERROR("PSP load asd failed!\n");
> + return ret;
> + }
> +
> + if (adev->gmc.xgmi.num_physical_nodes > 1) {
> + ret = psp_xgmi_initialize(psp);
> + /* Warning the XGMI seesion initialize failure
> +  * Instead of stop driver initialization
> +  */
> + if (ret)
> + dev_err(psp->adev->dev,
> + "XGMI: Failed to initialize XGMI session\n");
> + }
> +
> + if (psp->adev->psp.ta_fw) {
> + ret = psp_ras_initialize(psp);
> + if (ret)
> + dev_err(psp->adev->dev,
> + "RAS: Failed to initialize RAS\n");
> +
> + ret = psp_hdcp_initialize(psp);
> + if (ret)
> + dev_err(psp->adev->dev,
> + "HDCP: Failed to initialize HDCP\n");
> +
> + ret = psp_dtm_initialize(psp);
> + if (ret)
> + dev_err(psp->adev->dev,
> + "DTM: Failed to initialize DTM\n");
> + }
> +
>   return 0;
> 
>  failed:
> --
> 2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH 2/3] drm/amdgpu: unload asd in psp hw de-init phase

2019-12-02 Thread Deucher, Alexander
> -Original Message-
> From: Hawking Zhang 
> Sent: Monday, December 2, 2019 1:04 AM
> To: amd-gfx@lists.freedesktop.org; Min, Frank ;
> Clements, John ; Deucher, Alexander
> 
> Cc: Zhang, Hawking 
> Subject: [PATCH 2/3] drm/amdgpu: unload asd in psp hw de-init phase
> 
> issue unload_ta_cmd to tOS to unload asd driver
> 
> Change-Id: I697cfc1774205ed6cbe22eb3c16143b603543564
> Signed-off-by: Hawking Zhang 


Reviewed-by: Alex Deucher 


> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 36
> +
>  1 file changed, 36 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> index bdc9e7ae4892..0e8907179e07 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> @@ -356,6 +356,40 @@ static int psp_asd_load(struct psp_context *psp)
>   return ret;
>  }
> 
> +static void psp_prep_asd_unload_cmd_buf(struct psp_gfx_cmd_resp
> *cmd,
> + uint32_t asd_session_id)
> +{
> + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
> + cmd->cmd.cmd_unload_ta.session_id = asd_session_id; }
> +
> +static int psp_asd_unload(struct psp_context *psp) {
> + int ret;
> + struct psp_gfx_cmd_resp *cmd;
> +
> + if (amdgpu_sriov_vf(psp->adev))
> + return 0;
> +
> + if (!psp->asd_context.asd_initialized)
> + return 0;
> +
> + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
> + if (!cmd)
> + return -ENOMEM;
> +
> + psp_prep_asd_unload_cmd_buf(cmd, psp-
> >asd_context.session_id);
> +
> + ret = psp_cmd_submit_buf(psp, NULL, cmd,
> +  psp->fence_buf_mc_addr);
> + if (!ret)
> + psp->asd_context.asd_initialized = false;
> +
> + kfree(cmd);
> +
> + return ret;
> +}
> +
>  static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
>   uint32_t id, uint32_t value)
>  {
> @@ -1583,6 +1617,8 @@ static int psp_hw_fini(void *handle)
>   psp_hdcp_terminate(psp);
>   }
> 
> + psp_asd_unload(psp);
> +
>   psp_ring_destroy(psp, PSP_RING_TYPE__KM);
> 
>   pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
> --
> 2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH] drm/amd/display: fix typos for dcn20_funcs and dcn21_funcs struct

2019-12-02 Thread Koo, Anthony
Reviewed-by: Anthony Koo 

-Original Message-
From: Stanley.Yang  
Sent: Thursday, November 28, 2019 10:52 PM
To: amd-gfx-boun...@lists.freedesktop.org
Cc: Koo, Anthony ; Yang, Stanley 
Subject: [PATCH] drm/amd/display: fix typos for dcn20_funcs and dcn21_funcs 
struct

In the dcn20_funcs and dcn21_funcs structs, the member initializer
".dsc_pg_control = NULL" should be removed, because .dsc_pg_control is
already assigned to dcn20_dsc_pg_control.

Change-Id: Ic24095e79cdac23e502995e07e75886573d1c112
Signed-off-by: Stanley.Yang 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 1 -  
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 1 -
 2 files changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 51b6c25aa3c5..e5debb7c500f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -94,7 +94,6 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
-   .dsc_pg_control = NULL,
.program_triplebuffer = dcn20_program_triple_buffer,
.enable_writeback = dcn20_enable_writeback,
.disable_writeback = dcn20_disable_writeback, diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 1d8b67b4e252..d733f8fcfab2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -95,7 +95,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
-   .dsc_pg_control = NULL,
.program_triplebuffer = dcn20_program_triple_buffer,
.enable_writeback = dcn20_enable_writeback,
.disable_writeback = dcn20_disable_writeback,
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 50/51] drm/amd/display: correct log message for lttpr

2019-12-02 Thread sunpeng . li
From: abdoulaye berthe 

[Why]
When setting the LTTPR mode, the new mode to be set is not logged
properly.

[How]
Update the log messages to show the right mode.

Signed-off-by: abdoulaye berthe 
Reviewed-by: George Shen 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index dfcd6421ee01..42aa889fd0f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1219,7 +1219,7 @@ static void configure_lttpr_mode(struct dc_link *link)
uint8_t repeater_id;
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
 
-   DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", 
__func__);
+   DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", 
__func__);
core_link_write_dpcd(link,
DP_PHY_REPEATER_MODE,
(uint8_t *)&repeater_mode,
@@ -1227,7 +1227,7 @@ static void configure_lttpr_mode(struct dc_link *link)
 
if (!link->is_lttpr_mode_transparent) {
 
-   DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", 
__func__);
+   DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent 
Mode\n", __func__);
 
repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
core_link_write_dpcd(link,
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 31/51] drm/amd/display: Add shared DMCUB/driver firmware state cache window

2019-12-02 Thread sunpeng . li
From: Nicholas Kazlauskas 

[Why]
Scratch registers are limited on the DMCUB and we have an expanding
list of state to track between driver and DMCUB.

[How]
Place shared state in cache window 6. The cache window size is aligned
to the size of the cache line on the DMCUB to make it easy to
invalidate.

The shared state is intended to be read only from driver side so
it's been marked as const.

The use of volatile is intentional. The memory for the shared firmware
state is memory mapped from the framebuffer memory. The DMCUB will
flush its cache after modifying the region. There's no way for the x86
side to know whether this data is stale or not, so we intentionally
disable optimization to force a fresh read on every access.
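
As a rough sketch of what that looks like on the driver side (an
illustrative declaration only; the authoritative field layout is in
dmub_srv.h):

	struct example_srv {
		/* Read-only view of firmware-owned memory. 'volatile' keeps the
		 * compiler from caching reads, since the DMCUB can rewrite the
		 * region behind the CPU's back. */
		volatile const struct dmub_fw_state *fw_state;
	};

	static uint8_t example_read_psr_state(const struct example_srv *srv)
	{
		return srv->fw_state->psr_state;  /* always a fresh read */
	}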

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../drm/amd/display/dmub/inc/dmub_fw_state.h  | 73 +++
 .../gpu/drm/amd/display/dmub/inc/dmub_srv.h   |  8 +-
 .../gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 10 ++-
 .../gpu/drm/amd/display/dmub/src/dmub_dcn20.h |  3 +-
 .../gpu/drm/amd/display/dmub/src/dmub_dcn21.c | 12 ++-
 .../gpu/drm/amd/display/dmub/src/dmub_dcn21.h |  3 +-
 .../gpu/drm/amd/display/dmub/src/dmub_srv.c   | 27 +--
 7 files changed, 125 insertions(+), 11 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h

diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h
new file mode 100644
index ..c87b1ba7590e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_FW_STATE_H_
+#define _DMUB_FW_STATE_H_
+
+#include "dmub_types.h"
+
+#pragma pack(push, 1)
+
+struct dmub_fw_state {
+   /**
+* @phy_initialized_during_fw_boot:
+*
+* Detects if VBIOS/VBL has ran before firmware boot.
+* A value of 1 will usually mean S0i3 boot.
+*/
+   uint8_t phy_initialized_during_fw_boot;
+
+   /**
+* @intialized_phy:
+*
+* Bit vector of initialized PHY.
+*/
+   uint8_t initialized_phy;
+
+   /**
+* @enabled_phy:
+*
+* Bit vector of enabled PHY for DP alt mode switch tracking.
+*/
+   uint8_t enabled_phy;
+
+   /**
+* @dmcu_fw_loaded:
+*
+* DMCU auto load state.
+*/
+   uint8_t dmcu_fw_loaded;
+
+   /**
+* @psr_state:
+*
+* PSR state tracking.
+*/
+   uint8_t psr_state;
+};
+
+#pragma pack(pop)
+
+#endif /* _DMUB_FW_STATE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index fdedbe15e026..528243e35add 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -67,6 +67,7 @@
 #include "dmub_types.h"
 #include "dmub_cmd.h"
 #include "dmub_rb.h"
+#include "dmub_fw_state.h"
 
 #if defined(__cplusplus)
 extern "C" {
@@ -102,7 +103,7 @@ enum dmub_window_id {
DMUB_WINDOW_3_VBIOS,
DMUB_WINDOW_4_MAILBOX,
DMUB_WINDOW_5_TRACEBUFF,
-   DMUB_WINDOW_6_RESERVED,
+   DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_RESERVED,
DMUB_WINDOW_TOTAL,
 };
@@ -241,7 +242,8 @@ struct dmub_srv_hw_funcs {
  const struct dmub_window *cw2,
  const struct dmub_window *cw3,
  const struct dmub_window *cw4,
- const struct dmub_window *cw5);
+ const struct dmub_window *cw5,
+ const struct dmub_window *cw6);
 
void (*setup_mailbox)(struct dmub_srv *dmub,
  const struct dmub_region *inbox1);

[PATCH 41/51] drm/amd/display: fix cursor positioning for multiplane cases

2019-12-02 Thread sunpeng . li
From: Aric Cyr 

[Why]
Cursor position needs to take into account plane scaling as well.

[How]
Translate cursor coords from stream space to plane space.
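
The translation in the diff below is just the linear map from stream
(destination) space into plane source space. As a worked example, with
invented numbers (dst_rect.x = 960, dst_rect.width = 1920,
src_rect.width = 3840) and a cursor at stream x = 2880:

	x_{plane} = (x_{stream} - \mathrm{dst.x}) \cdot
	            \frac{\mathrm{src.width}}{\mathrm{dst.width}}
	          = (2880 - 960) \cdot \frac{3840}{1920} = 3840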

Signed-off-by: Aric Cyr 
Reviewed-by: Anthony Koo 
Acked-by: Leo Li 
Acked-by: Nicholas Kazlauskas 
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 33 ++-
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 9551fefb9d1d..61d2f1233f8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2913,15 +2913,30 @@ void dcn10_set_cursor_position(struct pipe_ctx 
*pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
-   uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
-   uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
-   uint32_t x_offset = min(x_plane, pos_cpy.x);
-   uint32_t y_offset = min(y_plane, pos_cpy.y);
-
-   pos_cpy.x -= x_offset;
-   pos_cpy.y -= y_offset;
-   pos_cpy.x_hotspot += (x_plane - x_offset);
-   pos_cpy.y_hotspot += (y_plane - y_offset);
+
+   int x_plane = pipe_ctx->plane_state->dst_rect.x;
+   int y_plane = pipe_ctx->plane_state->dst_rect.y;
+   int x_pos = pos_cpy.x;
+   int y_pos = pos_cpy.y;
+
+   // translate cursor from stream space to plane space
+   x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
+   pipe_ctx->plane_state->dst_rect.width;
+   y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
+   pipe_ctx->plane_state->dst_rect.height;
+
+   if (x_pos < 0) {
+   pos_cpy.x_hotspot -= x_pos;
+   x_pos = 0;
+   }
+
+   if (y_pos < 0) {
+   pos_cpy.y_hotspot -= y_pos;
+   y_pos = 0;
+   }
+
+   pos_cpy.x = (uint32_t)x_pos;
+   pos_cpy.y = (uint32_t)y_pos;
 
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 44/51] drm/amd/display: Implement DePQ for DCN2

2019-12-02 Thread sunpeng . li
From: Reza Amini 

[Why]
Need support for more color management on 10-bit surfaces.

[How]
Provide DePQ (PQ degamma) support for 10-bit surfaces.

Signed-off-by: Reza Amini 
Reviewed-by: Krunoslav Kovac 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 3 +++
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c  | 5 +
 2 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
index 2d112c316424..05a3e7f97ef0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
@@ -149,6 +149,9 @@ void dpp2_set_degamma(
case IPP_DEGAMMA_MODE_HW_xvYCC:
REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
break;
+   case IPP_DEGAMMA_MODE_USER_PWL:
+   REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+   break;
default:
BREAK_TO_DEBUGGER();
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 2d093ff0a76c..ec9838d6e0ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -878,6 +878,11 @@ bool dcn20_set_input_transfer_func(struct dc *dc,
IPP_DEGAMMA_MODE_BYPASS);
break;
case TRANSFER_FUNCTION_PQ:
+   dpp_base->funcs->dpp_set_degamma(dpp_base, 
IPP_DEGAMMA_MODE_USER_PWL);
+   cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
+   dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
+   result = true;
+   break;
default:
result = false;
break;
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 30/51] drm/amd/display: Split DMUB cmd type into type/subtype

2019-12-02 Thread sunpeng . li
From: Nicholas Kazlauskas 

[Why]
Commands will be considered a stable ABI between driver and firmware.

Commands are also split between DC commands, DAL feature commands,
and VBIOS commands.

Commands are currently not designated to a specific ID and the enum
does not provide a stable ABI.

We currently group all of these into a single command type of 8-bits.
With the stable ABI consideration in mind it's not unreasonable to
run out of command IDs.

For cleaner separation and versioning split the commands into a main
type and a subtype.

[How]
For commands where performance matters (like reg sequences) these
are still considered main commands.

Sub commands will be split by ownership/feature.

Update existing command sequences to reflect new changes.
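
A sketch of the resulting header shape (field names and widths here are
illustrative; the authoritative definition is the command header in
dmub_cmd.h, which is truncated in this archive):

	union example_dmub_cmd_header {
		struct {
			uint32_t type     : 8;  /* main command: DC, VBIOS, reg seq, ... */
			uint32_t sub_type : 8;  /* feature/ownership-specific sub command */
			uint32_t reserved : 16;
		} bits;
		uint32_t u32;
	};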

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../drm/amd/display/dc/bios/command_table2.c  | 13 +++--
 drivers/gpu/drm/amd/display/dc/dc_helper.c|  3 ++
 .../gpu/drm/amd/display/dmub/inc/dmub_cmd.h   | 48 +++
 .../drm/amd/display/dmub/inc/dmub_cmd_dal.h   | 41 
 .../drm/amd/display/dmub/inc/dmub_cmd_vbios.h | 41 
 5 files changed, 112 insertions(+), 34 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
 create mode 100644 drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_vbios.h

diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c 
b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 1836f16bb7fe..2cb7a4288cb7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -111,7 +111,8 @@ static void encoder_control_dmcub(
 {
struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
 
-   encoder_control.header.type = DMUB_CMD__DIGX_ENCODER_CONTROL;
+   encoder_control.header.type = DMUB_CMD__VBIOS;
+   encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
encoder_control.encoder_control.dig.stream_param = *dig;
 
dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
@@ -219,7 +220,9 @@ static void transmitter_control_dmcub(
 {
struct dmub_rb_cmd_dig1_transmitter_control transmitter_control;
 
-   transmitter_control.header.type = DMUB_CMD__DIG1_TRANSMITTER_CONTROL;
+   transmitter_control.header.type = DMUB_CMD__VBIOS;
+   transmitter_control.header.sub_type =
+   DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
transmitter_control.transmitter_control.dig = *dig;
 
dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
@@ -302,7 +305,8 @@ static void set_pixel_clock_dmcub(
 {
struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
 
-   pixel_clock.header.type = DMUB_CMD__SET_PIXEL_CLOCK;
+   pixel_clock.header.type = DMUB_CMD__VBIOS;
+   pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
pixel_clock.pixel_clock.clk = *clk;
 
dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
@@ -650,7 +654,8 @@ static void enable_disp_power_gating_dmcub(
 {
struct dmub_rb_cmd_enable_disp_power_gating power_gating;
 
-   power_gating.header.type  = DMUB_CMD__ENABLE_DISP_POWER_GATING;
+   power_gating.header.type = DMUB_CMD__VBIOS;
+   power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
power_gating.power_gating.pwr = *pwr;
 
dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c 
b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index e41befa067ce..02a63e9cb62f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -178,6 +178,7 @@ static bool dmub_reg_value_burst_set_pack(const struct 
dc_context *ctx, uint32_t
}
 
cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
+   cmd_buf->header.sub_type = 0;
cmd_buf->addr = addr;
cmd_buf->write_values[offload->reg_seq_count] = reg_val;
offload->reg_seq_count++;
@@ -206,6 +207,7 @@ static uint32_t dmub_reg_value_pack(const struct dc_context 
*ctx, uint32_t addr,
 
/* pack commands */
cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
+   cmd_buf->header.sub_type = 0;
seq = &cmd_buf->seq[offload->reg_seq_count];
 
if (offload->reg_seq_count) {
@@ -230,6 +232,7 @@ static void dmub_reg_wait_done_pack(const struct dc_context 
*ctx, uint32_t addr,
struct dmub_rb_cmd_reg_wait *cmd_buf = >cmd_data.reg_wait;
 
cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
+   cmd_buf->header.sub_type = 0;
cmd_buf->reg_wait.addr = addr;
cmd_buf->reg_wait.condition_field_value = mask & (condition_value << 
shift);
cmd_buf->reg_wait.mask = mask;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 43f1cd647aab..b10728f33f62 100644
--- 

[PATCH 38/51] drm/amd/display: remove spam DSC log

2019-12-02 Thread sunpeng . li
From: Wenjing Liu 

[why]
add_dsc_to_stream_resource could be called for validation.
Failing validation is completely fine.
However failing it inside commit streams is bad.
This code could be triggered for both contexts.
The function itself cannot distinguish the caller, which
makes it impossible to output the log only in the
meaningful case (commit streams).

Signed-off-by: Wenjing Liu 
Reviewed-by: Nikola Cornij 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 2aa6c0be45b4..f853af413582 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1516,7 +1516,6 @@ static enum dc_status add_dsc_to_stream_resource(struct 
dc *dc,
 
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
-   dm_output_to_console("No DSCs available\n");
result = DC_NO_DSC_RESOURCE;
}
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 33/51] drm/amd/display: Remove flag check in mpcc update

2019-12-02 Thread sunpeng . li
From: Noah Abradjian 

[Why]
MPCC programming was being missed during certain split pipe enables
because the full_update flag was not true. This caused a momentary flash
on half the screen. After discussion, we determined that update_mpcc
should not have that flag check, as it should always perform full
programming when called.

[How]
Remove the flag check. We call update_blending within insert_plane, so we
do not need to replace its call from the removed if block.

Signed-off-by: Noah Abradjian 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 6 --
 1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ece0817708f5..fb23142cf535 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2139,12 +2139,6 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx 
*pipe_ctx)
 */
mpcc_id = hubp->inst;
 
-   /* If there is no full update, don't need to touch MPC tree*/
-   if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
-   mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
-   return;
-   }
-
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 39/51] drm/amd/display: add dsc policy getter

2019-12-02 Thread sunpeng . li
From: Wenjing Liu 

dc needs to expose its internal dsc policy.

Signed-off-by: Wenjing Liu 
Reviewed-by: Nikola Cornij 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc_dsc.h |  14 ++-
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 103 
 2 files changed, 75 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h 
b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index d98b89bad353..8ec09813ee17 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -45,6 +45,14 @@ struct display_stream_compressor {
int inst;
 };
 
+struct dc_dsc_policy {
+   bool use_min_slices_h;
+   int max_slices_h; // Maximum available if 0
+   int min_slice_height; // Must not be less than 8
+   uint32_t max_target_bpp;
+   uint32_t min_target_bpp;
+};
+
 bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
@@ -66,7 +74,7 @@ bool dc_dsc_compute_config(
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
 
-bool dc_dsc_get_bpp_range_for_pixel_encoding(enum dc_pixel_encoding pixel_enc,
-   uint32_t *min_bpp,
-   uint32_t *max_bpp);
+void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
+   struct dc_dsc_policy *policy);
+
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index febae6cc7295..d2423ad1fac2 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -27,19 +27,6 @@
 #include 
 #include "dc.h"
 
-struct dc_dsc_policy {
-   bool use_min_slices_h;
-   int max_slices_h; // Maximum available if 0
-   int min_sice_height; // Must not be less than 8
-};
-
-const struct dc_dsc_policy dsc_policy = {
-   .use_min_slices_h = true, // DSC Policy: Use minimum number of slices 
that fits the pixel clock
-   .max_slices_h = 0, // DSC Policy: Use max available slices (in our case 
4 for or 8, depending on the mode)
-   .min_sice_height = 108, // DSC Policy: Use slice height recommended by 
VESA DSC Spreadsheet user guide
-};
-
-
 /* This module's internal functions */
 
 static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
@@ -370,6 +357,7 @@ static void get_dsc_bandwidth_range(
  *or if it couldn't be applied based on DSC policy.
  */
 static bool decide_dsc_target_bpp_x16(
+   const struct dc_dsc_policy *policy,
const struct dsc_enc_caps *dsc_common_caps,
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
@@ -377,13 +365,10 @@ static bool decide_dsc_target_bpp_x16(
 {
bool should_use_dsc = false;
struct dc_dsc_bw_range range;
-   uint32_t min_target_bpp = 0;
-   uint32_t max_target_bpp = 0;
 
memset(, 0, sizeof(range));
 
-   dc_dsc_get_bpp_range_for_pixel_encoding(timing->pixel_encoding, 
&min_target_bpp, &max_target_bpp);
-   get_dsc_bandwidth_range(min_target_bpp, max_target_bpp,
+   get_dsc_bandwidth_range(policy->min_target_bpp, policy->max_target_bpp,
dsc_common_caps, timing, &range);
if (target_bandwidth_kbps >= range.stream_kbps) {
/* enough bandwidth without dsc */
@@ -579,9 +564,11 @@ static bool setup_dsc_config(
bool is_dsc_possible = false;
int pic_height;
int slice_height;
+   struct dc_dsc_policy policy;
 
memset(dsc_cfg, 0, sizeof(struct dc_dsc_config));
 
+   dc_dsc_get_policy_for_timing(timing, &policy);
pic_width = timing->h_addressable + timing->h_border_left + 
timing->h_border_right;
pic_height = timing->v_addressable + timing->v_border_top + 
timing->v_border_bottom;
 
@@ -597,7 +584,12 @@ static bool setup_dsc_config(
goto done;
 
if (target_bandwidth_kbps > 0) {
-   is_dsc_possible = decide_dsc_target_bpp_x16(&dsc_common_caps, target_bandwidth_kbps, timing, &target_bpp);
+   is_dsc_possible = decide_dsc_target_bpp_x16(
+   &policy,
+   &dsc_common_caps,
+   target_bandwidth_kbps,
+   timing,
+   &target_bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
if (!is_dsc_possible)
@@ -699,20 +691,20 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
 
-   if (dsc_policy.use_min_slices_h) {
+   if (policy.use_min_slices_h) {
if (min_slices_h > 0)
num_slices_h = min_slices_h;
else if (max_slices_h > 0) { // Fall back to max slices if min 
slices is not working out
-   if (dsc_policy.max_slices_h)
-   

[PATCH 48/51] drm/amd/display: Compare clock state member to determine optimization.

2019-12-02 Thread sunpeng . li
From: Yongqiang Sun 

[Why]
Passive flips always seem to be requested on Renoir because comparing
clock states with memcmp is an incorrect way to determine whether
optimization is possible.

[How]
Instead of calling memcmp, compare the relevant clock state members to
determine the condition.
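
One reason memcmp() is a poor equality test here (generic illustration,
not the exact dc_clocks layout): struct padding and fields that are
irrelevant to the decision can differ even when the clocks of interest
are identical, so a byte compare reports "changed" on every flip.

	#include <stdbool.h>

	struct clocks_example {
		int  dispclk_khz;
		bool some_unrelated_flag;  /* irrelevant to this decision */
		/* compiler-inserted padding here is never initialized */
		int  dppclk_khz;
	};

	static bool clocks_equal(const struct clocks_example *a,
				 const struct clocks_example *b)
	{
		/* compare only the members that matter, not raw bytes */
		return a->dispclk_khz == b->dispclk_khz &&
		       a->dppclk_khz == b->dppclk_khz;
	}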

Signed-off-by: Yongqiang Sun 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c  | 18 +-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 37230d3d94a0..de51ef12e33a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -471,12 +471,28 @@ static void rn_notify_wm_ranges(struct clk_mgr 
*clk_mgr_base)
 
 }
 
+static bool rn_are_clock_states_equal(struct dc_clocks *a,
+   struct dc_clocks *b)
+{
+   if (a->dispclk_khz != b->dispclk_khz)
+   return false;
+   else if (a->dppclk_khz != b->dppclk_khz)
+   return false;
+   else if (a->dcfclk_khz != b->dcfclk_khz)
+   return false;
+   else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+   return false;
+
+   return true;
+}
+
+
 static struct clk_mgr_funcs dcn21_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rn_update_clocks,
.init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
-   /* .dump_clk_registers = rn_dump_clk_registers, */
+   .are_clock_states_equal = rn_are_clock_states_equal,
.notify_wm_ranges = rn_notify_wm_ranges
 };
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 42/51] drm/amd/display: Fix screen tearing on vrr tests

2019-12-02 Thread sunpeng . li
From: Amanda Liu 

[Why]
Screen tearing is present in tests when setting the frame rate to
certain values.

[How]
Revert the previous optimizations for low frame rates.

Signed-off-by: Amanda Liu 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 .../amd/display/modules/freesync/freesync.c   | 32 ---
 .../amd/display/modules/inc/mod_freesync.h|  1 -
 2 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c 
b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 16e69bbc69aa..5437b50e9f90 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
 #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
 /* Number of elements in the render times cache array */
 #define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower 
limit) */
-#define BTR_MAX_MARGIN 2500
+/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_EXIT_MARGIN 2000
 /* Threshold to change BTR multiplier (to avoid frequent changes) */
 #define BTR_DRIFT_MARGIN 2000
 /*Threshold to exit fixed refresh rate*/
@@ -254,22 +254,24 @@ static void apply_below_the_range(struct core_freesync 
*core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
+   unsigned int min_frame_duration_in_ns = 0;
+   unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-   unsigned int max_render_time_in_us =
-   in_out_vrr->max_duration_in_us - 
in_out_vrr->btr.margin_in_us;
+
+   min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+   (10ULL * 100),
+   (1000000000ULL * 1000000),
 
/* Program BTR */
-   if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < 
max_render_time_in_us) {
+   if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
-   } else if (last_render_time_in_us > (max_render_time_in_us + 
in_out_vrr->btr.margin_in_us / 2)) {
+   } else if (last_render_time_in_us > max_render_time_in_us) {
/* Enter Below the Range */
-   if (!in_out_vrr->btr.btr_active) {
-   in_out_vrr->btr.btr_active = true;
-   }
+   in_out_vrr->btr.btr_active = true;
}
 
/* BTR set to "not active" so disengage */
@@ -325,9 +327,7 @@ static void apply_below_the_range(struct core_freesync 
*core_freesync,
/* Choose number of frames to insert based on how close it
 * can get to the mid point of the variable range.
 */
-   if ((frame_time_in_us / mid_point_frames_ceil) > 
in_out_vrr->min_duration_in_us &&
-   (delta_from_mid_point_in_us_1 < 
delta_from_mid_point_in_us_2 ||
-   mid_point_frames_floor < 2)) {
+   if (delta_from_mid_point_in_us_1 < 
delta_from_mid_point_in_us_2) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = 
delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync 
*core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < 
BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / 
in_out_vrr->btr.frames_to_insert) <
-   max_render_time_in_us) &&
+   in_out_vrr->max_duration_in_us) &&
((last_render_time_in_us / 
in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = 
in_out_vrr->btr.frames_to_insert;
@@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync 
*mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
 
-   in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
-   2 * in_out_vrr->min_duration_in_us;
-   if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
-   in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
-

[PATCH 51/51] drm/amd/display: Extend DMCUB offload testing into dcn20/21

2019-12-02 Thread sunpeng . li
From: Nicholas Kazlauskas 

[Why]
To quickly validate whether DMCUB is running and accepting commands for
offload testing, we want to intercept a common sequence as part of
modeset programming.

[How]
OTG enable causes the most impact in terms of golden register
changes, and it is a single register write.

This approach was previously done in the dcn10 code while it was shared with
dcn20, but it was never ported over to the dedicated dcn20 code.

Port the start, execute and wait sequence over into dcn20_optc.

Signed-off-by: Nicholas Kazlauskas 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index f5854a5d2b76..673c83e2afd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -59,11 +59,16 @@ bool optc2_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
 
+   REG_SEQ_START();
+
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
 
+   REG_SEQ_SUBMIT();
+   REG_SEQ_WAIT_DONE();
+
return true;
 }
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 43/51] drm/amd/display: update dispclk and dppclk vco frequency

2019-12-02 Thread sunpeng . li
From: Eric Yang 

The value obtained from DV does not allow the 8k60 CTA mode with DSC to
pass. After checking the real value being used in hardware, we found that
the correct value is 3600, which allows that mode.

Signed-off-by: Eric Yang 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index fef11d57d2b7..8fa63929d3b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -255,7 +255,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 23.84,
.return_bus_width_bytes = 64,
-   .dispclk_dppclk_vco_speed_mhz = 3550,
+   .dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 45/51] drm/amd/display: 3.2.62

2019-12-02 Thread sunpeng . li
From: Aric Cyr 

Signed-off-by: Aric Cyr 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h
index 4c7a2882a512..c24639080371 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.61"
+#define DC_VER "3.2.62"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 49/51] drm/amd/display: update dml related structs

2019-12-02 Thread sunpeng . li
From: Dmytro Laktyushkin 

In preparation for further changes

Signed-off-by: Dmytro Laktyushkin 
Reviewed-by: Chris Park 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 ++
 drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 3 +++
 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index f853af413582..5e0f0e679899 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1967,6 +1967,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_height = 
timing->v_addressable;
if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
pipes[pipe_cnt].pipe.src.viewport_height = 1080;
+   pipes[pipe_cnt].pipe.src.surface_height_y = 
pipes[pipe_cnt].pipe.src.viewport_height;
pipes[pipe_cnt].pipe.src.data_pitch = 
((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
pipes[pipe_cnt].pipe.dest.recout_width = 
pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
@@ -2000,6 +2001,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.viewport_width_c = 
scl->viewport_c.width;
pipes[pipe_cnt].pipe.src.viewport_height = 
scl->viewport.height;
pipes[pipe_cnt].pipe.src.viewport_height_c = 
scl->viewport_c.height;
+   pipes[pipe_cnt].pipe.src.surface_height_y = 
pln->plane_size.surface_size.height;
if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
pipes[pipe_cnt].pipe.src.data_pitch = 
pln->plane_size.surface_pitch;
pipes[pipe_cnt].pipe.src.data_pitch_c = 
pln->plane_size.chroma_pitch;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 516396d53d01..220d5e610f1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -99,6 +99,7 @@ struct _vcs_dpi_soc_bounding_box_st {
unsigned int num_chans;
unsigned int vmm_page_size_bytes;
unsigned int hostvm_min_page_size_bytes;
+   unsigned int gpuvm_min_page_size_bytes;
double dram_clock_change_latency_us;
double dummy_pstate_latency_us;
double writeback_dram_clock_change_latency_us;
@@ -224,6 +225,7 @@ struct _vcs_dpi_display_pipe_source_params_st {
int source_scan;
int sw_mode;
int macro_tile_size;
+   unsigned int surface_height_y;
unsigned int viewport_width;
unsigned int viewport_height;
unsigned int viewport_y_y;
@@ -400,6 +402,7 @@ struct _vcs_dpi_display_rq_misc_params_st {
 struct _vcs_dpi_display_rq_params_st {
unsigned char yuv420;
unsigned char yuv420_10bpc;
+   unsigned char rgbe_alpha;
display_rq_misc_params_st misc;
display_rq_sizing_params_st sizing;
display_rq_dlg_params_st dlg;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index b1c2b79e42b6..15b72a8b5174 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -231,7 +231,7 @@ static void fetch_socbb_params(struct display_mode_lib 
*mode_lib)
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = 
soc->dcn_downspread_percent;   // new
mode_lib->vba.DISPCLKDPPCLKVCOSpeed = 
soc->dispclk_dppclk_vco_speed_mhz;   // new
mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
-   mode_lib->vba.GPUVMMinPageSize = soc->vmm_page_size_bytes / 1024;
+   mode_lib->vba.GPUVMMinPageSize = soc->gpuvm_min_page_size_bytes / 1024;
mode_lib->vba.HostVMMinPageSize = soc->hostvm_min_page_size_bytes / 
1024;
// Set the voltage scaling clocks as the defaults. Most of these will
// be set to different values by the test
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 34/51] drm/amd/display: check for repeater when setting aux_rd_interval.

2019-12-02 Thread sunpeng . li
From: abdoulaye berthe 

[Why]
When training with a repeater, the aux read interval must be set to the
repeater-specific aux_rd_interval. This value is always 100us for CR.

[How]
Check for a repeater when setting the aux_rd_interval in channel
equalization.
Use the right offset into the aux_rd_interval array.

Signed-off-by: abdoulaye berthe 
Reviewed-by: Wenjing Liu 
Acked-by: George Shen 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c 
b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 015fa0c52746..dfcd6421ee01 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -906,10 +906,10 @@ static enum link_training_result 
perform_channel_equalization_sequence(
/* 3. wait for receiver to lock-on*/
wait_time_microsec = lt_settings->eq_pattern_time;
 
-   if (!link->is_lttpr_mode_transparent)
+   if (is_repeater(link, offset))
wait_time_microsec =
translate_training_aux_read_interval(
-   
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset]);
+   
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
 
wait_for_training_aux_rd_interval(
link,
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 46/51] drm/amd/display: Change HDR_MULT check

2019-12-02 Thread sunpeng . li
From: Krunoslav Kovac 

[Why]
Currently we require HDR_MULT >= 1.0, but there are scenarios where we
need values < 1.0.

[How]
Only guard against 0, which would black-screen the image.
It is up to higher-level logic to decide what HDR_MULT
values are allowed in each particular case.

Signed-off-by: Krunoslav Kovac 
Reviewed-by: Aric Cyr 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 61d2f1233f8c..3996fef56948 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2390,17 +2390,13 @@ void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
struct custom_float_format fmt;
-   bool mult_negative; // True if fixed31_32 sign bit indicates negative 
value
-   uint32_t mult_int; // int component of fixed31_32
 
fmt.exponenta_bits = 6;
fmt.mantissa_bits = 12;
fmt.sign = true;
 
-   mult_negative = multiplier.value >> 63 != 0;
-   mult_int = multiplier.value >> 32;
 
-   if (mult_int && !mult_negative) // Check if greater than 1
+   if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
convert_to_custom_float_format(multiplier, , _mult);
 
pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 47/51] drm/amd/display: Increase the number of retries after AUX DEFER

2019-12-02 Thread sunpeng . li
From: George Shen 

[Why]
When a timeout occurs after a DEFER, some devices require more retries
than in the case of a regular timeout.

[How]
When a timeout occurs, check whether a DEFER was received before the
timeout and, if so, retry MAX_DEFER_RETRIES times instead of
MAX_TIMEOUT_RETRIES times.

Signed-off-by: George Shen 
Reviewed-by: Tony Cheng 
Acked-by: Abdoulaye Berthe 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 32 ++--
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index f7626cd70ec8..191b68b8163a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -611,6 +611,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
uint8_t reply;
bool payload_reply = true;
enum aux_channel_operation_result operation_result;
+   bool retry_on_defer = false;
+
int aux_ack_retries = 0,
aux_defer_retries = 0,
aux_i2c_defer_retries = 0,
@@ -641,8 +643,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
 
case AUX_TRANSACTION_REPLY_AUX_DEFER:
-   case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+   retry_on_defer = true;
+   case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (++aux_defer_retries >= 
AUX_MAX_DEFER_RETRIES) {
goto fail;
} else {
@@ -675,15 +678,24 @@ bool dce_aux_transfer_with_retries(struct ddc_service 
*ddc,
break;
 
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
-   if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
-   goto fail;
-   else {
-   /*
-* DP 1.4, 2.8.2:  AUX Transaction 
Response/Reply Timeouts
-* According to the DP spec there should be 3 
retries total
-* with a 400us wait inbetween each. Hardware 
already waits
-* for 550us therefore no wait is required here.
-*/
+   // Check whether a DEFER had occurred before the 
timeout.
+   // If so, treat timeout as a DEFER.
+   if (retry_on_defer) {
+   if (++aux_defer_retries >= 
AUX_MAX_DEFER_RETRIES)
+   goto fail;
+   else if (payload->defer_delay > 0)
+   msleep(payload->defer_delay);
+   } else {
+   if (++aux_timeout_retries >= 
AUX_MAX_TIMEOUT_RETRIES)
+   goto fail;
+   else {
+   /*
+* DP 1.4, 2.8.2:  AUX Transaction 
Response/Reply Timeouts
+* According to the DP spec there 
should be 3 retries total
+* with a 400us wait inbetween each. 
Hardware already waits
+* for 550us therefore no wait is 
required here.
+*/
+   }
}
break;
 
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 35/51] drm/amd/display: Modify logic for when to wait for mpcc idle

2019-12-02 Thread sunpeng . li
From: Noah Abradjian 

[Why]
I was advised that we may need to check for mpcc idle in more cases
than just when opp_changed is true. Also, mpcc_inst is equal to
pipe_idx, so the for loop can be removed.

[How]
Remove opp_changed flag check and mpcc_inst loop.

Signed-off-by: Noah Abradjian 
Reviewed-by: Dmytro Laktyushkin 
Acked-by: Leo Li 
---
 .../gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index fb23142cf535..2d093ff0a76c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1330,16 +1330,16 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.mpcc
|| plane_state->update_flags.bits.global_alpha_change
|| 
plane_state->update_flags.bits.per_pixel_alpha_change) {
-   /* Need mpcc to be idle if changing opp */
-   if (pipe_ctx->update_flags.bits.opp_changed) {
-   struct pipe_ctx *old_pipe_ctx = 
>current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-   int mpcc_inst;
-
-   for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) 
{
-   if 
(!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
-   continue;
+   // MPCC inst is equal to pipe index in practice
+   int mpcc_inst = pipe_ctx->pipe_idx;
+   int opp_inst;
+   int opp_count = dc->res_pool->res_cap->num_opp;
+
+   for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+   if 
(dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {

dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
-   
old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+   
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+   break;
}
}
hws->funcs.update_mpcc(dc, pipe_ctx);
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 32/51] drm/amd/display: update sr latency for renoir when using lpddr4

2019-12-02 Thread sunpeng . li
From: Joseph Gravenor 

[Why]
The DF team has produced more optimized SR latency numbers for LPDDR4.

[How]
Change the SR latency values in the LPDDR4 WM table to the new latency
numbers.

Signed-off-by: Joseph Gravenor 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 .../amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c| 16 
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 901e7035bf8e..37230d3d94a0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -563,32 +563,32 @@ struct wm_table lpddr4_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
-   .sr_exit_time_us = 12.5,
-   .sr_enter_plus_exit_time_us = 17.0,
+   .sr_exit_time_us = 5.32,
+   .sr_enter_plus_exit_time_us = 6.38,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
-   .sr_exit_time_us = 12.5,
-   .sr_enter_plus_exit_time_us = 17.0,
+   .sr_exit_time_us = 9.82,
+   .sr_enter_plus_exit_time_us = 11.196,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
-   .sr_exit_time_us = 12.5,
-   .sr_enter_plus_exit_time_us = 17.0,
+   .sr_exit_time_us = 9.89,
+   .sr_enter_plus_exit_time_us = 11.24,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
-   .sr_exit_time_us = 12.5,
-   .sr_enter_plus_exit_time_us = 17.0,
+   .sr_exit_time_us = 9.748,
+   .sr_enter_plus_exit_time_us = 11.102,
.valid = true,
},
}
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 40/51] drm/amd/display: Limit NV12 chroma workaround

2019-12-02 Thread sunpeng . li
From: Anthony Koo 

[Why]
The workaround is causing a green line at the bottom of SDR 480p
MPO playback.

[How]
Limit the workaround to viewport heights > 512.

Signed-off-by: Anthony Koo 
Reviewed-by: Tony Cheng 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 38661b9c61f8..332bf3d3a664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -200,7 +200,7 @@ void hubp21_set_viewport(
 *  Disable w/a when rotated 180 degrees, causes vertical chroma 
offset
 */
patched_viewport_height = viewport_c->height;
-   if (viewport_c->height != 0 && debug->nv12_iflip_vm_wa &&
+   if (debug->nv12_iflip_vm_wa && viewport_c->height > 512 &&
rotation != ROTATION_ANGLE_180) {
int pte_row_height = 0;
int pte_rows = 0;
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 37/51] drm/amd/display: add dc dsc functions to return bpp range for pixel encoding

2019-12-02 Thread sunpeng . li
From: Wenjing Liu 

[why]
Need to support 6 bpp for 420 pixel encoding only.

[how]
Add a dc function to determine what bpp range can be supported
for a given pixel encoding.

Signed-off-by: Wenjing Liu 
Reviewed-by: Nikola Cornij 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/dc_dsc.h |  8 +++--
 drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 38 +
 2 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h 
b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index cc9915e545cd..d98b89bad353 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -52,8 +52,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
 bool dc_dsc_compute_bandwidth_range(
const struct display_stream_compressor *dsc,
const uint32_t dsc_min_slice_height_override,
-   const uint32_t min_kbps,
-   const uint32_t max_kbps,
+   const uint32_t min_bpp,
+   const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range);
@@ -65,4 +65,8 @@ bool dc_dsc_compute_config(
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
+
+bool dc_dsc_get_bpp_range_for_pixel_encoding(enum dc_pixel_encoding pixel_enc,
+   uint32_t *min_bpp,
+   uint32_t *max_bpp);
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c 
b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index ec86ba73a039..febae6cc7295 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -31,16 +31,12 @@ struct dc_dsc_policy {
bool use_min_slices_h;
int max_slices_h; // Maximum available if 0
int min_sice_height; // Must not be less than 8
-   int max_target_bpp;
-   int min_target_bpp; // Minimum target bits per pixel
 };
 
 const struct dc_dsc_policy dsc_policy = {
.use_min_slices_h = true, // DSC Policy: Use minimum number of slices 
that fits the pixel clock
.max_slices_h = 0, // DSC Policy: Use max available slices (in our case 
4 for or 8, depending on the mode)
.min_sice_height = 108, // DSC Policy: Use slice height recommended by 
VESA DSC Spreadsheet user guide
-   .max_target_bpp = 16,
-   .min_target_bpp = 8,
 };
 
 
@@ -374,7 +370,6 @@ static void get_dsc_bandwidth_range(
  *or if it couldn't be applied based on DSC policy.
  */
 static bool decide_dsc_target_bpp_x16(
-   const struct dc_dsc_policy *policy,
const struct dsc_enc_caps *dsc_common_caps,
const int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
@@ -382,10 +377,13 @@ static bool decide_dsc_target_bpp_x16(
 {
bool should_use_dsc = false;
struct dc_dsc_bw_range range;
+   uint32_t min_target_bpp = 0;
+   uint32_t max_target_bpp = 0;
 
memset(, 0, sizeof(range));
 
-   get_dsc_bandwidth_range(policy->min_target_bpp, policy->max_target_bpp,
+   dc_dsc_get_bpp_range_for_pixel_encoding(timing->pixel_encoding, 
_target_bpp, _target_bpp);
+   get_dsc_bandwidth_range(min_target_bpp, max_target_bpp,
dsc_common_caps, timing, );
if (target_bandwidth_kbps >= range.stream_kbps) {
/* enough bandwidth without dsc */
@@ -599,7 +597,7 @@ static bool setup_dsc_config(
goto done;
 
if (target_bandwidth_kbps > 0) {
-   is_dsc_possible = decide_dsc_target_bpp_x16(_policy, 
_common_caps, target_bandwidth_kbps, timing, _bpp);
+   is_dsc_possible = decide_dsc_target_bpp_x16(_common_caps, 
target_bandwidth_kbps, timing, _bpp);
dsc_cfg->bits_per_pixel = target_bpp;
}
if (!is_dsc_possible)
@@ -906,3 +904,29 @@ bool dc_dsc_compute_config(
timing, dsc_min_slice_height_override, dsc_cfg);
return is_dsc_possible;
 }
+
+bool dc_dsc_get_bpp_range_for_pixel_encoding(enum dc_pixel_encoding pixel_enc,
+   uint32_t *min_bpp,
+   uint32_t *max_bpp)
+{
+   bool result = true;
+
+   switch (pixel_enc) {
+   case PIXEL_ENCODING_RGB:
+   case PIXEL_ENCODING_YCBCR444:
+   case PIXEL_ENCODING_YCBCR422:
+   *min_bpp = 8;
+   *max_bpp = 16;
+   break;
+   case PIXEL_ENCODING_YCBCR420:
+   *min_bpp = 6;
+   *max_bpp = 16;
+   break;
+   default:
+   *min_bpp = 0;
+   *max_bpp = 0;
+   result = false;
+   }
+
+   return result;
+}
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org

[PATCH 36/51] drm/amd/display: Remove redundant call

2019-12-02 Thread sunpeng . li
From: Noah Abradjian 

[Why]
I was advised that we don't need this call of program_front_end, as
earlier and later calls in the same sequence are sufficient.

[How]
Remove first call of program_front_end in dc_commit_state_no_check.

Signed-off-by: Noah Abradjian 
Reviewed-by: Yongqiang Sun 
Acked-by: Leo Li 
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c 
b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 55f22a1c0aa5..39fe38cb39b6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1167,8 +1167,6 @@ static enum dc_status dc_commit_state_no_check(struct dc 
*dc, struct dc_state *c
context->stream_status[i].plane_count,
context); /* use new pipe config in new context 
*/
}
-   if (dc->hwss.program_front_end_for_ctx)
-   dc->hwss.program_front_end_for_ctx(dc, context);
 
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
-- 
2.24.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [error] Drm -> amdgpu Unrecoverable Machine Check

2019-12-02 Thread Yusuf Altıparmak
>
> Most likely not. There is support for resizing the VRAM BAR, but usually
> you can only make it larger and not smaller.
> Please give me the output of "sudo setpci -s 0001:01:00.0 ECAP15+4.l
> ECAP15+8.l" if you want to double check that.
>

Okay, I'll try it tomorrow. What does the "sudo setpci -s 0001:01:00.0
ECAP15+4.l ECAP15+8.l" command do exactly?



> Well you rather need to ask if anybody has sample PCIe configuration for
> GPUs in general. That problem is not really E9171 related. You might want
> to ask NXP for that maybe.
> Sorry, no idea if that is correct or not. You need to ask NXP for help
> with that.
>
>
Okay, no problem. At least I know what the missing point is now. The problem
is probably in the .dtsi and U-Boot config files. Memory ranges are
overlapping, like you said. I'll ask NXP for a sample PCIe
configuration for GPUs.

Thank you for your interest, Christian.
Regards.


>
> On 02.12.19 at 14:32, Yusuf Altıparmak wrote:
>>
>>
>>> I attached my dts file.
>>>
>>> System is working fine when GPU is not plugged in.
>>>
>>> *This is the last console log before freeze:*
>>> [drm] amdgpu kernel modesetting enabled.
>>>
>>> [drm] initializing kernel modesetting (POLARIS12 0x1002:0x6987
>>> 0x1787:0x2389 0x80).
>>> [drm] register mmio base: 0x2020
>>>
>>> fsl-fman-port ffe488000.port fm1-gb0: renamed from eth0
>>>
>>> [drm] register mmio size: 262144
>>>
>>> [drm] add ip block number 0 
>>>
>>> [drm] add ip block number 1 
>>>
>>> [drm] add ip block number 2 
>>>
>>> [drm] add ip block number 3 
>>>
>>> [drm] add ip block number 4 
>>>
>>> [drm] add ip block number 5 
>>>
>>> [drm] add ip block number 6 
>>>
>>> [drm] add ip block number 7 
>>>
>>> [drm] add ip block number 8 
>>>
>>> [drm] UVD is enabled in VM mode
>>>
>>> [drm] UVD ENC is enabled in VM mode
>>>
>>> [drm] VCE enabled in VM mode
>>>
>>> ATOM BIOS: 113-ER16BFC-001
>>>
>>> [drm] GPU posting now...
>>>
>>> Disabling lock debugging due to kernel taint
>>>
>>> Machine check in kernel mode.
>>>
>>> Caused by (from MCSR=a000): Load Error Report
>>>
>>> Guarded Load Error Report
>>>
>>> Kernel panic - not syncing: Unrecoverable Machine check
>>>
>>> CPU: 1 PID: 2023 Comm: udevd Tainted: G   M
>>>  4.19.26+gc0c2141 #1
>>> Call Trace:
>>>
>>>
>>>
>>> ___
>>> amd-gfx mailing 
>>> listamd-gfx@lists.freedesktop.orghttps://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>>  
>>> 
>>>
>>>
>>>
>>
>> Christian König  wrote on Mon, 2 Dec 2019
>> at 15:28:
>>
>>> Hi Yusuf,
>>>
>>> On 02.12.19 at 12:41, Yusuf Altıparmak wrote:
>>>
>>> My embedded board is freezing when I put E9171 on PCIe. What is the
>>> meaning of Unrecoverable Machine Check error about GPU?
>>>
>>>
>>> Well see the explanation on Wikipedia for example:
>>> https://en.wikipedia.org/wiki/Machine-check_exception
>>> 
>>>
>>> In general it means you have messed up something in your hardware
>>> configuration.
>>>
>>> Could PCIe settings in .dts file cause this problem?
>>>
>>>
>>> Possible, but rather unlikely. My best guess is that it is some problem
>>> with the power supply.
>>>
>>> If it is, is there any sample PCIe configuration for E9171?
>>>
>>>
>>> The E9171 is just a PCIe device, so the dtsi is actually rather
>>> uninteresting. What we really need is a full dmesg and maybe lspci output
>>> would help as well.
>>>
>>> Regards,
>>> Christian.
>>>
>>
>>
>> Hi Christian,
>>
>> At first, I am using NXP T1042D4RDB-64B which has 256 MB PCIe buffer
>> according to its. PCIe memory range was arranged to 256 MB in .dts file and
>> in U-boot configuration file. Driver was giving error with exit code -12
>> (OUT_OF_MEMORY). But I was able to reach the linux console.
>>
>> [5.512922] [drm] amdgpu kernel modesetting enabled.
>> [5.517065] [drm] initializing kernel modesetting (POLARIS12
>> 0x1002:0x6987 0x1787:0x2389 0x80).
>> [5.524507] amdgpu 0001:01:00.0: Fatal error during GPU init
>> [5.529296] amdgpu: probe of 0001:01:00.0 failed with error -12
>>
>> Then I changed 256 MB to 4GB in .dtsi and U-boot conf file. I also changed
>> 64KB I/O size to 1MB . When I do this, I wasn't able to reach the linux
>> console because board was freezing. But driver was successfull at this
>> time. I already mentioned successfull driver console logs up.
>>
>> *this is lspci -v when GPU is plugged and 

RE: [PATCH 1/3] drm/amdgpu: drop asd shared memory

2019-12-02 Thread Deucher, Alexander
> -Original Message-
> From: Hawking Zhang 
> Sent: Monday, December 2, 2019 1:04 AM
> To: amd-gfx@lists.freedesktop.org; Min, Frank ;
> Clements, John ; Deucher, Alexander
> 
> Cc: Zhang, Hawking 
> Subject: [PATCH 1/3] drm/amdgpu: drop asd shared memory
> 
> asd shared memory is not needed since the driver doesn't invoke any further
> cmd to asd directly after the asd loading. The trusted application is the one
> that needs to talk to asd after the initialization.
> 

Do we need to keep that memory around for the TAs or do they use some other 
memory?

Alex

> Change-Id: I728afa4c7e8b67bc06678b10e92ac064ba10173e
> Signed-off-by: Hawking Zhang 
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 44 +++--
> drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 12 ---
>  2 files changed, 18 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> index d8ef7098ffdf..bdc9e7ae4892 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
> @@ -309,35 +309,17 @@ static int psp_tmr_load(struct psp_context *psp)
>   return ret;
>  }
> 
> -static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> -  uint64_t asd_mc, uint64_t asd_mc_shared,
> -  uint32_t size, uint32_t shared_size)
> +static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
> + uint64_t asd_mc, uint32_t size)
>  {
>   cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
>   cmd->cmd.cmd_load_ta.app_phy_addr_lo =
> lower_32_bits(asd_mc);
>   cmd->cmd.cmd_load_ta.app_phy_addr_hi =
> upper_32_bits(asd_mc);
>   cmd->cmd.cmd_load_ta.app_len = size;
> 
> - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
> lower_32_bits(asd_mc_shared);
> - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
> upper_32_bits(asd_mc_shared);
> - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
> -}
> -
> -static int psp_asd_init(struct psp_context *psp) -{
> - int ret;
> -
> - /*
> -  * Allocate 16k memory aligned to 4k from Frame Buffer (local
> -  * physical) for shared ASD <-> Driver
> -  */
> - ret = amdgpu_bo_create_kernel(psp->adev,
> PSP_ASD_SHARED_MEM_SIZE,
> -   PAGE_SIZE,
> AMDGPU_GEM_DOMAIN_VRAM,
> -   >asd_shared_bo,
> -   >asd_shared_mc_addr,
> -   >asd_shared_buf);
> -
> - return ret;
> + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
> + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
> + cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
>  }
> 
>  static int psp_asd_load(struct psp_context *psp) @@ -359,11 +341,15 @@
> static int psp_asd_load(struct psp_context *psp)
>   memset(psp->fw_pri_buf, 0, PSP_1_MEG);
>   memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp-
> >asd_ucode_size);
> 
> - psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp-
> >asd_shared_mc_addr,
> -  psp->asd_ucode_size,
> PSP_ASD_SHARED_MEM_SIZE);
> + psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
> +   psp->asd_ucode_size);
> 
>   ret = psp_cmd_submit_buf(psp, NULL, cmd,
>psp->fence_buf_mc_addr);
> + if (!ret) {
> + psp->asd_context.asd_initialized = true;
> + psp->asd_context.session_id = cmd->resp.session_id;
> + }
> 
>   kfree(cmd);
> 
> @@ -1198,12 +1184,6 @@ static int psp_hw_start(struct psp_context *psp)
>   return ret;
>   }
> 
> - ret = psp_asd_init(psp);
> - if (ret) {
> - DRM_ERROR("PSP asd init failed!\n");
> - return ret;
> - }
> -
>   ret = psp_asd_load(psp);
>   if (ret) {
>   DRM_ERROR("PSP load asd failed!\n");
> @@ -1611,8 +1591,6 @@ static int psp_hw_fini(void *handle)
> >fw_pri_mc_addr, >fw_pri_buf);
>   amdgpu_bo_free_kernel(>fence_buf_bo,
> >fence_buf_mc_addr, >fence_buf);
> - amdgpu_bo_free_kernel(>asd_shared_bo, 
> >asd_shared_mc_addr,
> -   >asd_shared_buf);
>   amdgpu_bo_free_kernel(>cmd_buf_bo, 
> >cmd_buf_mc_addr,
> (void **)>cmd_buf_mem);
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
> index 6de0ee97861f..a4d7690ea577 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
> @@ -32,7 +32,6 @@
> 
>  #define PSP_FENCE_BUFFER_SIZE0x1000
>  #define PSP_CMD_BUFFER_SIZE  0x1000
> -#define PSP_ASD_SHARED_MEM_SIZE 0x4000
>  #define PSP_XGMI_SHARED_MEM_SIZE 0x4000  #define
> PSP_RAS_SHARED_MEM_SIZE 0x8000
>  #define PSP_1_MEG0x10
> @@ -130,6 +129,11 @@ struct psp_xgmi_topology_info {
>   struct psp_xgmi_node_info
>   

Re: [PATCH 5/5] drm: drop DRM_AUTH from PRIME_TO/FROM_HANDLE ioctls

2019-12-02 Thread Emil Velikov
On Wed, 27 Nov 2019 at 18:37, Daniel Vetter  wrote:
>
> On Wed, Nov 27, 2019 at 06:32:56PM +, Emil Velikov wrote:
> > On Wed, 27 Nov 2019 at 18:04, Daniel Vetter  wrote:
> > >
> > > On Wed, Nov 27, 2019 at 04:27:29PM +, Emil Velikov wrote:
> > > > On Wed, 27 Nov 2019 at 07:41, Boris Brezillon
> > > >  wrote:
> > > > >
> > > > > Hi Emil,
> > > > >
> > > > > On Fri,  1 Nov 2019 13:03:13 +
> > > > > Emil Velikov  wrote:
> > > > >
> > > > > > From: Emil Velikov 
> > > > > >
> > > > > > As mentioned by Christian, for drivers which support only primary 
> > > > > > nodes
> > > > > > this changes the returned error from -EACCES into 
> > > > > > -EOPNOTSUPP/-ENOSYS.
> > > > >
> > > > > Are you sure this is true for MODESET-only nodes (those that do not
> > > > > have the RENDER cap set) implementing ->{fd_to_handle,handle_to_fd}()?
> > > > > Shouldn't the is_authenticated() check still be done in that case?
> > > > >
> > > > Thanks for catching this. Just sent out v2, which should address the 
> > > > concern.
> > >
> > > Why do we need this additional check in v2? What can go wrong on modeset
> > > drivers if non-authenticated legacy things can use this? modeset-only
> > > drivers have all their resources segregated by the drm core (drm_fb,
> > > mmaps, buffer lists), so there's really no access limitations that can go
> > > wrong here.
> >
> > Welcome back Daniel.
> >
> > I haven't audited the core drm code, so wasn't sure if there's any
> > issues that may arise.
> > Hence the conservative approach in v2.
> >
> > If you think this is fine as-is a formal Reviewed-by would be highly
> > appreciated.
>
> I think there's a non-zero chance I'll have to eat a few hats on this, but
> I think v1 is solid.
>
> Reviewed-by: Daniel Vetter 
>
Thanks. I've just re-read the DIM instructions and pushed this to drm-misc-next.
Fingers crossed, I did not butcher it this time around.

-Emil
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 2/2] drm/amdgpu: avoid using invalidate semaphore for picasso

2019-12-02 Thread Alex Deucher
On Mon, Dec 2, 2019 at 1:11 AM Changfeng.Zhu  wrote:
>
> From: changzhu 
>
> It may cause timeout waiting for sem acquire in VM flush when using
> invalidate semaphore for picasso. So it needs to avoid using invalidate
> semaphore for picasso.

Is this really just picasso?  I think it would be simpler to just
disable it for all RAVEN variants.

Alex

>
> Change-Id: I193e6a9eecc0a8b2c99baabf18ad816fb473da52
> Signed-off-by: changzhu 
> ---
>  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 20 
>  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  | 20 
>  2 files changed, 32 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> index 49b2ce30d629..2f3ba8f143cb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> @@ -245,7 +245,10 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device 
> *adev, uint32_t vmid,
> /* TODO: It needs to continue working on debugging with semaphore for 
> GFXHUB as well. */
> if ((vmhub == AMDGPU_MMHUB_0 ||
>  vmhub == AMDGPU_MMHUB_1) &&
> -   (!amdgpu_sriov_vf(adev))) {
> +   (!amdgpu_sriov_vf(adev)) &&
> +   (!(adev->asic_type == CHIP_RAVEN &&
> +  adev->rev_id < 0x8 &&
> +  adev->pdev->device == 0x15d8))) {
> for (i = 0; i < adev->usec_timeout; i++) {
> /* a read return value of 1 means semaphore acuqire */
> tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
> @@ -280,7 +283,10 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device 
> *adev, uint32_t vmid,
> /* TODO: It needs to continue working on debugging with semaphore for 
> GFXHUB as well. */
> if ((vmhub == AMDGPU_MMHUB_0 ||
>  vmhub == AMDGPU_MMHUB_1) &&
> -   (!amdgpu_sriov_vf(adev)))
> +   (!amdgpu_sriov_vf(adev)) &&
> +   (!(adev->asic_type == CHIP_RAVEN &&
> +  adev->rev_id < 0x8 &&
> +  adev->pdev->device == 0x15d8)))
> /*
>  * add semaphore release after invalidation,
>  * write with 0 means semaphore release
> @@ -385,7 +391,10 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct 
> amdgpu_ring *ring,
> /* TODO: It needs to continue working on debugging with semaphore for 
> GFXHUB as well. */
> if ((ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
>  ring->funcs->vmhub == AMDGPU_MMHUB_1) &&
> -   (!amdgpu_sriov_vf(adev)))
> +   (!amdgpu_sriov_vf(adev)) &&
> +   (!(adev->asic_type == CHIP_RAVEN &&
> +  adev->rev_id < 0x8 &&
> +  adev->pdev->device == 0x15d8)))
> /* a read return value of 1 means semaphore acuqire */
> amdgpu_ring_emit_reg_wait(ring,
>   hub->vm_inv_eng0_sem + eng, 0x1, 
> 0x1);
> @@ -403,7 +412,10 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct 
> amdgpu_ring *ring,
> /* TODO: It needs to continue working on debugging with semaphore for 
> GFXHUB as well. */
> if ((ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
>  ring->funcs->vmhub == AMDGPU_MMHUB_1) &&
> -   (!amdgpu_sriov_vf(adev)))
> +   (!amdgpu_sriov_vf(adev)) &&
> +   (!(adev->asic_type == CHIP_RAVEN &&
> +  adev->rev_id < 0x8 &&
> +  adev->pdev->device == 0x15d8)))
> /*
>  * add semaphore release after invalidation,
>  * write with 0 means semaphore release
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index 6c9a9c09cdb1..1cfed8787031 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -466,7 +466,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device 
> *adev, uint32_t vmid,
> /* TODO: It needs to continue working on debugging with semaphore for 
> GFXHUB as well. */
> if ((vmhub == AMDGPU_MMHUB_0 ||
>  vmhub == AMDGPU_MMHUB_1) &&
> -   (!amdgpu_sriov_vf(adev))) {
> +   (!amdgpu_sriov_vf(adev)) &&
> +   (!(adev->asic_type == CHIP_RAVEN &&
> +  adev->rev_id < 0x8 &&
> +  adev->pdev->device == 0x15d8))) {
> for (j = 0; j < adev->usec_timeout; j++) {
> /* a read return value of 1 means semaphore acuqire */
> tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
> @@ -498,7 +501,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device 
> *adev, uint32_t vmid,
> /* TODO: It needs to continue working on debugging with semaphore for 
> GFXHUB as well. */
> if ((vmhub == AMDGPU_MMHUB_0 ||
>  vmhub == AMDGPU_MMHUB_1) &&
> -   (!amdgpu_sriov_vf(adev)))
> +   (!amdgpu_sriov_vf(adev)) &&

[RESEND 1/2] drm: Add support for DP 1.4 Compliance edid corruption test 4.2.2.6

2019-12-02 Thread Jerry (Fangzhi) Zuo
The DP 1.4 EDID corruption test requires the source DUT to write back the
calculated CRC, not the corrupted CRC from the reference sink.

Return the calculated CRC, and initiate the required sequence.

-v2: Have separate routine for returning real CRC

-v3: Rewrite checksum computation routine to avoid duplicated code.
 Rename to avoid confusion

-v4: Fix a minor typo.

Signed-off-by: Jerry (Fangzhi) Zuo 
Reviewed-by: Harry Wentland 
---
 drivers/gpu/drm/drm_dp_helper.c | 36 
 drivers/gpu/drm/drm_edid.c  | 18 +++---
 include/drm/drm_connector.h |  7 +++
 include/drm/drm_dp_helper.h |  3 +++
 4 files changed, 61 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ffc68d305afe..22a0e966ea9f 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -336,6 +336,42 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
 }
 EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
 
+/**
+  * drm_dp_send_real_edid_checksum() - send back real edid checksum value
+  * @aux: DisplayPort AUX channel
+  * @real_edid_checksum: real edid checksum for the last block
+  *
+  * Returns true on success
+  */
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+u8 real_edid_checksum)
+{
+u8 link_edid_read = 0, auto_test_req = 0;
+u8 test_resp = 0;
+
+drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, _test_req, 1);
+auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
+
+drm_dp_dpcd_read(aux, DP_TEST_REQUEST, _edid_read, 1);
+link_edid_read &= DP_TEST_LINK_EDID_READ;
+
+if (!auto_test_req || !link_edid_read) {
+DRM_DEBUG_KMS("Source DUT does not support TEST_EDID_READ\n");
+return false;
+}
+
+drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, _test_req, 
1);
+
+/* send back checksum for the last edid extension block data */
+drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM, _edid_checksum, 1);
+
+test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
+drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, _resp, 1);
+
+return true;
+}
+EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
+
 /**
  * drm_dp_link_probe() - probe a DisplayPort link for capabilities
  * @aux: DisplayPort AUX channel
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 82a4ceed3fcf..ff64e5f1feb6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1348,10 +1348,19 @@ static int drm_edid_block_checksum(const u8 *raw_edid)
 {
int i;
u8 csum = 0;
-   for (i = 0; i < EDID_LENGTH; i++)
+
+   for (i = 0; i < EDID_LENGTH - 1; i++)
csum += raw_edid[i];
 
-   return csum;
+   return (0x100 - csum);
+}
+
+static bool drm_edid_block_checksum_diff(const u8 *raw_edid, u8 real_checksum)
+{
+   if (raw_edid[EDID_LENGTH - 1] != real_checksum)
+   return true;
+   else
+   return false;
 }
 
 static bool drm_edid_is_zero(const u8 *in_edid, int length)
@@ -1409,7 +1418,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool 
print_bad_edid,
}
 
csum = drm_edid_block_checksum(raw_edid);
-   if (csum) {
+   if (drm_edid_block_checksum_diff(raw_edid, csum)) {
if (edid_corrupt)
*edid_corrupt = true;
 
@@ -1572,6 +1581,9 @@ static void connector_bad_edid(struct drm_connector 
*connector,
   prefix, DUMP_PREFIX_NONE, 16, 1,
   block, EDID_LENGTH, false);
}
+
+   /* Calculate real checksum for the last edid extension block data */
+   connector->real_edid_checksum = drm_edid_block_checksum(edid + 
edid[0x7e] * EDID_LENGTH);
 }
 
 /* Get override or firmware EDID */
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 681cb590f952..eb0d8c7b35fd 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -1345,6 +1345,13 @@ struct drm_connector {
 * rev1.1 4.2.2.6
 */
bool edid_corrupt;
+   /**
+ * @real_edid_checksum: real edid checksum value for corrupted edid 
block.
+ * Required in Displayport 1.4 compliance testing
+ * rev1.1 4.2.2.6
+ */
+uint8_t real_edid_checksum;
+
 
/** @debugfs_entry: debugfs directory for this connector */
struct dentry *debugfs_entry;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 5a795075d5da..84709d7810f8 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1383,6 +1383,9 @@ static inline ssize_t drm_dp_dpcd_writeb(struct 
drm_dp_aux *aux,
 int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
 u8 status[DP_LINK_STATUS_SIZE]);
 
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+   

[RESEND 2/2] drm/amd/display: Hook up drm interface for DP 1.4 edid corruption test

2019-12-02 Thread Jerry (Fangzhi) Zuo
-v3: Rename to avoid confusion

Signed-off-by: Jerry (Fangzhi) Zuo 
Reviewed-by: Harry Wentland 
---
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  | 35 +-
 1 file changed, 7 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 11e5784aa62a..927fdac77b6f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -575,6 +575,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_sink *sink)
 {
struct amdgpu_dm_connector *aconnector = link->priv;
+   struct drm_connector *connector = >base;
struct i2c_adapter *ddc;
int retry = 3;
enum dc_edid_status edid_status;
@@ -592,6 +593,12 @@ enum dc_edid_status dm_helpers_read_local_edid(
 
edid = drm_get_edid(>base, ddc);
 
+   if (link->aux_mode && connector->edid_corrupt)
+   
drm_dp_send_real_edid_checksum(>dm_dp_aux.aux, 
connector->real_edid_checksum);
+
+   if (!edid && connector->edid_corrupt)
+   return EDID_BAD_CHECKSUM;
+
if (!edid)
return EDID_NO_RESPONSE;
 
@@ -612,34 +619,6 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
-   if (link->aux_mode) {
-   union test_request test_request = { {0} };
-   union test_response test_response = { {0} };
-
-   dm_helpers_dp_read_dpcd(ctx,
-   link,
-   DP_TEST_REQUEST,
-   _request.raw,
-   sizeof(union test_request));
-
-   if (!test_request.bits.EDID_READ)
-   return edid_status;
-
-   test_response.bits.EDID_CHECKSUM_WRITE = 1;
-
-   dm_helpers_dp_write_dpcd(ctx,
-   link,
-   DP_TEST_EDID_CHECKSUM,
-   
>dc_edid.raw_edid[sink->dc_edid.length-1],
-   1);
-
-   dm_helpers_dp_write_dpcd(ctx,
-   link,
-   DP_TEST_RESPONSE,
-   _response.raw,
-   sizeof(test_response));
-
-   }
 
return edid_status;
 }
-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[RESEND 0/2] Changes for DP 1.4 Compliance test 4.2.2.6

2019-12-02 Thread Jerry (Fangzhi) Zuo
Unlike the DP 1.2 Compliance test 4.2.2.6, DP 1.4 requires calculating the
real CRC value of the last EDID data block and writing it back.

The current EDID CRC calculation routine sums all bytes, including the last
CRC byte, and only checks whether the result is non-zero. It needs to return
the actual CRC value when corruption is detected.
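
For reference, a minimal C sketch of the two checksum styles described above
(illustrative only, not part of the patchset; the helper names and the toy
main() are made up). An EDID block is 128 bytes, and a valid block's bytes sum
to 0 mod 256, so the expected checksum byte is 0x100 minus the sum of the
first 127 bytes:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define EDID_BLOCK_LENGTH 128

/* Old style: sum all 128 bytes; a non-zero result only says "corrupt". */
uint8_t edid_block_sum(const uint8_t *block)
{
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < EDID_BLOCK_LENGTH; i++)
                sum += block[i];

        return sum; /* 0 means the stored checksum byte is consistent */
}

/* New style: compute the checksum byte the block should end with. */
uint8_t edid_block_expected_checksum(const uint8_t *block)
{
        unsigned int sum = 0;
        size_t i;

        for (i = 0; i < EDID_BLOCK_LENGTH - 1; i++)
                sum += block[i];

        return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
        uint8_t block[EDID_BLOCK_LENGTH] = { 0x00, 0xff, 0xff, 0xff,
                                             0xff, 0xff, 0xff, 0x00 };

        /* Deliberately corrupt the stored checksum byte. */
        block[EDID_BLOCK_LENGTH - 1] = 0x42;

        printf("corrupt? %s, expected checksum 0x%02x\n",
               edid_block_sum(block) ? "yes" : "no",
               edid_block_expected_checksum(block));
        return 0;
}

The DP 1.4 test then wants the second value written back over DPCD
(DP_TEST_EDID_CHECKSUM), rather than only a pass/fail indication.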

[For CI]
Resend this patchset to intel gfx CI. 

Jerry (Fangzhi) Zuo (2):
  drm: Add support for DP 1.4 Compliance edid corruption test 4.2.2.6
  drm/amd/display: Hook up drm interface for DP 1.4 edid corruption test

 .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  | 35 +
 drivers/gpu/drm/drm_dp_helper.c| 36 ++
 drivers/gpu/drm/drm_edid.c | 18 +--
 include/drm/drm_connector.h|  7 +
 include/drm/drm_dp_helper.h|  3 ++
 5 files changed, 68 insertions(+), 31 deletions(-)

-- 
2.14.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH RFC v4 02/16] cgroup: Introduce cgroup for drm subsystem

2019-12-02 Thread Tejun Heo
On Fri, Nov 29, 2019 at 01:00:36AM -0500, Kenny Ho wrote:
> On Tue, Oct 1, 2019 at 10:31 AM Michal Koutný  wrote:
> > On Thu, Aug 29, 2019 at 02:05:19AM -0400, Kenny Ho  wrote:
> > > +struct cgroup_subsys drm_cgrp_subsys = {
> > > + .css_alloc  = drmcg_css_alloc,
> > > + .css_free   = drmcg_css_free,
> > > + .early_init = false,
> > > + .legacy_cftypes = files,
> > Do you really want to expose the DRM controller on v1 hierarchies (where
> > threads of one process can be in different cgroups, or children cgroups
> > compete with their parents)?
> 
> (Sorry for the delay, I have been distracted by something else.)
> Yes, I am hoping to make the functionality as widely available as
> possible since the ecosystem is still transitioning to v2.  Do you see
> inherent problem with this approach?

Integrating with memcg could be more challenging on cgroup1.  That's
one of the reasons why e.g. cgroup-aware pagecache writeback is only
on cgroup2.

-- 
tejun
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amdgpu/powerplay: unify smu send message function

2019-12-02 Thread Likun Gao
From: Likun Gao 

Drop the smu_send_smc_msg function from the ASIC-specific structure.
Reuse the smu_send_smc_msg_with_param function for smu_send_smc_msg.
Set the parameter to 0 for the smu_send_smc_msg function, otherwise it will
send the previous parameter value (not a deterministic value).

Signed-off-by: Likun Gao 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c |  8 
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c   |  1 -
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  3 ++-
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h  |  2 --
 drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h  |  2 --
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c |  1 -
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c |  1 -
 drivers/gpu/drm/amd/powerplay/smu_internal.h   |  2 --
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 26 --
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c  | 25 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c |  1 -
 11 files changed, 10 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 36001a4..e039904 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -2567,3 +2567,11 @@ uint32_t smu_get_pptable_power_limit(struct smu_context 
*smu)
 
return ret;
 }
+
+int smu_send_smc_msg(struct smu_context *smu, uint16_t msg)
+{
+   int ret;
+
+   ret = smu->ppt_funcs->send_smc_msg_with_param(smu, msg, 0);
+   return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 68107de..3f13986 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2137,7 +2137,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
-   .send_smc_msg = smu_v11_0_send_msg,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
.read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ada4a8d..fae1026 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -500,7 +500,6 @@ struct pptable_funcs {
int (*notify_memory_pool_location)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
-   int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, 
uint32_t param);
int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
@@ -725,4 +724,6 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
 
 uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
 
+int smu_send_smc_msg(struct smu_context *smu, uint16_t msg);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 5a27713..80b1d20 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -177,8 +177,6 @@ int smu_v11_0_notify_memory_pool_location(struct 
smu_context *smu);
 int smu_v11_0_system_features_control(struct smu_context *smu,
 bool en);
 
-int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
-
 int
 smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
  uint32_t param);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 44c65dd..f709f6e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -44,8 +44,6 @@ int smu_v12_0_read_arg(struct smu_context *smu, uint32_t 
*arg);
 
 int smu_v12_0_wait_for_response(struct smu_context *smu);
 
-int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
-
 int
 smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
  uint32_t param);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index f842246..15403b7 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -2083,7 +2083,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,

RE: [PATCH] amd/amdgpu/sriov swSMU disable for sriov

2019-12-02 Thread Zhang, Jack (Jian)
Hi, Team,

Would you please help to review this patch?

Best,
Jack

-Original Message-
From: Jack Zhang  
Sent: Monday, December 2, 2019 7:05 PM
To: amd-gfx@lists.freedesktop.org
Cc: Zhang, Jack (Jian) 
Subject: [PATCH] amd/amdgpu/sriov swSMU disable for sriov

For boards newer than ARCTURUS under an SR-IOV platform, swSMU is not
supported because the SMU IP block is commented out in the guest driver.

Generally, for SR-IOV, SMU initialization is moved to the host driver.
Thus, SMU sw_init and hw_init will not be executed in the guest driver.

Without the SW structures being initialized in the guest driver, swSMU
cannot be declared as supported.

Signed-off-by: Jack Zhang 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 36001a4..0b8a53b 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -531,7 +531,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS)
-   return true;
+   if (amdgpu_sriov_vf(adev))
+   return false;
+   else
+   return true;
else
return false;
 }
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [RFC PATCH] drm/amdgpu: allocate entities on demand

2019-12-02 Thread Christian König

Am 02.12.19 um 15:43 schrieb Nirmoy:


On 11/29/19 7:42 PM, Christian König wrote:

On 29.11.19 at 15:29, Nirmoy wrote:

Hi Christian,

On 11/26/19 10:45 AM, Christian König wrote:
It looks like a start, but there are numerous things which need to be 
fixed.


Question number one is: What's that good for? Entities are not the 
problem here. The real issue is the fence ring and the rq_list.


The rq_list could actually be made constant since it should never 
be changed by the entity. It is only changed for backward 
compatibility in drm_sched_entity_set_priority().


So I would start there and cleanup the 
drm_sched_entity_set_priority() to actually just set a new constant 
rq list instead.


I am missing some context here. Can you please explain a bit more? I 
looked it over and over again, but I still don't understand what you 
mean by a new constant rq list :/


Ok that needs a bit wider explanation.

The GPU scheduler consists mainly of drm_gpu_scheduler instances. 
Each of those instances contains multiple runqueues with different 
priorities (5 IIRC).


Now for each entity we provide a list of runqueues on which this entity 
can be served, i.e. where the jobs that are pushed to the entity 
are executed.


The entity itself keeps a copy of that runqueue list because we have 
the drm_sched_entity_set_priority() which modifies this runqueue list.


But essentially that is complete overkill, the runqueue lists are 
constant for each amdgpu device, e.g. all contexts should use SDMA0 
and SDMA1 in the same way.


In other words, building the list of runqueues should happen only once 
and not for each context.

Okay, I understand the real problem now. Thanks for the detailed explanation.


Multiple approaches to fix this would be possible. One rather elegant 
solution would be to change the rq list into a scheduler instances 
list + priority.


Do you mean something like

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 684692a8ed76..ac67f8f098fa 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -81,7 +81,7 @@ enum drm_sched_priority {
 struct drm_sched_entity {
    struct list_head    list;
    struct drm_sched_rq *rq;
-   struct drm_sched_rq **rq_list;
+  struct drm_gpu_scheduler    **sched;
    unsigned int    num_rq_list;
    spinlock_t  rq_lock;


Yes, exactly. Problem is that I'm not 100% sure if that really works 
with all users of the rq_list.
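
For illustration, if the entity kept a constant scheduler list plus a
priority (as in the diff above), drm_sched_entity_set_priority() could
shrink to something like the sketch below. Field names such as
entity->priority are assumptions here, not existing code:

    /* Sketch only: the entity holds a shared, constant scheduler list
     * and a priority index instead of a per-entity runqueue list.
     */
    void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                       enum drm_sched_priority priority)
    {
            spin_lock(&entity->rq_lock);
            /* Only the priority changes; the scheduler list stays constant. */
            entity->priority = priority;
            spin_unlock(&entity->rq_lock);
    }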


Regards,
Christian.






This way we would also fix the age old bug that changing the priority 
of a context could actually mess up already scheduled jobs.


The alternative I noted before would be to drop 
drm_sched_entity_set_priority() or change it into 
drm_sched_entity_set_runqueues().
I was working on it but then I got stuck on a "BUG: kernel NULL 
pointer dereference, address:" which I am still trying to figure out.


Regards,
Christian.





Then we could embed the fences in amdgpu_ctx_entity as dynamic 
array at the end of the structure.


And last we can start to dynamic allocate and initialize the 
amdgpu_ctx_entity() structures.


Regards,
Christian.





___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [error] Drm -> amdgpu Unrecoverable Machine Check

2019-12-02 Thread Christian König

Hi Yusuf,

On 02.12.19 at 15:20, Yusuf Altıparmak wrote:


That is an expected result. 256MB is not enough for the VRAM BAR
and the doorbell BAR to fit into. But you can still use VGA
emulation that way if I'm not completely mistaken.


Hmm, then what procedure should I follow to get VGA output? It seems 
the graphics card does not have a VGA output. And isn't there any way 
to use this GPU with a 256MB-buffered PCIe?


Most likely not. There is support for resizing the VRAM BAR, but usually 
you can only make it larger and not smaller.


Please give me the output of "sudo setpci -s 0001:01:00.0 ECAP15+4.l 
ECAP15+8.l" if you want to double check that.



Then I changed 256 MB to 4 GB in the .dtsi and U-Boot conf file.

How did you do this? Is your memory layout consistent?

See when you just changed one end address you might need to adjust
other addresses as well.

Regards,
Christian.

It seems it's not consistent. At first I changed the .dtsi so that it is 
consistent. I only changed the PCIe1 device memory and I/O range. It gave 
the same error. Then I also changed the PCIe2 and PCIe3 devices' starting 
addresses according to the PCIe1 device end address in the .dtsi. I am not 
sure whether I did this correctly, but it gave the same result again. This 
is why I asked "If it is, is there any sample PCIe configuration for E9171?".


Well you rather need to ask if anybody has sample PCIe configuration for 
GPUs in general. That problem is not really E9171 related. You might 
want to ask NXP for that maybe.




This is a code piece from my T104xRDB.h file, which contains the PCIe 
configuration variables for U-Boot. The changes I made are not 
consistent. RAM size is 8 GB.



Sorry, no idea if that is correct or not. You need to ask NXP for help 
with that.


Regards,
Christian.



#ifdef CONFIG_PCI
/* controller 1, direct to uli, tgtid 3, Base address 2 */
#ifdef CONFIG_PCIE1
#define CONFIG_SYS_PCIE1_MEM_VIRT 0x8000
#define CONFIG_SYS_PCIE1_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE1_MEM_PHYS 0xcull
#define CONFIG_SYS_PCIE1_MEM_SIZE 0x1000 /* 256M */
#define CONFIG_SYS_PCIE1_IO_VIRT 0xf800
#define CONFIG_SYS_PCIE1_IO_BUS 0x
#define CONFIG_SYS_PCIE1_IO_PHYS 0xff800ull
#define CONFIG_SYS_PCIE1_IO_SIZE 0x0001 /* 64k */
#endif

/* controller 2, Slot 2, tgtid 2, Base address 201000 */
#ifdef CONFIG_PCIE2
#define CONFIG_SYS_PCIE2_MEM_VIRT 0x9000
#define CONFIG_SYS_PCIE2_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE2_MEM_PHYS 0xc1000ull
#define CONFIG_SYS_PCIE2_MEM_SIZE 0x1 /* 4GB */
#define CONFIG_SYS_PCIE2_IO_VIRT 0xf801
#define CONFIG_SYS_PCIE2_IO_BUS 0x
#define CONFIG_SYS_PCIE2_IO_PHYS 0xff801ull
#define CONFIG_SYS_PCIE2_IO_SIZE 0x0010 /* 1M */
#endif

/* controller 3, Slot 1, tgtid 1, Base address 202000 */
#ifdef CONFIG_PCIE3
#define CONFIG_SYS_PCIE3_MEM_VIRT 0x19000 /* I changed this to
0x19000 instead of 0xa000 because the PCIE2 end address changed. The
end address is now at 4 GB (0x1 hex, so I added 0x9000 with 1) */
#define CONFIG_SYS_PCIE3_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE3_MEM_PHYS 0xd1000ull
#define CONFIG_SYS_PCIE3_MEM_SIZE 0x1000 /* 256M */
#define CONFIG_SYS_PCIE3_IO_VIRT 0xf811
#define CONFIG_SYS_PCIE3_IO_BUS 0x
#define CONFIG_SYS_PCIE3_IO_PHYS 0xff811ull /* Did the same for IO */
#define CONFIG_SYS_PCIE3_IO_SIZE 0x0001 /* 64k */
#endif

/* controller 4, Base address 203000 */
#ifdef CONFIG_PCIE4
#define CONFIG_SYS_PCIE4_MEM_VIRT 0x2 /* SAME STEP AS PCIE3.
This time I added 256 MB (0x1000) */
#define CONFIG_SYS_PCIE4_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE4_MEM_PHYS 0xd2000ull
#define CONFIG_SYS_PCIE4_MEM_SIZE 0x1000 /* 256M */
#define CONFIG_SYS_PCIE4_IO_VIRT 0xf812
#define CONFIG_SYS_PCIE4_IO_BUS 0x
#define CONFIG_SYS_PCIE4_IO_PHYS 0xff812ull /* Did the same for IO */
#define CONFIG_SYS_PCIE4_IO_SIZE 0x0001 /* 64k */
#endif




On 02.12.19 at 14:32, Yusuf Altıparmak wrote:




I attached my dts file.

System is working fine when GPU is not plugged in.
*
*
*This is the last console log before freeze:*
[drm] amdgpu kernel modesetting enabled.
[drm] initializing kernel modesetting (POLARIS12
0x1002:0x6987 0x1787:0x2389 0x80).
[drm] register mmio base: 0x2020
fsl-fman-port ffe488000.port fm1-gb0: renamed from eth0
[drm] register mmio size: 262144
[drm] add ip block number 0 
[drm] add ip block number 1 
[drm] add ip block number 2 
[drm] add ip block number 3 
[drm] add ip block number 4 
[drm] add ip block number 5 
[drm] add ip block number 6 
[drm] add ip block number 7 
[drm] add ip block number 8 
[drm] UVD is enabled in VM mode
[drm] UVD ENC is enabled in VM mode
[drm] VCE enabled in VM mode

Re: [PATCH] drm/amdgpu/powerplay: unify smu send message function

2019-12-02 Thread Wang, Kevin(Yang)
[AMD Official Use Only - Internal Distribution Only]



From: Gao, Likun 
Sent: Monday, December 2, 2019 6:03 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Wang, Kevin(Yang) ; Feng, Kenneth 
; Gao, Likun 
Subject: [PATCH] drm/amdgpu/powerplay: unify smu send message function

From: Likun Gao 

Drop the smu_send_smc_msg function from the ASIC-specific structure.
Reuse the smu_send_smc_msg_with_param function for smu_send_smc_msg.
Set the parameter to 0 for the smu_send_msg function, otherwise it will
send with the previous parameter value (not a defined value).

Signed-off-by: Likun Gao 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c |  8 
 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c   |  1 -
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h |  3 ++-
 drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h  |  2 --
 drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h  |  2 --
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c |  1 -
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c |  1 -
 drivers/gpu/drm/amd/powerplay/smu_internal.h   |  2 --
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 26 --
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c  | 25 -
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c |  1 -
 11 files changed, 10 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 36001a4..e039904 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -2567,3 +2567,11 @@ uint32_t smu_get_pptable_power_limit(struct smu_context 
*smu)

 return ret;
 }
+
+int smu_send_smc_msg(struct smu_context *smu, uint16_t msg)
[kevin]:
please use "enum smu_message_type" replace "uint16_t" type.
+{
+   int ret;
+
+   ret = smu->ppt_funcs->send_smc_msg_with_param(smu, msg, 0);
[kevin]:
I think it is better to use the smu_send_smc_msg_with_param() function to replace it.
With that fixed,
Reviewed-by: Kevin Wang 
+   return ret;
+}
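
A minimal sketch of the wrapper with both review comments applied
(illustrative only; it assumes the smu_send_smc_msg_with_param() helper
is available to callers, e.g. via smu_internal.h):

    int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg)
    {
            /* Reuse the _with_param path with an explicit parameter of 0. */
            return smu_send_smc_msg_with_param(smu, msg, 0);
    }
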
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c 
b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 68107de..3f13986 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2137,7 +2137,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 .set_tool_table_location = smu_v11_0_set_tool_table_location,
 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 .system_features_control = smu_v11_0_system_features_control,
-   .send_smc_msg = smu_v11_0_send_msg,
 .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
 .read_smc_arg = smu_v11_0_read_arg,
 .init_display_count = smu_v11_0_init_display_count,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h 
b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ada4a8d..fae1026 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -500,7 +500,6 @@ struct pptable_funcs {
 int (*notify_memory_pool_location)(struct smu_context *smu);
 int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
 int (*system_features_control)(struct smu_context *smu, bool en);
-   int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
 int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, 
uint32_t param);
 int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
 int (*init_display_count)(struct smu_context *smu, uint32_t count);
@@ -725,4 +724,6 @@ int smu_get_dpm_clock_table(struct smu_context *smu,

 uint32_t smu_get_pptable_power_limit(struct smu_context *smu);

+int smu_send_smc_msg(struct smu_context *smu, uint16_t msg);
+
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 5a27713..80b1d20 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -177,8 +177,6 @@ int smu_v11_0_notify_memory_pool_location(struct 
smu_context *smu);
 int smu_v11_0_system_features_control(struct smu_context *smu,
  bool en);

-int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
-
 int
 smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
   uint32_t param);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 44c65dd..f709f6e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -44,8 +44,6 @@ int smu_v12_0_read_arg(struct smu_context *smu, uint32_t 
*arg);

 int smu_v12_0_wait_for_response(struct smu_context *smu);

-int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
-
 int
 smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
 

Re: [error] Drm -> amdgpu Unrecoverable Machine Check

2019-12-02 Thread Christian König

Hi Yusuf,

On 02.12.19 at 12:41, Yusuf Altıparmak wrote:
My embedded board is freezing when I put E9171 on PCIe. What is the 
meaning of Unrecoverable Machine Check error about GPU?


Well see the explanation on Wikipedia for example: 
https://en.wikipedia.org/wiki/Machine-check_exception


In general it means you have messed up something in your hardware 
configuration.



Could PCIe settings in .dts file cause this problem?


Possible, but rather unlikely. My best guess is that it is some problem 
with the power supply.



If it is, is there any sample PCIe configuration for E9171?


The E9171 is just a PCIe device, so the dtsi is actually rather 
uninteresting. What we really need is a full dmesg and maybe lspci 
output would help as well.
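
For example (assuming the GPU enumerates at 0001:01:00.0, as in the lspci
output later in this thread):

    dmesg > dmesg.txt
    lspci -vvv -s 0001:01:00.0 > lspci.txt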


Regards,
Christian.


I attached my dts file.

System is working fine when GPU is not plugged in.
*
*
*This is the last console log before freeze:*
[drm] amdgpu kernel modesetting enabled.
[drm] initializing kernel modesetting (POLARIS12 0x1002:0x6987 
0x1787:0x2389 0x80).

[drm] register mmio base: 0x2020
fsl-fman-port ffe488000.port fm1-gb0: renamed from eth0
[drm] register mmio size: 262144
[drm] add ip block number 0 
[drm] add ip block number 1 
[drm] add ip block number 2 
[drm] add ip block number 3 
[drm] add ip block number 4 
[drm] add ip block number 5 
[drm] add ip block number 6 
[drm] add ip block number 7 
[drm] add ip block number 8 
[drm] UVD is enabled in VM mode
[drm] UVD ENC is enabled in VM mode
[drm] VCE enabled in VM mode
ATOM BIOS: 113-ER16BFC-001
[drm] GPU posting now...
Disabling lock debugging due to kernel taint
Machine check in kernel mode.
Caused by (from MCSR=a000): Load Error Report
Guarded Load Error Report
Kernel panic - not syncing: Unrecoverable Machine check
CPU: 1 PID: 2023 Comm: udevd Tainted: G   M  4.19.26+gc0c2141 #1
Call Trace:

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [error] Drm -> amdgpu Unrecoverable Machine Check

2019-12-02 Thread Yusuf Altıparmak
>
>
> I attached my dts file.
>
> System is working fine when GPU is not plugged in.
>
> *This is the last console log before freeze:*
> [drm] amdgpu kernel modesetting enabled.
>
> [drm] initializing kernel modesetting (POLARIS12 0x1002:0x6987
> 0x1787:0x2389 0x80).
> [drm] register mmio base: 0x2020
>
> fsl-fman-port ffe488000.port fm1-gb0: renamed from eth0
>
> [drm] register mmio size: 262144
>
> [drm] add ip block number 0 
>
> [drm] add ip block number 1 
>
> [drm] add ip block number 2 
>
> [drm] add ip block number 3 
>
> [drm] add ip block number 4 
>
> [drm] add ip block number 5 
>
> [drm] add ip block number 6 
>
> [drm] add ip block number 7 
>
> [drm] add ip block number 8 
>
> [drm] UVD is enabled in VM mode
>
> [drm] UVD ENC is enabled in VM mode
>
> [drm] VCE enabled in VM mode
>
> ATOM BIOS: 113-ER16BFC-001
>
> [drm] GPU posting now...
>
> Disabling lock debugging due to kernel taint
>
> Machine check in kernel mode.
>
> Caused by (from MCSR=a000): Load Error Report
>
> Guarded Load Error Report
>
> Kernel panic - not syncing: Unrecoverable Machine check
>
> CPU: 1 PID: 2023 Comm: udevd Tainted: G   M  4.19.26+gc0c2141
> #1
> Call Trace:
>
>
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>
>
>

Christian König wrote the following on Mon, 2 Dec 2019 at 15:28:

> Hi Yusuf,
>
> On 02.12.19 at 12:41, Yusuf Altıparmak wrote:
>
> My embedded board is freezing when I put E9171 on PCIe. What is the
> meaning of Unrecoverable Machine Check error about GPU?
>
>
> Well see the explanation on Wikipedia for example:
> https://en.wikipedia.org/wiki/Machine-check_exception
>
> In general it means you have messed up something in your hardware
> configuration.
>
> Could PCIe settings in .dts file cause this problem?
>
>
> Possible, but rather unlikely. My best guess is that it is some problem
> with the power supply.
>
> If it is, is there any sample PCIe configuration for E9171?
>
>
> The E9171 is just a PCIe device, so the dtsi is actually rather
> uninteresting. What we really need is a full dmesg and maybe lspci output
> would help as well.
>
> Regards,
> Christian.
>


Hi Christian,

First of all, I am using an NXP T1042D4RDB-64B, which has a 256 MB PCIe
buffer according to its documentation. The PCIe memory range was set to
256 MB in the .dts file and in the U-Boot configuration file. The driver
was giving an error with exit code -12 (OUT_OF_MEMORY), but I was able to
reach the Linux console.

[5.512922] [drm] amdgpu kernel modesetting enabled.
[5.517065] [drm] initializing kernel modesetting (POLARIS12
0x1002:0x6987 0x1787:0x2389 0x80).
[5.524507] amdgpu 0001:01:00.0: Fatal error during GPU init
[5.529296] amdgpu: probe of 0001:01:00.0 failed with error -12

Then I changed 256 MB to 4 GB in the .dtsi and U-Boot conf file. I also
changed the 64 KB I/O size to 1 MB. When I did this, I wasn't able to
reach the Linux console because the board was freezing, but the driver
was successful this time. I already mentioned the successful driver
console logs above.

This is lspci -v when the GPU is plugged in and the memory size is 256 MB.

root@t1042d4rdb-64b:~# lspci -v
:00:00.0 PCI bridge: Freescale Semiconductor Inc Device 0824 (rev 11)
(prog-if 00 [Normal decode])
Device tree node: /sys/firmware/devicetree/base/pcie@ffe24
/pcie@0
Flags: bus master, fast devsel, latency 0, IRQ 20
Memory at  (32-bit, non-prefetchable)
Bus: primary=00, secondary=01, subordinate=01, sec-latency=0
I/O behind bridge: - [size=64K]
Memory behind bridge: e000-efff [size=256M]
Prefetchable memory behind bridge: None
Capabilities: [44] Power Management version 3
Capabilities: [4c] Express Root Port (Slot-), MSI 00
Capabilities: [100] Advanced Error Reporting
Kernel driver in use: pcieport

0001:00:00.0 PCI bridge: Freescale Semiconductor Inc Device 0824 (rev 11)
(prog-if 00 [Normal decode])
Device tree node: /sys/firmware/devicetree/base/pcie@ffe25
/pcie@0
Flags: bus master, fast devsel, latency 0, IRQ 21
Memory at  (32-bit, non-prefetchable)
Bus: primary=00, secondary=01, subordinate=01, sec-latency=0
I/O behind bridge: - [size=64K]
Memory behind bridge: e000-efff [size=256M]
Prefetchable memory behind bridge: None
Capabilities: [44] Power Management version 3
Capabilities: [4c] Express Root Port (Slot-), MSI 00
Capabilities: [100] Advanced Error Reporting
Kernel driver in use: pcieport

0001:01:00.0 VGA compatible controller: Advanced Micro Devices, Inc.
[AMD/ATI] Lexa [Radeon E9171 MCM] (rev 80) (prog-if 00 [VGA controller])
Subsystem: Hightech Information System Ltd. Device 2389
Flags: fast devsel, IRQ 41
Memory at c1000 (64-bit, prefetchable) [size=256M]
Memory at  (64-bit, 

[PATCH] amd/amdgpu/sriov swSMU disable for sriov

2019-12-02 Thread Jack Zhang
For ARCTURUS and newer boards under an sriov platform, swSMU is not
supported because the smu ip block is commented out in the guest
driver.

Generally for sriov, initialization of the smu is moved to the host
driver. Thus, smu sw_init and hw_init will not be executed in the
guest driver.

Without the sw structure being initialized in the guest driver, swSMU
cannot declare itself supported.

Signed-off-by: Jack Zhang 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 36001a4..0b8a53b 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -531,7 +531,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS)
-   return true;
+   if (amdgpu_sriov_vf(adev))
+   return false;
+   else
+   return true;
else
return false;
 }
-- 
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[error] Drm -> amdgpu Unrecoverable Machine Check

2019-12-02 Thread Yusuf Altıparmak
My embedded board is freezing when I put E9171 on PCIe. What is the meaning
of Unrecoverable Machine Check error about GPU?

Could PCIe settings in .dts file cause this problem? If it is, is there any
sample PCIe configuration for E9171? I attached my dts file.

System is working fine when GPU is not plugged in.

*This is the last console log before freeze:*
[drm] amdgpu kernel modesetting enabled.

[drm] initializing kernel modesetting (POLARIS12 0x1002:0x6987
0x1787:0x2389 0x80).
[drm] register mmio base: 0x2020

fsl-fman-port ffe488000.port fm1-gb0: renamed from eth0

[drm] register mmio size: 262144

[drm] add ip block number 0 

[drm] add ip block number 1 

[drm] add ip block number 2 

[drm] add ip block number 3 

[drm] add ip block number 4 

[drm] add ip block number 5 

[drm] add ip block number 6 

[drm] add ip block number 7 

[drm] add ip block number 8 

[drm] UVD is enabled in VM mode

[drm] UVD ENC is enabled in VM mode

[drm] VCE enabled in VM mode

ATOM BIOS: 113-ER16BFC-001

[drm] GPU posting now...

Disabling lock debugging due to kernel taint

Machine check in kernel mode.

Caused by (from MCSR=a000): Load Error Report

Guarded Load Error Report

Kernel panic - not syncing: Unrecoverable Machine check

CPU: 1 PID: 2023 Comm: udevd Tainted: G   M  4.19.26+gc0c2141
#1
Call Trace:


t104xd4rdb.dtsi
Description: Binary data
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

RE: [PATCH 07/10] drm/amdgpu: add concurrent baco reset support for XGMI

2019-12-02 Thread Ma, Le
[AMD Official Use Only - Internal Distribution Only]



From: Grodzovsky, Andrey 
Sent: Saturday, November 30, 2019 12:22 AM
To: Ma, Le ; amd-gfx@lists.freedesktop.org
Cc: Chen, Guchun ; Zhou1, Tao ; 
Deucher, Alexander ; Li, Dennis ; 
Zhang, Hawking 
Subject: Re: [PATCH 07/10] drm/amdgpu: add concurrent baco reset support for 
XGMI



On 11/28/19 4:00 AM, Ma, Le wrote:





-Original Message-
From: Grodzovsky, Andrey 

Sent: Wednesday, November 27, 2019 11:46 PM
To: Ma, Le ; 
amd-gfx@lists.freedesktop.org
Cc: Chen, Guchun ; Zhou1, Tao 
; Deucher, Alexander 
; Li, Dennis 
; Zhang, Hawking 

Subject: Re: [PATCH 07/10] drm/amdgpu: add concurrent baco reset support for 
XGMI





On 11/27/19 4:15 AM, Le Ma wrote:

> Currently each XGMI node reset wq does not run in parallel because

> the same work item bound to the same cpu runs in sequence. So change to bind

> the xgmi_reset_work item to different cpus.



It's not the same work item, see more below





>

> XGMI requires all nodes enter into baco within very close proximity

> before any node exit baco. So schedule the xgmi_reset_work wq twice

> for enter/exit baco respectively.

>

> The default reset code path and methods do not change for vega20 production:

>- baco reset without xgmi/ras

>- psp reset with xgmi/ras

>

> To enable baco for XGMI/RAS case, both 2 conditions below are needed:

>- amdgpu_ras_enable=2

>- baco-supported smu firmware

>

> The case that PSP reset and baco reset coexist within an XGMI hive is

> not in the consideration.

>

> Change-Id: I9c08cf90134f940b42e20d2129ff87fba761c532

> Signed-off-by: Le Ma mailto:le...@amd.com>>

> ---

>   drivers/gpu/drm/amd/amdgpu/amdgpu.h|  2 +

>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 78 
> ++

>   2 files changed, 70 insertions(+), 10 deletions(-)

>

> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h

> b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

> index d120fe5..08929e6 100644

> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h

> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

> @@ -998,6 +998,8 @@ struct amdgpu_device {

>  int   pstate;

>  /* enable runtime pm on the device */

>  boolrunpm;

> +

> +  boolin_baco;

>   };

>

>   static inline struct amdgpu_device *amdgpu_ttm_adev(struct

> ttm_bo_device *bdev) diff --git

> a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

> index bd387bb..71abfe9 100644

> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

> @@ -2654,7 +2654,13 @@ static void amdgpu_device_xgmi_reset_func(struct 
> work_struct *__work)

>  struct amdgpu_device *adev =

>  container_of(__work, struct amdgpu_device, 
> xgmi_reset_work);

>

> -   adev->asic_reset_res =  amdgpu_asic_reset(adev);

> +  if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)

> +  adev->asic_reset_res = (adev->in_baco == false) ?

> +  
> amdgpu_device_baco_enter(adev->ddev) :

> +  
> amdgpu_device_baco_exit(adev->ddev);

> +  else

> +  adev->asic_reset_res = amdgpu_asic_reset(adev);

> +

>  if (adev->asic_reset_res)

>  DRM_WARN("ASIC reset failed with error, %d for drm dev, 
> %s",

>   adev->asic_reset_res, adev->ddev->unique); 
> @@ -3796,6 +3802,7 @@

> static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,

>  struct amdgpu_device *tmp_adev = NULL;

>  bool need_full_reset = *need_full_reset_arg, vram_lost = false;

>  int r = 0;

> +  int cpu = smp_processor_id();

>

>  /*

>   * ASIC reset has to be done on all HGMI hive nodes ASAP @@

> -3803,21 +3810,24 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info 
> *hive,

>   */

>  if (need_full_reset) {

>  list_for_each_entry(tmp_adev, device_list_handle, 
> gmc.xgmi.head) {

> -   /* For XGMI run all resets in parallel to 
> speed up the process */

> +  /*

> +  * For XGMI run all resets in parallel to speed 
> up the

> +  * process by scheduling the highpri wq on 
> different

> +  * cpus. For XGMI with baco reset, all nodes 
> must enter

> +  * baco within close proximity before anyone 
> exit.

> +  */

> 
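
The quoted hunk is truncated above before the actual queueing call; as a
rough sketch of the idea (binding each node's reset work to a different
CPU so the highpri work items really run in parallel), one might write
something like the following. This is illustrative only and not the exact
content of the patch:

    /* Spread the per-node reset work items across online CPUs so that
     * items queued on system_highpri_wq do not serialize on one CPU.
     * (Wrap-around handling for more nodes than CPUs is omitted.)
     */
    cpu = cpumask_first(cpu_online_mask);
    list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
            if (!queue_work_on(cpu, system_highpri_wq,
                               &tmp_adev->xgmi_reset_work))
                    r = -EALREADY;
            cpu = cpumask_next(cpu, cpu_online_mask);
    }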

Re: [PATCH] amd/amdgpu/sriov swSMU disable for sriov

2019-12-02 Thread Wang, Kevin(Yang)
[AMD Official Use Only - Internal Distribution Only]

Reviewed-by: Kevin Wang 
But it's better to optimize the flow of control in is_support_sw_smu()
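
For illustration, one way the control flow could be flattened while
keeping the behaviour of the patch (a sketch only, not the submitted
change):

    bool is_support_sw_smu(struct amdgpu_device *adev)
    {
            if (adev->asic_type == CHIP_VEGA20)
                    return amdgpu_dpm == 2;

            /* ARCTURUS and newer: under SR-IOV the smu is owned by the host. */
            if (adev->asic_type >= CHIP_ARCTURUS)
                    return !amdgpu_sriov_vf(adev);

            return false;
    }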

Best Regards,
Kevin



From: amd-gfx  on behalf of Jack Zhang 

Sent: Monday, December 2, 2019 7:05 PM
To: amd-gfx@lists.freedesktop.org 
Cc: Zhang, Jack (Jian) 
Subject: [PATCH] amd/amdgpu/sriov swSMU disable for sriov

For ARCTURUS and newer boards under an sriov platform, swSMU is not
supported because the smu ip block is commented out in the guest
driver.

Generally for sriov, initialization of the smu is moved to the host
driver. Thus, smu sw_init and hw_init will not be executed in the
guest driver.

Without the sw structure being initialized in the guest driver, swSMU
cannot declare itself supported.

Signed-off-by: Jack Zhang 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 36001a4..0b8a53b 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -531,7 +531,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
 if (adev->asic_type == CHIP_VEGA20)
 return (amdgpu_dpm == 2) ? true : false;
 else if (adev->asic_type >= CHIP_ARCTURUS)
-   return true;
+   if (amdgpu_sriov_vf(adev))
+   return false;
+   else
+   return true;
 else
 return false;
 }
--
2.7.4

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [RFC PATCH] drm/amdgpu: allocate entities on demand

2019-12-02 Thread Nirmoy


On 11/29/19 7:42 PM, Christian König wrote:

On 29.11.19 at 15:29, Nirmoy wrote:

Hi Christian,

On 11/26/19 10:45 AM, Christian König wrote:
It looks like a start, but there are numerous things which need to be 
fixed.


Question number one is: What's that good for? Entities are not the 
problem here. The real issue is the fence ring and the rq_list.


The rq_list could actually be made constant since it should never be 
changed by the entity. It is only changed for backward compatibility 
in drm_sched_entity_set_priority().


So I would start there and cleanup the 
drm_sched_entity_set_priority() to actually just set a new constant 
rq list instead.


I am missing some context here. Can you please explain a bit more? I 
looked over and over again but I still don't understand what you mean 
by a new constant rq list :/


Ok that needs a bit wider explanation.

The GPU scheduler consists mainly of drm_gpu_scheduler instances. Each 
of those instances contain multiple runqueues with different 
priorities (5 IIRC).


Now for each entity we give a list of runqueues where this entity can 
be served on, e.g. where the jobs which are pushed to the entities are 
executed.


The entity itself keeps a copy of that runqueue list because we have 
the drm_sched_entity_set_priority() which modifies this runqueue list.


But essentially that is complete overkill, the runqueue lists are 
constant for each amdgpu device, e.g. all contexts should use SDMA0 
and SDMA1 in the same way.


In other words, building the list of runqueues should happen only once 
and not for each context.

Okay, now I understand the real problem. Thanks for the detailed explanation.


Multiple approaches to fix this would be possible. One rather elegant 
solution would be to change the rq list into a scheduler instances 
list + priority.


Do you mean something like

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 684692a8ed76..ac67f8f098fa 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -81,7 +81,7 @@ enum drm_sched_priority {
 struct drm_sched_entity {
    struct list_head    list;
    struct drm_sched_rq *rq;
-   struct drm_sched_rq **rq_list;
+  struct drm_gpu_scheduler    **sched;
    unsigned int    num_rq_list;
    spinlock_t  rq_lock;




This way we would also fix the age old bug that changing the priority 
of a context could actually mess up already scheduled jobs.


The alternative I noted before would be to drop 
drm_sched_entity_set_priority() or change it into 
drm_sched_entity_set_runqueues().
I was working on it but then I got stuck on a "BUG: kernel NULL pointer 
dereference, address:" which I am still trying to figure out.


Regards,
Christian.





Then we could embed the fences in amdgpu_ctx_entity as dynamic array 
at the end of the structure.


And last we can start to dynamic allocate and initialize the 
amdgpu_ctx_entity() structures.


Regards,
Christian.





___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [error] Drm -> amdgpu Unrecoverable Machine Check

2019-12-02 Thread Christian König

Hi Yusuf,

First of all, I am using an NXP T1042D4RDB-64B, which has a 256 MB PCIe 
buffer according to its documentation. The PCIe memory range was set to 
256 MB in the .dts file and in the U-Boot configuration file. The driver 
was giving an error with exit code -12 (OUT_OF_MEMORY), but I was able to 
reach the Linux console.


That is an expected result. 256MB is not enough for the VRAM BAR and the 
doorbell BAR to fit into. But you can still use VGA emulation that way 
if I'm not completely mistaken.



Then I changed 256 MB to 4 GB in the .dtsi and U-Boot conf file.

How did you do this? Is your memory layout consistent?

See when you just changed one end address you might need to adjust other 
addresses as well.


Regards,
Christian.

On 02.12.19 at 14:32, Yusuf Altıparmak wrote:




I attached my dts file.

System is working fine when GPU is not plugged in.
*
*
*This is the last console log before freeze:*
[drm] amdgpu kernel modesetting enabled.
[drm] initializing kernel modesetting (POLARIS12 0x1002:0x6987
0x1787:0x2389 0x80).
[drm] register mmio base: 0x2020
fsl-fman-port ffe488000.port fm1-gb0: renamed from eth0
[drm] register mmio size: 262144
[drm] add ip block number 0 
[drm] add ip block number 1 
[drm] add ip block number 2 
[drm] add ip block number 3 
[drm] add ip block number 4 
[drm] add ip block number 5 
[drm] add ip block number 6 
[drm] add ip block number 7 
[drm] add ip block number 8 
[drm] UVD is enabled in VM mode
[drm] UVD ENC is enabled in VM mode
[drm] VCE enabled in VM mode
ATOM BIOS: 113-ER16BFC-001
[drm] GPU posting now...
Disabling lock debugging due to kernel taint
Machine check in kernel mode.
Caused by (from MCSR=a000): Load Error Report
Guarded Load Error Report
Kernel panic - not syncing: Unrecoverable Machine check
CPU: 1 PID: 2023 Comm: udevd Tainted: G   M    4.19.26+gc0c2141 #1
Call Trace:





___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org  
https://lists.freedesktop.org/mailman/listinfo/amd-gfx  





Christian König wrote the following on Mon, 2 Dec 2019 at 15:28:


Hi Yusuf,

On 02.12.19 at 12:41, Yusuf Altıparmak wrote:

My embedded board is freezing when I put E9171 on PCIe. What is
the meaning of Unrecoverable Machine Check error about GPU?


Well see the explanation on Wikipedia for example:
https://en.wikipedia.org/wiki/Machine-check_exception



In general it means you have messed up something in your hardware
configuration.


Could PCIe settings in .dts file cause this problem?


Possible, but rather unlikely. My best guess is that it is some
problem with the power supply.


If it is, is there any sample PCIe configuration for E9171?


The E9171 is just a PCIe device, so the dtsi is actually rather
uninteresting. What we really need is a full dmesg and maybe lspci
output would help as well.

Regards,
Christian.



Hi Christian,

First of all, I am using an NXP T1042D4RDB-64B, which has a 256 MB PCIe 
buffer according to its documentation. The PCIe memory range was set to 
256 MB in the .dts file and in the U-Boot configuration file. The driver 
was giving an error with exit code -12 (OUT_OF_MEMORY), but I was able to 
reach the Linux console.


[    5.512922] [drm] amdgpu kernel modesetting enabled.
[    5.517065] [drm] initializing kernel modesetting (POLARIS12 
0x1002:0x6987 0x1787:0x2389 0x80).

[    5.524507] amdgpu 0001:01:00.0: Fatal error during GPU init
[    5.529296] amdgpu: probe of 0001:01:00.0 failed with error -12

Then I changed 256 MB to 4 GB in the .dtsi and U-Boot conf file. I also 
changed the 64 KB I/O size to 1 MB. When I did this, I wasn't able to 
reach the Linux console because the board was freezing, but the driver 
was successful this time. I already mentioned the successful driver 
console logs above.


This is lspci -v when the GPU is plugged in and the memory size is 256 MB.

root@t1042d4rdb-64b:~# lspci -v
:00:00.0 PCI bridge: Freescale Semiconductor Inc Device 0824 (rev 
11) (prog-if 00 [Normal decode])
        Device tree node: 
/sys/firmware/devicetree/base/pcie@ffe24/pcie@0

        Flags: bus master, fast devsel, latency 0, IRQ 20
        Memory at  (32-bit, non-prefetchable)
  

Re: [error] Drm -> amdgpu Unrecoverable Machine Check

2019-12-02 Thread Yusuf Altıparmak
> That is an expected result. 256MB is not enough for the VRAM BAR and the
> doorbell BAR to fit into. But you can still use VGA emulation that way if
> I'm not completely mistaken.
>

Hmm, then what procedure should I follow to get VGA output? It seems the
graphics card does not have a VGA output. And isn't there any way to use
this GPU with a 256MB-buffered PCIe?



> Then I changed 256 MB to 4 GB in the .dtsi and U-Boot conf file.
>
> How did you do this? Is your memory layout consistent?
>
> See when you just changed one end address you might need to adjust other
> addresses as well.
>
> Regards,
> Christian.
>

It seems it's not consistent. At first I changed the .dtsi so that it is
consistent. I only changed the PCIe1 device memory and I/O range. It gave
the same error. Then I also changed the PCIe2 and PCIe3 devices' starting
addresses according to the PCIe1 device end address in the .dtsi. I am not
sure whether I did this correctly, but it gave the same result again. This
is why I asked "If it is, is there any sample PCIe configuration for E9171?".


This is a code piece from my T104xRDB.h file, which contains the PCIe
configuration variables for U-Boot. The changes I made are not
consistent. RAM size is 8 GB.

#ifdef CONFIG_PCI
/* controller 1, direct to uli, tgtid 3, Base address 2 */
#ifdef CONFIG_PCIE1
#define CONFIG_SYS_PCIE1_MEM_VIRT 0x8000
#define CONFIG_SYS_PCIE1_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE1_MEM_PHYS 0xcull
#define CONFIG_SYS_PCIE1_MEM_SIZE 0x1000 /* 256M */
#define CONFIG_SYS_PCIE1_IO_VIRT 0xf800
#define CONFIG_SYS_PCIE1_IO_BUS 0x
#define CONFIG_SYS_PCIE1_IO_PHYS 0xff800ull
#define CONFIG_SYS_PCIE1_IO_SIZE 0x0001 /* 64k */
#endif

/* controller 2, Slot 2, tgtid 2, Base address 201000 */
#ifdef CONFIG_PCIE2
#define CONFIG_SYS_PCIE2_MEM_VIRT 0x9000
#define CONFIG_SYS_PCIE2_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE2_MEM_PHYS 0xc1000ull
#define CONFIG_SYS_PCIE2_MEM_SIZE 0x1 /* 4GB */
#define CONFIG_SYS_PCIE2_IO_VIRT 0xf801
#define CONFIG_SYS_PCIE2_IO_BUS 0x
#define CONFIG_SYS_PCIE2_IO_PHYS 0xff801ull
#define CONFIG_SYS_PCIE2_IO_SIZE 0x0010 /* 1M */
#endif

/* controller 3, Slot 1, tgtid 1, Base address 202000 */
#ifdef CONFIG_PCIE3
#define CONFIG_SYS_PCIE3_MEM_VIRT 0x19000 /* I changed this to
0x19000 instead of 0xa000 because the PCIE2 end address changed. The
end address is now at 4 GB (0x1 hex, so I added 0x9000 with 1) */
#define CONFIG_SYS_PCIE3_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE3_MEM_PHYS 0xd1000ull
#define CONFIG_SYS_PCIE3_MEM_SIZE 0x1000 /* 256M */
#define CONFIG_SYS_PCIE3_IO_VIRT 0xf811
#define CONFIG_SYS_PCIE3_IO_BUS 0x
#define CONFIG_SYS_PCIE3_IO_PHYS 0xff811ull /* Did the same for IO */
#define CONFIG_SYS_PCIE3_IO_SIZE 0x0001 /* 64k */
#endif

/* controller 4, Base address 203000 */
#ifdef CONFIG_PCIE4
#define CONFIG_SYS_PCIE4_MEM_VIRT 0x2 /* SAME STEP AS PCIE3.
This time I added 256 MB (0x1000) */
#define CONFIG_SYS_PCIE4_MEM_BUS 0xe000
#define CONFIG_SYS_PCIE4_MEM_PHYS 0xd2000ull
#define CONFIG_SYS_PCIE4_MEM_SIZE 0x1000 /* 256M */
#define CONFIG_SYS_PCIE4_IO_VIRT 0xf812
#define CONFIG_SYS_PCIE4_IO_BUS 0x
#define CONFIG_SYS_PCIE4_IO_PHYS 0xff812ull /* Did the same for IO */
#define CONFIG_SYS_PCIE4_IO_SIZE 0x0001 /* 64k */
#endif




On 02.12.19 at 14:32, Yusuf Altıparmak wrote:
>
>
>> I attached my dts file.
>>
>> System is working fine when GPU is not plugged in.
>>
>> *This is the last console log before freeze:*
>> [drm] amdgpu kernel modesetting enabled.
>>
>> [drm] initializing kernel modesetting (POLARIS12 0x1002:0x6987
>> 0x1787:0x2389 0x80).
>> [drm] register mmio base: 0x2020
>>
>> fsl-fman-port ffe488000.port fm1-gb0: renamed from eth0
>>
>> [drm] register mmio size: 262144
>>
>> [drm] add ip block number 0 
>>
>> [drm] add ip block number 1 
>>
>> [drm] add ip block number 2 
>>
>> [drm] add ip block number 3 
>>
>> [drm] add ip block number 4 
>>
>> [drm] add ip block number 5 
>>
>> [drm] add ip block number 6 
>>
>> [drm] add ip block number 7 
>>
>> [drm] add ip block number 8 
>>
>> [drm] UVD is enabled in VM mode
>>
>> [drm] UVD ENC is enabled in VM mode
>>
>> [drm] VCE enabled in VM mode
>>
>> ATOM BIOS: 113-ER16BFC-001
>>
>> [drm] GPU posting now...
>>
>> Disabling lock debugging due to kernel taint
>>
>> Machine check in kernel mode.
>>
>> Caused by (from MCSR=a000): Load Error Report
>>
>> Guarded Load Error Report
>>
>> Kernel panic - not syncing: Unrecoverable Machine check
>>
>> CPU: 1 PID: 2023 Comm: udevd Tainted: G   M  4.19.26+gc0c2141
>> #1
>> Call Trace:
>>
>>
>>
>> ___
>> amd-gfx mailing 
>> listamd-gfx@lists.freedesktop.orghttps://lists.freedesktop.org/mailman/listinfo/amd-gfx
>>  
>> 

Re: [PATCH][next] drm/amd/display: fix double assignment to msg_id field

2019-12-02 Thread Alex Deucher
Applied.  thanks!

Alex

On Wed, Nov 27, 2019 at 11:51 AM Harry Wentland  wrote:
>
> On 2019-11-20 12:22 p.m., Colin King wrote:
> > From: Colin Ian King 
> >
> > The msg_id field is being assigned twice. Fix this by replacing the second
> > assignment with an assignment to msg_size.
> >
> > Addresses-Coverity: ("Unused value")
> > Fixes: 11a00965d261 ("drm/amd/display: Add PSP block to verify HDCP2.2 
> > steps")
> > Signed-off-by: Colin Ian King 
>
> Reviewed-by: Harry Wentland 
>
> Harry
>
> > ---
> >  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c 
> > b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
> > index 2dd5feec8e6c..6791c5844e43 100644
> > --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
> > +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
> > @@ -42,7 +42,7 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,
> >   in->process.msg2_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
> >   in->process.msg2_desc.msg_size = 0;
> >   in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
> > - in->process.msg3_desc.msg_id = 0;
> > + in->process.msg3_desc.msg_size = 0;
> >  }
> >  enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp 
> > *hdcp)
> >  {
> >
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 0/5] drm/amd/powerplay: Remove unneeded variable

2019-12-02 Thread Alex Deucher
Applied the series.  Thanks!

Alex

On Wed, Nov 27, 2019 at 12:42 PM zhengbin  wrote:
>
> zhengbin (5):
>   drm/amd/powerplay: Remove unneeded variable 'result' in smu10_hwmgr.c
>   drm/amd/powerplay: Remove unneeded variable 'result' in vega10_hwmgr.c
>   drm/amd/powerplay: Remove unneeded variable 'ret' in smu7_hwmgr.c
>   drm/amd/powerplay: Remove unneeded variable 'result' in vega12_hwmgr.c
>   drm/amd/powerplay: Remove unneeded variable 'ret' in amdgpu_smu.c
>
>  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 8 +++-
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  | 3 +--
>  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c   | 4 +---
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 3 +--
>  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 4 +---
>  5 files changed, 7 insertions(+), 15 deletions(-)
>
> --
> 2.7.4
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 0/4] drm/amd/display: Remove unneeded semicolon

2019-12-02 Thread Alex Deucher
Applied the series.  Thanks!

Alex

On Thu, Nov 28, 2019 at 9:46 AM Harry Wentland  wrote:
>
> Series is
> Reviewed-by: Harry Wentland 
>
> Harry
>
> On 2019-11-27 9:31 p.m., zhengbin wrote:
> > zhengbin (4):
> >   drm/amd/display: Remove unneeded semicolon in bios_parser.c
> >   drm/amd/display: Remove unneeded semicolon in bios_parser2.c
> >   drm/amd/display: Remove unneeded semicolon in hdcp.c
> >   drm/amd/display: Remove unneeded semicolon in display_rq_dlg_calc_21.c
> >
> >  drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 2 +-
> >  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c| 2 +-
> >  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 4 ++--
> >  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c   | 2 +-
> >  4 files changed, 5 insertions(+), 5 deletions(-)
> >
> > --
> > 2.7.4
> >
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amgdpu: add cache flush workaround to gfx8 emit_fence

2019-12-02 Thread Alex Deucher
On Thu, Nov 28, 2019 at 6:47 AM Pierre-Eric Pelloux-Prayer
 wrote:
>
> The same workaround is used for gfx7.
> Both PAL and Mesa use it for gfx8 too, so port this commit to
> gfx_v8_0_ring_emit_fence_gfx.
>
> Signed-off-by: Pierre-Eric Pelloux-Prayer 

Reviewed-by: Alex Deucher 

> ---
>  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 22 +++---
>  1 file changed, 19 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
> b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> index 80b79583dffe..dcd747bef391 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> @@ -6183,7 +6183,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct 
> amdgpu_ring *ring, u64 addr,
> bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
> bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
>
> -   /* EVENT_WRITE_EOP - flush caches, send int */
> +   /* Workaround for cache flush problems. First send a dummy EOP
> +* event down the pipe with seq one below.
> +*/
> +   amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
> +   amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
> +EOP_TC_ACTION_EN |
> +EOP_TC_WB_ACTION_EN |
> +EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
> +EVENT_INDEX(5)));
> +   amdgpu_ring_write(ring, addr & 0xfffc);
> +   amdgpu_ring_write(ring, (upper_32_bits(addr) & 0x) |
> +   DATA_SEL(1) | INT_SEL(0));
> +   amdgpu_ring_write(ring, lower_32_bits(seq - 1));
> +   amdgpu_ring_write(ring, upper_32_bits(seq - 1));
> +
> +   /* Then send the real EOP event down the pipe:
> +* EVENT_WRITE_EOP - flush caches, send int */
> amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
> amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
>  EOP_TC_ACTION_EN |
> @@ -6926,7 +6942,7 @@ static const struct amdgpu_ring_funcs 
> gfx_v8_0_ring_funcs_gfx = {
> 5 +  /* COND_EXEC */
> 7 +  /* PIPELINE_SYNC */
> VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
> -   8 +  /* FENCE for VM_FLUSH */
> +   12 +  /* FENCE for VM_FLUSH */
> 20 + /* GDS switch */
> 4 + /* double SWITCH_BUFFER,
>the first COND_EXEC jump to the place just
> @@ -6938,7 +6954,7 @@ static const struct amdgpu_ring_funcs 
> gfx_v8_0_ring_funcs_gfx = {
> 31 + /* DE_META */
> 3 + /* CNTX_CTRL */
> 5 + /* HDP_INVL */
> -   8 + 8 + /* FENCE x2 */
> +   12 + 12 + /* FENCE x2 */
> 2, /* SWITCH_BUFFER */
> .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
> .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
> --
> 2.24.0.rc0
>
> ___
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amd/display: remove redundant assignment to variable v_total

2019-12-02 Thread Alex Deucher
Applied.  thanks!

Alex

On Mon, Dec 2, 2019 at 10:47 AM Colin King  wrote:
>
> From: Colin Ian King 
>
> The variable v_total is being initialized with a value that is never
> read and it is being updated later with a new value.  The initialization
> is redundant and can be removed.
>
> Addresses-Coverity: ("Unused value")
> Signed-off-by: Colin Ian King 
> ---
>  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c 
> b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
> index 16e69bbc69aa..fa57885503d4 100644
> --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
> +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
> @@ -122,7 +122,7 @@ static unsigned int calc_v_total_from_refresh(
> const struct dc_stream_state *stream,
> unsigned int refresh_in_uhz)
>  {
> -   unsigned int v_total = stream->timing.v_total;
> +   unsigned int v_total;
> unsigned int frame_duration_in_ns;
>
> frame_duration_in_ns =
> --
> 2.24.0
>
> ___
> dri-devel mailing list
> dri-de...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH v2] drm/amd/display: Reduce HDMI pixel encoding if max clock is exceeded

2019-12-02 Thread Thomas Anderson
For high-res (8K) or HFR (4K120) displays, using uncompressed pixel
formats like YCbCr444 would exceed the bandwidth of HDMI 2.0, so the
"interesting" modes would be disabled, leaving only low-res or low
framerate modes.

This change lowers the pixel encoding to 4:2:2 or 4:2:0 if the max TMDS
clock is exceeded. Verified that 8K30 and 4K120 are now available and
working with a Samsung Q900R over an HDMI 2.0b link from a Radeon 5700.
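
As a rough sanity check (illustrative arithmetic, not part of the patch
text): the standard CTA timings for both 7680x4320@30 and 3840x2160@120
use a 1188 MHz pixel clock, which at 8-bit RGB/4:4:4 is far above the
600 MHz TMDS clock limit of HDMI 2.0. Dropping to YCbCr 4:2:0 halves the
TMDS clock: 1188 MHz / 2 = 594 MHz <= 600 MHz, so those modes fit on the
link again.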

Signed-off-by: Thomas Anderson 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 45 ++-
 1 file changed, 23 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7aac9568d3be..803e59d97411 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3356,27 +3356,21 @@ get_output_color_space(const struct dc_crtc_timing 
*dc_crtc_timing)
return color_space;
 }
 
-static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
-{
-   if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-   return;
-
-   timing_out->display_color_depth--;
-}
-
-static void adjust_colour_depth_from_display_info(struct dc_crtc_timing 
*timing_out,
-   const struct drm_display_info 
*info)
+static bool adjust_colour_depth_from_display_info(
+   struct dc_crtc_timing *timing_out,
+   const struct drm_display_info *info)
 {
+   enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
-   if (timing_out->display_color_depth <= COLOR_DEPTH_888)
-   return;
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
normalized_clk /= 2;
/* Adjusting pix clock following on HDMI spec based on colour 
depth */
-   switch (timing_out->display_color_depth) {
+   switch (depth) {
+   case COLOR_DEPTH_888:
+   break;
case COLOR_DEPTH_101010:
normalized_clk = (normalized_clk * 30) / 24;
break;
@@ -3387,14 +3381,15 @@ static void 
adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
normalized_clk = (normalized_clk * 48) / 24;
break;
default:
-   return;
+   /* The above depths are the only ones valid for HDMI. */
+   return false;
}
-   if (normalized_clk <= info->max_tmds_clock)
-   return;
-   reduce_mode_colour_depth(timing_out);
-
-   } while (timing_out->display_color_depth > COLOR_DEPTH_888);
-
+   if (normalized_clk <= info->max_tmds_clock) {
+   timing_out->display_color_depth = depth;
+   return true;
+   }
+   } while (--depth > COLOR_DEPTH_666);
+   return false;
 }
 
 static void fill_stream_properties_from_drm_display_mode(
@@ -3474,8 +3469,14 @@ static void fill_stream_properties_from_drm_display_mode(
 
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-   if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
-   adjust_colour_depth_from_display_info(timing_out, info);
+   if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+   if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+   drm_mode_is_420_also(info, mode_in) &&
+   timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+   timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+   adjust_colour_depth_from_display_info(timing_out, info);
+   }
+   }
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
-- 
2.24.0.393.g34dc348eaf-goog

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
