Re: [PATCH v4 4/4] drm/amd/amdgpu/vcn: Add RB decouple feature under SRIOV - P4

2023-10-16 Thread Leo Liu

The set looks good to me. The series is:

Reviewed-by: Leo Liu 

On 2023-10-16 12:54, Bokun Zhang wrote:

- In VCN 4 SRIOV code path, add code to enable RB decouple feature

Signed-off-by: Bokun Zhang 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 71 +--
  1 file changed, 55 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 88e17f5e20b2..bf07aa200030 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -176,9 +176,6 @@ static int vcn_v4_0_sw_init(void *handle)

AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
}
  
-		if (amdgpu_sriov_vf(adev))

-   fw_shared->present_flag_0 |= 
cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
-
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
@@ -1209,6 +1206,24 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
return 0;
  }
  
+static int vcn_v4_0_init_ring_metadata(struct amdgpu_device *adev, uint32_t vcn_inst, struct amdgpu_ring *ring_enc)

+{
+   struct amdgpu_vcn_rb_metadata *rb_metadata = NULL;
+   uint8_t *rb_ptr = (uint8_t *)ring_enc->ring;
+
+   rb_ptr += ring_enc->ring_size;
+   rb_metadata = (struct amdgpu_vcn_rb_metadata *)rb_ptr;
+
+   memset(rb_metadata, 0, sizeof(struct amdgpu_vcn_rb_metadata));
+   rb_metadata->size = sizeof(struct amdgpu_vcn_rb_metadata);
+   rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+   rb_metadata->present_flag_0 |= 
cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
+   rb_metadata->version = 1;
+   rb_metadata->ring_id = vcn_inst & 0xFF;
+
+   return 0;
+}
+
  static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
  {
int i;
@@ -1334,11 +1349,30 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device 
*adev)
rb_enc_addr = ring_enc->gpu_addr;
  
  		rb_setup->is_rb_enabled_flags |= RB_ENABLED;

-   rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
-   rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
-   rb_setup->rb_size = ring_enc->ring_size / 4;
fw_shared->present_flag_0 |= 
cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
  
+		if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {

+   vcn_v4_0_init_ring_metadata(adev, i, ring_enc);
+
+   memset((void *)&rb_setup->rb_info, 0, sizeof(struct 
amdgpu_vcn_rb_setup_info) * MAX_NUM_VCN_RB_SETUP);
+   if (!(adev->vcn.harvest_config & (1 << 0))) {
+   rb_setup->rb_info[0].rb_addr_lo = 
lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
+   rb_setup->rb_info[0].rb_addr_hi = 
upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
+   rb_setup->rb_info[0].rb_size = 
adev->vcn.inst[0].ring_enc[0].ring_size / 4;
+   }
+   if (!(adev->vcn.harvest_config & (1 << 1))) {
+   rb_setup->rb_info[2].rb_addr_lo = 
lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
+   rb_setup->rb_info[2].rb_addr_hi = 
upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
+   rb_setup->rb_info[2].rb_size = 
adev->vcn.inst[1].ring_enc[0].ring_size / 4;
+   }
+   fw_shared->decouple.is_enabled = 1;
+   fw_shared->present_flag_0 |= 
cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
+   } else {
+   rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+   rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+   rb_setup->rb_size = ring_enc->ring_size / 4;
+   }
+
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
@@ -1810,6 +1844,7 @@ static struct amdgpu_ring_funcs 
vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
+   .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2023,16 +2058,20 @@ static int vcn_v4_0_process_interrupt(struct 
amdgpu_device *adev, struct amdgpu_
  {
uint32_t ip_instance;
  
-	switch (entry->client_id) {

-   case SOC15_IH_CLIENTID_VCN:
-   ip_instance = 0;
-   break;
-   case SOC15_IH_CLIENTI

Re: [PATCH v4 1/7] drm/amdgpu/vcn: Add MMSCH v4_0_3 support for sriov

2023-08-08 Thread Leo Liu

The series is:

Acked-by: Leo Liu .


On 2023-08-08 12:26, Samir Dhume wrote:

 The structures are the same as v4_0 except for the
 init header

Signed-off-by: Samir Dhume 
---
  drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h | 37 +++
  1 file changed, 37 insertions(+)
  create mode 100644 drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h

diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h 
b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h
new file mode 100644
index ..db7eb5260295
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V4_0_3_H__
+#define __MMSCH_V4_0_3_H__
+
+#include "amdgpu_vcn.h"
+#include "mmsch_v4_0.h"
+
+struct mmsch_v4_0_3_init_header {
+   uint32_t version;
+   uint32_t total_size;
+   struct mmsch_v4_0_table_info vcn0;
+   struct mmsch_v4_0_table_info mjpegdec0[4];
+   struct mmsch_v4_0_table_info mjpegdec1[4];
+};
+#endif


Re: [PATCH v3 3/7] drm/amdgpu/vcn: sriov support for vcn_v4_0_3

2023-08-08 Thread Leo Liu



On 2023-07-28 15:15, Samir Dhume wrote:

initialization table handshake with mmsch

Signed-off-by: Samir Dhume 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 257 +---
  1 file changed, 233 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 411c1d802823..b978265b2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -31,6 +31,7 @@
  #include "soc15d.h"
  #include "soc15_hw_ip.h"
  #include "vcn_v2_0.h"
+#include "mmsch_v4_0_3.h"
  
  #include "vcn/vcn_4_0_3_offset.h"

  #include "vcn/vcn_4_0_3_sh_mask.h"
@@ -44,6 +45,7 @@
  #define VCN_VID_SOC_ADDRESS_2_0   0x1fb00
  #define VCN1_VID_SOC_ADDRESS_3_0  0x48300
  
+static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev);

  static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
  static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
  static int vcn_v4_0_3_set_powergating_state(void *handle,
@@ -130,6 +132,10 @@ static int vcn_v4_0_3_sw_init(void *handle)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
  
+	r = amdgpu_virt_alloc_mm_table(adev);


Since this function is not for bare-metal, please move amdgpu_sriov_vf() 
check from inside of the function to here, to avoid confusion.




+   if (r)
+   return r;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
  
@@ -167,6 +173,8 @@ static int vcn_v4_0_3_sw_fini(void *handle)

drm_dev_exit(idx);
}
  
+	amdgpu_virt_free_mm_table(adev);


Same as above.


Regards,

Leo




+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -189,33 +197,47 @@ static int vcn_v4_0_3_hw_init(void *handle)
struct amdgpu_ring *ring;
int i, r, vcn_inst;
  
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

-   vcn_inst = GET_INST(VCN, i);
-   ring = &adev->vcn.inst[i].ring_enc[0];
+   if (amdgpu_sriov_vf(adev)) {
+   r = vcn_v4_0_3_start_sriov(adev);
+   if (r)
+   goto done;
  
-		if (ring->use_doorbell) {

-   adev->nbio.funcs->vcn_doorbell_range(
-   adev, ring->use_doorbell,
-   (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
-   9 * vcn_inst,
-   adev->vcn.inst[i].aid_id);
-
-   WREG32_SOC15(
-   VCN, GET_INST(VCN, ring->me),
-   regVCN_RB1_DB_CTRL,
-   ring->doorbell_index
-   << 
VCN_RB1_DB_CTRL__OFFSET__SHIFT |
-   VCN_RB1_DB_CTRL__EN_MASK);
-
-   /* Read DB_CTRL to flush the write DB_CTRL command. */
-   RREG32_SOC15(
-   VCN, GET_INST(VCN, ring->me),
-   regVCN_RB1_DB_CTRL);
+   for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+   ring = &adev->vcn.inst[i].ring_enc[0];
+   ring->wptr = 0;
+   ring->wptr_old = 0;
+   vcn_v4_0_3_unified_ring_set_wptr(ring);
+   ring->sched.ready = true;
}
+   } else {
+   for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+   vcn_inst = GET_INST(VCN, i);
+   ring = &adev->vcn.inst[i].ring_enc[0];
+
+   if (ring->use_doorbell) {
+   adev->nbio.funcs->vcn_doorbell_range(
+   adev, ring->use_doorbell,
+   (adev->doorbell_index.vcn.vcn_ring0_1 
<< 1) +
+   9 * vcn_inst,
+   adev->vcn.inst[i].aid_id);
+
+   WREG32_SOC15(
+   VCN, GET_INST(VCN, ring->me),
+   regVCN_RB1_DB_CTRL,
+   ring->doorbell_index
+   << 
VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+   VCN_RB1_DB_CTRL__EN_MASK);
+
+   /* Read DB_CTRL to flush the write DB_CTRL 
command. */
+   RREG32_SOC15(
+   VCN, GET_INST(VCN, ring->me),
+   regVCN_RB1_DB_CTRL);
+   }
  
-		r = amdgpu_ring_test_helper(ring);

-   if (r)
-   goto done;
+   r = amdgpu_ring_test_helper(ring);
+   if (r)

Re: [PATCH] drm/amdgpu: Enabling FW workaround through shared memory for VCN4_0_2

2023-07-18 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2023-07-17 23:20, sguttula wrote:

This patch will enable VCN FW workaround using
DRM KEY INJECT WORKAROUND method,
which is helping in fixing the secure playback.

Signed-off-by: sguttula 

---

Changes in v2:
-updated commit message as per veera's feedback

Changes in v3:
-updated commit message as enabling for 402
-updated the logic as per leo, feedback
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 9 +
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c   | 6 ++
  2 files changed, 15 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1f1d7dc94f90..a3eed90b6af0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -161,6 +161,7 @@
} while (0)
  
  #define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)

+#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
  #define AMDGPU_VCN_FW_SHARED_FLAG_0_RB(1 << 6)
  #define AMDGPU_VCN_MULTI_QUEUE_FLAG   (1 << 8)
  #define AMDGPU_VCN_SW_RING_FLAG   (1 << 9)
@@ -180,6 +181,8 @@
  #define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
  #define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
  
+#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2

+
  enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -343,6 +346,11 @@ struct amdgpu_fw_shared_rb_setup {
uint32_t  reserved[6];
  };
  
+struct amdgpu_fw_shared_drm_key_wa {

+   uint8_t  method;
+   uint8_t  reserved[3];
+};
+
  struct amdgpu_vcn4_fw_shared {
uint32_t present_flag_0;
uint8_t pad[12];
@@ -352,6 +360,7 @@ struct amdgpu_vcn4_fw_shared {
uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
+   struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
  };
  
  struct amdgpu_vcn_fwlog {

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index e8c02ae10163..16ee73cfc3a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -169,6 +169,12 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags 
& AMD_IS_APU) ?
AMDGPU_VCN_SMU_DPM_INTERFACE_APU : 
AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
  
+		if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) {

+   fw_shared->present_flag_0 |= 
AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT;
+   fw_shared->drm_key_wa.method =
+   
AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
+   }
+
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= 
cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
  


Re: [PATCH] drm/amdgpu: allow secure submission on VCN4 ring

2023-07-18 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2023-07-17 13:27, sguttula wrote:

This patch will enable secure decode playback on VCN4_0_2

Signed-off-by: sguttula 

---
Changes in v2:
-updated commit message only enabling for VCN402
-updated the logic as per Leo's feedback
---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 8 ++--
  1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index e8c02ae10163..d2d89bb711b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1801,7 +1801,7 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct 
amdgpu_cs_parser *p,
return 0;
  }
  
-static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {

+static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
@@ -1846,7 +1846,11 @@ static void vcn_v4_0_set_unified_ring_funcs(struct 
amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
  
-		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_unified_ring_vm_funcs;

+   if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2))
+   
vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true;
+
+   adev->vcn.inst[i].ring_enc[0].funcs =
+  (const struct amdgpu_ring_funcs 
*)&vcn_v4_0_unified_ring_vm_funcs;
adev->vcn.inst[i].ring_enc[0].me = i;
  
  		DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);


Re: [PATCH 1/2] drm/amdgpu: allow secure submission on VCN4 ring

2023-07-17 Thread Leo Liu
Since the changes will affect multiple ASICs, if you only tested with 
VCN4_0_4, please just apply the set to that HW.


Regards,

Leo


On 2023-07-16 23:15, Guttula, Suresh wrote:


Hi Leo,

There are two issues here.

This change fixing the Crash while secure playback and we see below error:
2023-07-14T15:51:24.837592Z ERR kernel: [  129.255823] amdgpu :c2:00.0: amdgpu: 
secure submissions not supported on ring 
2023-07-14T15:51:24.837601Z ERR kernel: [  129.255827] [drm:amdgpu_job_run] 
*ERROR* Error scheduling IBs (-22)
2023-07-14T15:51:24.837603Z INFO kernel: [  129.255834] [drm] Skip scheduling 
IBs!

The second one : https://patchwork.freedesktop.org/patch/547587/
Fixing the garbage/corruption.

With both patches playback working fine.

Thanks,
Suresh G

-Original Message-
From: Liu, Leo 
Sent: Friday, July 14, 2023 7:08 PM
To: Guttula, Suresh ; Deucher, Alexander 
; Koenig, Christian 
Cc: amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/2] drm/amdgpu: allow secure submission on VCN4 ring


On 2023-07-14 05:44, sguttula wrote:

This patch will enable secure decode playback on VCN4

Signed-off-by: sguttula 
---
   drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 1 +
   1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index e8c02ae10163..d199f87febd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1804,6 +1804,7 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct 
amdgpu_cs_parser *p,
   static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
+   .secure_submission_supported = true,

We should set it to true  with VCN4_0_4 only for now, and check either this 
boolean or VCN4_0_4 with your implementation from patch 2

Regards,

Leo



.nop = VCN_ENC_CMD_NO_OP,
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,


Re: [PATCH 1/2] drm/amdgpu: allow secure submission on VCN4 ring

2023-07-14 Thread Leo Liu



On 2023-07-14 05:44, sguttula wrote:

This patch will enable secure decode playback on VCN4

Signed-off-by: sguttula 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index e8c02ae10163..d199f87febd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1804,6 +1804,7 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct 
amdgpu_cs_parser *p,
  static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
+   .secure_submission_supported = true,


We should set it to true  with VCN4_0_4 only for now, and check either 
this boolean or VCN4_0_4 with your implementation from patch 2


Regards,

Leo



.nop = VCN_ENC_CMD_NO_OP,
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,


Re: [PATCH v3 2/2] drm/amdgpu: update kernel vcn ring test

2023-07-10 Thread Leo Liu



On 2023-07-10 16:19, Liu, Leo wrote:

[AMD Official Use Only - General]

[AMD Official Use Only - General]

-Original Message-
From: Jamadar, Saleemkhan 
Sent: Monday, July 10, 2023 12:54 PM
To: Jamadar, Saleemkhan ; amd-gfx@lists.freedesktop.org; Liu, Leo 
; Gopalakrishnan, Veerabadhran (Veera) 
; Sundararaju, Sathishkumar 

Cc: Koenig, Christian ; Rao, Srinath 

Subject: [PATCH v3 2/2] drm/amdgpu: update kernel vcn ring test

add session context buffer to decoder ring test for vcn v1 to v3.

v3 - correct the cmd for session ctx buf
v2 - add the buffer into IB (Leo liu)

Signed-off-by: Saleemkhan Jamadar 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 14 ++
  1 file changed, 14 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 76e9a2418286..4ee5f933e420 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -521,6 +521,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
struct dma_fence **fence)
  {
 u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+   uint64_t session_ctx_buf_gaddr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr 
+ 8192);
 struct amdgpu_device *adev = ring->adev;
 struct dma_fence *f = NULL;
 struct amdgpu_job *job;
@@ -546,6 +547,19 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring 
*ring,
 }
 ib->length_dw = 16;

This line above can be removed. With that the patch is:
Reviewed-by: Leo Liu 


I think we should rework the lines above this one for the msg buffer, 
put the session ctx buffer right behind it, and avoid filling the nop 
commands in between, to make the code cleaner.


Regards,

Leo



+   ib->ptr[ib->length_dw++] = PACKET0(adev->vcn.internal.data0, 0);
+   ib->ptr[ib->length_dw++] = lower_32_bits(session_ctx_buf_gaddr);
+   ib->ptr[ib->length_dw++] = PACKET0(adev->vcn.internal.data1, 0);
+   ib->ptr[ib->length_dw++] = upper_32_bits(session_ctx_buf_gaddr);
+   /* session ctx buffer cmd */
+   ib->ptr[ib->length_dw++] = PACKET0(adev->vcn.internal.cmd, 0xa);
+   ib->ptr[ib->length_dw++] = 0;
+   for (i = ib->length_dw; i < 32; i += 2) {
+   ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
+   ib->ptr[i+1] = 0;
+   }
+   ib->length_dw = 32;
+
 r = amdgpu_job_submit_direct(job, ring, &f);
 if (r)
 goto err_free;
--
2.25.1



Re: [PATCH 2/2] drm/amdgpu: use psp_execute_load_ip_fw_cmd_buf instead

2023-07-05 Thread Leo Liu

It looks good to me. The series is:

Reviewed-by: Leo Liu 

On 2023-06-27 00:48, Lang Yu wrote:

Replace the old ones with psp_execute_load_ip_fw_cmd_buf.

Signed-off-by: Lang Yu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 31 -
  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h |  2 --
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c |  9 +++
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  2 ++
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   |  4 +---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   |  4 +---
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c   |  4 +---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c   |  4 +---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c |  4 +---
  9 files changed, 20 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index a1cb541f315f..b61963112118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2474,21 +2474,11 @@ int psp_execute_load_ip_fw_cmd_buf(struct amdgpu_device 
*adev,
return ret;
  }
  
-static int psp_execute_non_psp_fw_load(struct psp_context *psp,

- struct amdgpu_firmware_info *ucode)
+static inline
+int psp_execute_non_psp_fw_load(struct psp_context *psp,
+   struct amdgpu_firmware_info *ucode)
  {
-   int ret = 0;
-   struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
-
-   ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
-   if (!ret) {
-   ret = psp_cmd_submit_buf(psp, ucode, cmd,
-psp->fence_buf_mc_addr);
-   }
-
-   release_psp_cmd_buf(psp);
-
-   return ret;
+   return psp_execute_load_ip_fw_cmd_buf(psp->adev, ucode, 0, 0, 0);
  }
  
  static int psp_load_smu_fw(struct psp_context *psp)

@@ -2946,19 +2936,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
return ret;
  }
  
-int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,

-   uint64_t cmd_gpu_addr, int cmd_size)
-{
-   struct amdgpu_firmware_info ucode = {0};
-
-   ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
-   AMDGPU_UCODE_ID_VCN0_RAM;
-   ucode.mc_addr = cmd_gpu_addr;
-   ucode.ucode_size = cmd_size;
-
-   return psp_execute_non_psp_fw_load(&adev->psp, &ucode);
-}
-
  int psp_ring_cmd_submit(struct psp_context *psp,
uint64_t cmd_buf_mc_addr,
uint64_t fence_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index bd324fed6237..e49984a9d570 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -459,8 +459,6 @@ extern int psp_wait_for_spirom_update(struct psp_context 
*psp, uint32_t reg_inde
uint32_t field_val, uint32_t mask, uint32_t 
msec_timeout);
  
  int psp_gpu_reset(struct amdgpu_device *adev);

-int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
-   uint64_t cmd_gpu_addr, int cmd_size);
  
  int psp_execute_load_ip_fw_cmd_buf(struct amdgpu_device *adev,

   struct amdgpu_firmware_info *ucode,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index d37ebd4402ef..1805cd042d34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -1257,3 +1257,12 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
  
  	return 0;

  }
+
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx)
+{
+   return psp_execute_load_ip_fw_cmd_buf(adev, NULL,
+   inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM : AMDGPU_UCODE_ID_VCN0_RAM,
+   adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+   
(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+  
(uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 92d5534df5f4..3ac5ad91ed08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -414,4 +414,6 @@ int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block);
  int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
  
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx);

+
  #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index c975aed2f6c7..74cd1522067c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -881,9 +881,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device 
*adev, bool indirect)
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
  
  	if (indirect)

-   psp_update_vcn_sram(adev,

Re: [PATCH] drm/amdgpu: Clear VCN cache when hw_init

2023-07-05 Thread Leo Liu
What Christian says is correct: especially during playback or encode, when 
suspend/resume happens, the driver will save the FW context, and after resume 
it will continue the job from where it left off during suspend. Does this 
apply to the SRIOV case? Since the changes are only within the SRIOV code, 
please make sure of that; also, please specify SRIOV in your patch 
subject and commit message.


Regards,

Leo


On 2023-06-30 07:38, Christian König wrote:

Am 20.06.23 um 15:29 schrieb Horace Chen:

[Why]
VCN will use some framebuffer space as its cache. It needs to
be reset when reset happens, such as FLR. Otherwise some error
may be kept after the reset.


Well this doesn't make sense at all.

The full content of adev->vcn.inst[i].cpu_addr is saved and restored 
during suspend/resume and IIRC GPU resets as well.


See functions amdgpu_vcn_suspend() and amdgpu_vcn_resume().

Please let Leo's team take a look at this and review the change before 
it is committed.


Regards,
Christian.



Signed-off-by: Horace Chen 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 3 +++
  1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c

index b48bb5212488..2db73a964031 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1292,6 +1292,7 @@ static int vcn_v4_0_start_sriov(struct 
amdgpu_device *adev)

  cache_size);
    cache_addr = adev->vcn.inst[i].gpu_addr + offset;
+    memset(adev->vcn.inst[i].cpu_addr + offset, 0, 
AMDGPU_VCN_STACK_SIZE);

  MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
  regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
  lower_32_bits(cache_addr));
@@ -1307,6 +1308,8 @@ static int vcn_v4_0_start_sriov(struct 
amdgpu_device *adev)

    cache_addr = adev->vcn.inst[i].gpu_addr + offset +
  AMDGPU_VCN_STACK_SIZE;
+    memset(adev->vcn.inst[i].cpu_addr + offset + 
AMDGPU_VCN_STACK_SIZE, 0,

+    AMDGPU_VCN_STACK_SIZE);
  MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
  regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
  lower_32_bits(cache_addr));




Re: [PATCH] drm/amdgpu/vcn: Need to unpause dpg before stop dpg

2023-06-22 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2023-06-20 21:29, Emily Deng wrote:

Need to unpause dpg first, or it will hit the following error during stop dpg:
"[drm] Register(1) [regUVD_POWER_STATUS] failed to reach value 0x0001 != 
0xn"

Signed-off-by: Emily Deng 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index b48bb5212488..259795098173 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -1424,8 +1424,10 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device 
*adev)
   */
  static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
  {
+   struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
  
+	vcn_v4_0_pause_dpg_mode(adev, inst_idx, &state);

/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);


[PATCH] drm/amdgpu: Use the sched from entity for amdgpu_cs trace

2023-01-16 Thread Leo Liu
The problem is that base sched hasn't been assigned yet at this moment, causing 
something like "ring=0"
all the time from trace.
 mpv:cs0-3473[002] .   129.047431: amdgpu_cs: 
bo_list=92ffb3070400, ring=0, dw=48, fences=0
 mpv:cs0-3473[002] .   129.089125: amdgpu_cs: 
bo_list=92ffba4b7000, ring=0, dw=48, fences=0
 mpv:cs0-3473[002] .   129.130987: amdgpu_cs: 
bo_list=92ff84831c00, ring=0, dw=48, fences=0
 mpv:cs0-3473[002] .   129.172478: amdgpu_cs: 
bo_list=92ffa4b84c00, ring=0, dw=48, fences=0
 mpv:cs0-3473[003] .   129.214000: amdgpu_cs: 
bo_list=92ffb405b400, ring=0, dw=48, fences=0
 mpv:cs0-3473[003] .   129.255763: amdgpu_cs: 
bo_list=92ffba4b7000, ring=0, dw=48, fences=0
 mpv:cs0-3473[003] .   129.297316: amdgpu_cs: 
bo_list=92ffa4b84c00, ring=0, dw=48, fences=0
 mpv:cs0-3473[003] .   129.338978: amdgpu_cs: 
bo_list=92ff84831c00, ring=0, dw=48, fences=0
 mpv:cs0-3473[003] .   129.380685: amdgpu_cs: 
bo_list=92ffba4b7000, ring=0, dw=48, fences=0
 mpv:cs0-3473[003] .   129.421993: amdgpu_cs: 
bo_list=92ffdb4c3400, ring=0, dw=48, fences=0

Fixes: 4624459c84d7 ("drm/amdgpu: add gang submit frontend v6")
Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 5e6ddc7e101c..6cd6ea765d37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
 
TP_fast_assign(
   __entry->bo_list = p->bo_list;
-  __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
+  __entry->ring = 
to_amdgpu_ring(job->base.entity->rq->sched)->idx;
   __entry->dw = ib->length_dw;
   __entry->fences = amdgpu_fence_count_emitted(
-   to_amdgpu_ring(job->base.sched));
+   to_amdgpu_ring(job->base.entity->rq->sched));
   ),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
  __entry->bo_list, __entry->ring, __entry->dw,
-- 
2.25.1



Re: [bug][vaapi][h264] The commit 7cbe08a930a132d84b4cf79953b00b074ec7a2a7 on certain video files leads to problems with VAAPI hardware decoding.

2022-12-09 Thread Leo Liu

Please try the latest AMDGPU driver:

https://gitlab.freedesktop.org/agd5f/linux/-/commits/amd-staging-drm-next/

On 2022-12-07 15:54, Alex Deucher wrote:

+ Leo, Thong

On Wed, Dec 7, 2022 at 3:43 PM Mikhail Gavrilov
 wrote:

On Wed, Dec 7, 2022 at 7:58 PM Alex Deucher  wrote:


What GPU do you have and what entries do you have in
sys/class/drm/card0/device/ip_discovery/die/0/UVD for the device?

I bisected the issue on the Radeon 6800M.

Parent commit for 7cbe08a930a132d84b4cf79953b00b074ec7a2a7 is
46dd2965bdd1c5a4f6499c73ff32e636fa8f9769.
For both commits ip_discovery is absent.
# ls /sys/class/drm/card0/device/ | grep ip
# ls /sys/class/drm/card1/device/ | grep ip

But from verbose info I see that player for
7cbe08a930a132d84b4cf79953b00b074ec7a2a7 use acceleration:
$ vlc -v Downloads/test_sample_480_2.mp4
VLC media player 3.0.18 Vetinari (revision )
[561f72097520] main libvlc: Running vlc with the default
interface. Use 'cvlc' to use vlc without interface.
[7fa224001190] mp4 demux warning: elst box found
[7fa224001190] mp4 demux warning: STTS table of 1 entries
[7fa224001190] mp4 demux warning: CTTS table of 78 entries
[7fa224001190] mp4 demux warning: elst box found
[7fa224001190] mp4 demux warning: STTS table of 1 entries
[7fa224001190] mp4 demux warning: elst old=0 new=1
[7fa224d19010] faad decoder warning: decoded zero sample
[7fa224001190] mp4 demux warning: elst old=0 new=1
[7fa214007030] gl gl: Initialized libplacebo v4.208.0 (API v208)
libva info: VA-API version 1.16.0
libva error: vaGetDriverNameByIndex() failed with unknown libva error,
driver_name = (null)
[7fa214007030] glconv_vaapi_x11 gl error: vaInitialize: unknown libva error
libva info: VA-API version 1.16.0
libva info: Trying to open /usr/lib64/dri/radeonsi_drv_video.so
libva info: Found init function __vaDriverInit_1_16
libva info: va_openDriver() returns 0
[7fa224c0b3a0] avcodec decoder: Using Mesa Gallium driver
23.0.0-devel for AMD Radeon RX 6800M (navi22, LLVM 15.0.4, DRM 3.42,
5.14.0-rc4-14-7cbe08a930a132d84b4cf79953b00b074ec7a2a7+) for hardware
decoding
[h264 @ 0x7fa224c3fa40] Using deprecated struct vaapi_context in decode.
[561f72174de0] pulse audio output warning: starting late (-9724 us)

And for 46dd2965bdd1c5a4f6499c73ff32e636fa8f9769 commit did not use
acceleration:
$ vlc -v Downloads/test_sample_480_2.mp4
VLC media player 3.0.18 Vetinari (revision )
[55f61ad35520] main libvlc: Running vlc with the default
interface. Use 'cvlc' to use vlc without interface.
[7fc7e8001190] mp4 demux warning: elst box found
[7fc7e8001190] mp4 demux warning: STTS table of 1 entries
[7fc7e8001190] mp4 demux warning: CTTS table of 78 entries
[7fc7e8001190] mp4 demux warning: elst box found
[7fc7e8001190] mp4 demux warning: STTS table of 1 entries
[7fc7e8001190] mp4 demux warning: elst old=0 new=1
[7fc7e8d19010] faad decoder warning: decoded zero sample
[7fc7e8001190] mp4 demux warning: elst old=0 new=1
[7fc7d8007030] gl gl: Initialized libplacebo v4.208.0 (API v208)
libva info: VA-API version 1.16.0
libva error: vaGetDriverNameByIndex() failed with unknown libva error,
driver_name = (null)
[7fc7d8007030] glconv_vaapi_x11 gl error: vaInitialize: unknown libva error
libva info: VA-API version 1.16.0
libva info: Trying to open /usr/lib64/dri/radeonsi_drv_video.so
libva info: Found init function __vaDriverInit_1_16
libva info: va_openDriver() returns 0
[7fc7d40b3260] vaapi generic error: profile(7) is not supported
[7fc7d8a089c0] gl gl: Initialized libplacebo v4.208.0 (API v208)
Failed to open VDPAU backend libvdpau_nvidia.so: cannot open shared
object file: No such file or directory
Failed to open VDPAU backend libvdpau_nvidia.so: cannot open shared
object file: No such file or directory
[7fc7d89e4f80] gl gl: Initialized libplacebo v4.208.0 (API v208)
[55f61ae12de0] pulse audio output warning: starting late (-13537 us)

So my bisect didn't make sense :(
Anyway can you reproduce the issue with the attached sample file and
vlc on fresh kernel (6.1-rc8)?

Thanks!

--
Best Regards,
Mike Gavrilov.


[PATCH] drm/amdgpu: enable Vangogh VCN indirect sram mode

2022-11-29 Thread Leo Liu
So that uses PSP to initialize HW.

Fixes: 0c2c02b6 (drm/amdgpu/vcn: add firmware support for dimgrey_cavefish)

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index c448c1bdf84d..72fa14ff862f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -156,6 +156,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 2):
fw_name = FIRMWARE_VANGOGH;
+   if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+   (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+   adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 16):
fw_name = FIRMWARE_DIMGREY_CAVEFISH;
-- 
2.25.1



Re: [PATCH] drm/amdgpu/vcn: update vcn4 fw shared data structure

2022-09-23 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2022-09-22 15:30, Ruijing Dong wrote:

update VF_RB_SETUP_FLAG, add SMU_DPM_INTERFACE_FLAG,
and corresponding change in VCN4.

Signed-off-by: Ruijing Dong 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 8 +++-
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c   | 4 
  2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 80b7a6cfd026..253ea6b159df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -161,7 +161,8 @@
  #define AMDGPU_VCN_SW_RING_FLAG   (1 << 9)
  #define AMDGPU_VCN_FW_LOGGING_FLAG(1 << 10)
  #define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
-#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 12)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
+#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
  
  #define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER	0x0001

  #define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER0x0001
@@ -171,6 +172,9 @@
  #define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
  #define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)
  
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)

+#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
+
  enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -335,7 +339,9 @@ struct amdgpu_vcn4_fw_shared {
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+   uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
+   struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
  };
  
  struct amdgpu_vcn_fwlog {

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index b6f73b87c47e..897a5ce9c9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -150,6 +150,10 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->present_flag_0 = 
cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
  
+		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);

+   fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags 
& AMD_IS_APU) ?
+   AMDGPU_VCN_SMU_DPM_INTERFACE_APU : 
AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= 
cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
  


Re: [PATCH v4] drm/amdgpu: add HW_IP_VCN_UNIFIED type

2022-07-18 Thread Leo Liu



On 2022-07-18 02:57, Christian König wrote:

Am 15.07.22 um 22:04 schrieb Ruijing Dong:

 From VCN4, AMDGPU_HW_IP_VCN_UNIFIED is used to support
both encoding and decoding jobs, it re-uses the same
queue number of AMDGPU_HW_IP_VCN_ENC.

link: 
https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/245/commits


Signed-off-by: Ruijing Dong 
---
  include/uapi/drm/amdgpu_drm.h | 6 ++
  1 file changed, 6 insertions(+)

diff --git a/include/uapi/drm/amdgpu_drm.h 
b/include/uapi/drm/amdgpu_drm.h

index 18d3246d636e..e268cd3cdb12 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -560,6 +560,12 @@ struct drm_amdgpu_gem_va {
  #define AMDGPU_HW_IP_UVD_ENC  5
  #define AMDGPU_HW_IP_VCN_DEC  6
  #define AMDGPU_HW_IP_VCN_ENC  7
+/**


Please don't use "/**" here, that is badly formated for a kerneldoc 
comment.



+ * From VCN4, AMDGPU_HW_IP_VCN_UNIFIED is used to support
+ * both encoding and decoding jobs, it re-uses the same
+ * queue number of AMDGPU_HW_IP_VCN_ENC.
+ */
+#define AMDGPU_HW_IP_VCN_UNIFIED  AMDGPU_HW_IP_VCN_ENC


I'm still in doubt that adding another define with the same value as 
AMDGPU_HW_IP_VCN_ENC is a good idea.


Hi Christian,

From VCN4, there are no VCN dec and enc ring types any more; the 
decode/encode will go through the unified queue, so using 
AMDGPU_HW_IP_VCN_ENC is no longer accurate. Keeping the 
AMDGPU_HW_IP_VCN_ENC type is for legacy HW, and the new 
AMDGPU_HW_IP_VCN_UNIFIED just happens to use the same HW ring as the legacy 
encode ring, so we reuse the value, and that is the whole idea.


Thanks,

Leo





Instead we should just add the comment to AMDGPU_HW_IP_VCN_ENC.

Regards,
Christian.


  #define AMDGPU_HW_IP_VCN_JPEG 8
  #define AMDGPU_HW_IP_NUM  9




Re: [PATCH 2/2] drm/amdgpu/vcn: support unified queue only in vcn4

2022-06-11 Thread Leo Liu

The series are:

Acked-by: Leo Liu 

On 2022-06-07 14:36, Ruijing Dong wrote:

- remove multiple queue support.
- add unified queue related functions.

Signed-off-by: Ruijing Dong 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 563 +++---
  1 file changed, 140 insertions(+), 423 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 9119e966..d6f134ef9633 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -29,7 +29,6 @@
  #include "soc15d.h"
  #include "soc15_hw_ip.h"
  #include "vcn_v2_0.h"
-#include "vcn_sw_ring.h"
  
  #include "vcn/vcn_4_0_0_offset.h"

  #include "vcn/vcn_4_0_0_sh_mask.h"
@@ -45,15 +44,12 @@
  #define VCN_VID_SOC_ADDRESS_2_0   
0x1fb00
  #define VCN1_VID_SOC_ADDRESS_3_0  
0x48300
  
-bool unifiedQ_enabled = false;

-
  static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
  };
  
-static void vcn_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);

-static void vcn_v4_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
  static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
  static int vcn_v4_0_set_powergating_state(void *handle,
  enum amd_powergating_state state);
@@ -71,36 +67,15 @@ static int vcn_v4_0_early_init(void *handle)
  {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
-	if (unifiedQ_enabled) {

-   adev->vcn.num_vcn_inst = 1;
-   adev->vcn.num_enc_rings = 1;
-   } else {
-   adev->vcn.num_enc_rings = 2;
-   }
-
-   if (!unifiedQ_enabled)
-   vcn_v4_0_set_dec_ring_funcs(adev);
+   /* re-use enc ring as unified ring */
+   adev->vcn.num_enc_rings = 1;
  
-	vcn_v4_0_set_enc_ring_funcs(adev);

+   vcn_v4_0_set_unified_ring_funcs(adev);
vcn_v4_0_set_irq_funcs(adev);
  
  	return 0;

  }
  
-static void amdgpu_vcn_setup_unified_queue_ucode(struct amdgpu_device *adev)

-{
-   if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-   const struct common_firmware_header *hdr;
-
-   hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-   adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = 
AMDGPU_UCODE_ID_VCN;
-   adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
-   adev->firmware.fw_size +=
-   ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-   DRM_INFO("PSP loading VCN firmware\n");
-   }
-}
-
  /**
   * vcn_v4_0_sw_init - sw init for VCN block
   *
@@ -111,17 +86,14 @@ static void amdgpu_vcn_setup_unified_queue_ucode(struct 
amdgpu_device *adev)
  static int vcn_v4_0_sw_init(void *handle)
  {
struct amdgpu_ring *ring;
-   int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+   int i, r;
  
  	r = amdgpu_vcn_sw_init(adev);

if (r)
return r;
  
-	if (unifiedQ_enabled)

-   amdgpu_vcn_setup_unified_queue_ucode(adev);
-   else
-   amdgpu_vcn_setup_ucode(adev);
+   amdgpu_vcn_setup_ucode(adev);
  
  	r = amdgpu_vcn_resume(adev);

if (r)
@@ -129,81 +101,40 @@ static int vcn_v4_0_sw_init(void *handle)
  
  	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {

volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << i))
continue;
-   /* VCN DEC TRAP */
+
+   atomic_set(>vcn.inst[i].sched_score, 0);
+
+   /* VCN UNIFIED TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
-   VCN_4_0__SRCID__UVD_TRAP, 
>vcn.inst[i].irq);
+   VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, 
>vcn.inst[i].irq);
if (r)
return r;
  
-		atomic_set(>vcn.inst[i].sched_score, 0);

-   if (!unifiedQ_enabled) {
-   ring = >vcn.inst[i].ring_dec;
-   ring->use_doorbell = true;
-
-   /* VCN4 doorbell layout
-* 1: VCN_JPEG_DB_CTRL UVD_JRBC_RB_WPTR; (jpeg)
-* 2: VCN_RB1_DB_CTRL  UVD_RB_WPTR; (decode/encode for 
unified queue)
-* 3: VCN_RB2_DB_CTRL  UVD_RB_WPTR2; (encode only for 
swqueue)
-* 4: VCN_RB3_DB_CTRL  UVD_RB_WPTR3; (Reserved)
-* 5: VCN_RB4_DB_CTRL  UVD_RB_WPTR4; (decode only for 
swqueue)
-*/
-
-   

Re: [PATCH V3] drm/amdgpu/vcn3: send smu interface type

2022-03-30 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2022-03-30 20:59, boyuan.zh...@amd.com wrote:

From: Boyuan Zhang 

This allows the VCN FW to detect the ASIC type, in order to use different mailbox registers.

V2: simplify codes and fix format issue.
V3: re-order if/else condition from the smallest version.

Signed-off-by: Boyuan Zhang 
Acked-by Huang Rui 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 7 +++
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c   | 5 +
  2 files changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index e2fde88aaf5e..f06fb7f882e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -159,6 +159,7 @@
  #define AMDGPU_VCN_MULTI_QUEUE_FLAG   (1 << 8)
  #define AMDGPU_VCN_SW_RING_FLAG   (1 << 9)
  #define AMDGPU_VCN_FW_LOGGING_FLAG(1 << 10)
+#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
  
  #define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER	0x0001

  #define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER0x0001
@@ -279,6 +280,11 @@ struct amdgpu_fw_shared_fw_logging {
uint32_t size;
  };
  
+struct amdgpu_fw_shared_smu_interface_info {

+   uint8_t smu_interface_type;
+   uint8_t padding[3];
+};
+
  struct amdgpu_fw_shared {
uint32_t present_flag_0;
uint8_t pad[44];
@@ -287,6 +293,7 @@ struct amdgpu_fw_shared {
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
struct amdgpu_fw_shared_fw_logging fw_log;
+   struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
  };
  
  struct amdgpu_vcn_fwlog {

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 0d590183328f..508b3d8464f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -219,6 +219,11 @@ static int vcn_v3_0_sw_init(void *handle)
 
cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
 
cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = 
cpu_to_le32(DEC_SW_RING_ENABLED);
+   fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
+   if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
+   fw_shared->smu_interface_info.smu_interface_type = 1;
+   else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
+   fw_shared->smu_interface_info.smu_interface_type = 2;
  
  		if (amdgpu_vcnfw_log)

amdgpu_vcn_fwlog_init(>vcn.inst[i]);


Re: [PATCH v2] drm/amdgpu: add workarounds for Raven VCN TMZ issue

2022-03-10 Thread Leo Liu
No need for encode. Encrypting uses TEE/TA to convert clear bitstream to 
encrypted bitstream, and has nothing to do with VCN encode and tmz.


Regards,

Leo


On 2022-03-10 04:53, Christian König wrote:

Leo you didn't answered the question if we need TMZ for encode as well.

Regards,
Christian.

Am 10.03.22 um 09:45 schrieb Lang Yu:

Ping.

On 03/08/ , Leo Liu wrote:

On 2022-03-08 11:18, Leo Liu wrote:

On 2022-03-08 04:16, Christian König wrote:

Am 08.03.22 um 09:06 schrieb Lang Yu:

On 03/08/ , Christian König wrote:

Am 08.03.22 um 08:33 schrieb Lang Yu:

On 03/08/ , Christian König wrote:

Am 08.03.22 um 04:39 schrieb Lang Yu:

It is a hardware issue that VCN can't handle a GTT
backing stored TMZ buffer on Raven.

Move such a TMZ buffer to VRAM domain before command
submission.

v2:
 - Use patch_cs_in_place callback.

Suggested-by: Christian König 
Signed-off-by: Lang Yu 
---
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 68
+++
 1 file changed, 68 insertions(+)

diff --git
a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 7bbb9ba6b80b..810932abd3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -24,6 +24,7 @@
 #include 
 #include "amdgpu.h"
+#include "amdgpu_cs.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_pm.h"
 #include "soc15.h"
@@ -1905,6 +1906,72 @@ static const struct
amd_ip_funcs vcn_v1_0_ip_funcs = {
 .set_powergating_state = 
vcn_v1_0_set_powergating_state,

 };
+/**
+ * It is a hardware issue that Raven VCN can't
handle a GTT TMZ buffer.
+ * Move such a GTT TMZ buffer to VRAM domain
before command submission.
+ */
+static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser 
*parser,

+    struct amdgpu_job *job,
+    uint64_t addr)
+{
+    struct ttm_operation_ctx ctx = { false, false };
+    struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+    struct amdgpu_vm *vm = >vm;
+    struct amdgpu_bo_va_mapping *mapping;
+    struct amdgpu_bo *bo;
+    int r;
+
+    addr &= AMDGPU_GMC_HOLE_MASK;
+    if (addr & 0x7) {
+    DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+    return -EINVAL;
+    }
+
+    mapping = amdgpu_vm_bo_lookup_mapping(vm,
addr/AMDGPU_GPU_PAGE_SIZE);
+    if (!mapping || !mapping->bo_va || 
!mapping->bo_va->base.bo)

+    return -EINVAL;
+
+    bo = mapping->bo_va->base.bo;
+    if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
+    return 0;
+
+    amdgpu_bo_placement_from_domain(bo, 
AMDGPU_GEM_DOMAIN_VRAM);

+    r = ttm_bo_validate(>tbo, >placement, );
+    if (r) {
+    DRM_ERROR("Failed validating the VCN
message BO (%d)!\n", r);
+    return r;
+    }

Well, exactly that won't work.

The message structure isn't TMZ protected because
otherwise the driver won't
be able to stitch it together.

What is TMZ protected are the surfaces the message
structure is pointing to.
So what you would need to do is to completely parse
the structure and then
move on the relevant buffers into VRAM.

Leo or James, can you help with that?

   From my observations, when decoding secure contents, register
GPCOM_VCPU_DATA0 and GPCOM_VCPU_DATA1 are set to a TMZ
buffer address.
And this way works when allocating TMZ buffers in GTT domain.

As far as I remember that's only the case for the decoding,
encoding works
by putting the addresses into the message buffer.

But could be that decoding is sufficient, Leo and James need
to comment on
this.

It seems that only decode needs TMZ buffers. Only observe
si_vid_create_tmz_buffer()
was called in rvcn_dec_message_decode() in
src/gallium/drivers/radeon/radeon_vcn_dec.c.

Mhm, good point. Let's wait for Leo and James to wake up, when we
don't need encode support than that would makes things much easier.
For secure playback, the buffer required in TMZ are dpb, dt and 
ctx, for
the rest esp. those for CPU access don't need that E.g. msg buffer, 
and

bitstream buffer.

 From radeon_vcn_dec.c, you can see the buffer for dpb and ctx, and dt
buffer frontend/va/surface is set to PIPE_BIND_PROTECTED.


Regards,

Leo

For VCN1, due to performance reason, the msg and fb buffer was 
allocated
into VRAM instead of GTT(for other HW), but those are not TMZ in 
order to

have CPU access.


Regards,

Leo






Regards,
Christian.


Regards,
Lang


Regards,
Christian.


Regards,
Lang


Regards,
Christian.


+
+    return r;
+}
+
+static int
vcn_v1_0_ring_patch_cs_in_place(struct
amdgpu_cs_parser *p,
+   struct amdgpu_job *job,
+   struct amdgpu_ib *ib)
+{
+    uint32_t msg_lo = 0, msg_hi = 0;
+    int i, r;
+
+    for (i = 0; i < ib->length_dw; i += 2) {
+    uint32_t reg = amdgpu_ib_get_value(ib, i);
+    uint32_t val = amdgpu_ib_get_value(ib, i + 1);
+
+    if (reg == PACKET0(p->

Re: [PATCH v2] drm/amdgpu: add workarounds for Raven VCN TMZ issue

2022-03-08 Thread Leo Liu



On 2022-03-08 11:18, Leo Liu wrote:


On 2022-03-08 04:16, Christian König wrote:

Am 08.03.22 um 09:06 schrieb Lang Yu:

On 03/08/ , Christian König wrote:

Am 08.03.22 um 08:33 schrieb Lang Yu:

On 03/08/ , Christian König wrote:

Am 08.03.22 um 04:39 schrieb Lang Yu:

It is a hardware issue that VCN can't handle a GTT
backing stored TMZ buffer on Raven.

Move such a TMZ buffer to VRAM domain before command
submission.

v2:
    - Use patch_cs_in_place callback.

Suggested-by: Christian König 
Signed-off-by: Lang Yu 
---
    drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 68 
+++

    1 file changed, 68 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

index 7bbb9ba6b80b..810932abd3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -24,6 +24,7 @@
    #include 
    #include "amdgpu.h"
+#include "amdgpu_cs.h"
    #include "amdgpu_vcn.h"
    #include "amdgpu_pm.h"
    #include "soc15.h"
@@ -1905,6 +1906,72 @@ static const struct amd_ip_funcs 
vcn_v1_0_ip_funcs = {

    .set_powergating_state = vcn_v1_0_set_powergating_state,
    };
+/**
+ * It is a hardware issue that Raven VCN can't handle a GTT TMZ 
buffer.
+ * Move such a GTT TMZ buffer to VRAM domain before command 
submission.

+ */
+static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
+    struct amdgpu_job *job,
+    uint64_t addr)
+{
+    struct ttm_operation_ctx ctx = { false, false };
+    struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+    struct amdgpu_vm *vm = >vm;
+    struct amdgpu_bo_va_mapping *mapping;
+    struct amdgpu_bo *bo;
+    int r;
+
+    addr &= AMDGPU_GMC_HOLE_MASK;
+    if (addr & 0x7) {
+    DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+    return -EINVAL;
+    }
+
+    mapping = amdgpu_vm_bo_lookup_mapping(vm, 
addr/AMDGPU_GPU_PAGE_SIZE);

+    if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+    return -EINVAL;
+
+    bo = mapping->bo_va->base.bo;
+    if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
+    return 0;
+
+    amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+    r = ttm_bo_validate(>tbo, >placement, );
+    if (r) {
+    DRM_ERROR("Failed validating the VCN message BO 
(%d)!\n", r);

+    return r;
+    }

Well, exactly that won't work.

The message structure isn't TMZ protected because otherwise the 
driver won't

be able to stitch it together.

What is TMZ protected are the surfaces the message structure is 
pointing to.
So what you would need to do is to completely parse the structure 
and then

move on the relevant buffers into VRAM.

Leo or James, can you help with that?

  From my observations, when decoding secure contents, register
GPCOM_VCPU_DATA0 and GPCOM_VCPU_DATA1 are set to a TMZ buffer 
address.

And this way works when allocating TMZ buffers in GTT domain.
As far as I remember that's only the case for the decoding, 
encoding works

by putting the addresses into the message buffer.

But could be that decoding is sufficient, Leo and James need to 
comment on

this.
It seems that only decode needs TMZ buffers. Only observe 
si_vid_create_tmz_buffer()
was called in rvcn_dec_message_decode() in 
src/gallium/drivers/radeon/radeon_vcn_dec.c.


Mhm, good point. Let's wait for Leo and James to wake up, when we 
don't need encode support than that would makes things much easier.


For secure playback, the buffer required in TMZ are dpb, dt and ctx, 
for the rest esp. those for CPU access don't need that E.g. msg 
buffer, and bitstream buffer.


From radeon_vcn_dec.c, you can see the buffer for dpb and ctx, and dt 
buffer frontend/va/surface is set to PIPE_BIND_PROTECTED.



Regards,

Leo

For VCN1, due to performance reason, the msg and fb buffer was allocated 
into VRAM instead of GTT(for other HW), but those are not TMZ in order 
to have CPU access.



Regards,

Leo








Regards,
Christian.



Regards,
Lang


Regards,
Christian.


Regards,
Lang


Regards,
Christian.


+
+    return r;
+}
+
+static int vcn_v1_0_ring_patch_cs_in_place(struct 
amdgpu_cs_parser *p,

+   struct amdgpu_job *job,
+   struct amdgpu_ib *ib)
+{
+    uint32_t msg_lo = 0, msg_hi = 0;
+    int i, r;
+
+    for (i = 0; i < ib->length_dw; i += 2) {
+    uint32_t reg = amdgpu_ib_get_value(ib, i);
+    uint32_t val = amdgpu_ib_get_value(ib, i + 1);
+
+    if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+    msg_lo = val;
+    } else if (reg == PACKET0(p->adev->vcn.internal.data1, 
0)) {

+    msg_hi = val;
+    } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+    r = vcn_v1_0_validate_bo(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
+    

Re: [PATCH v2] drm/amdgpu: add workarounds for Raven VCN TMZ issue

2022-03-08 Thread Leo Liu



On 2022-03-08 04:16, Christian König wrote:

Am 08.03.22 um 09:06 schrieb Lang Yu:

On 03/08/ , Christian König wrote:

Am 08.03.22 um 08:33 schrieb Lang Yu:

On 03/08/ , Christian König wrote:

Am 08.03.22 um 04:39 schrieb Lang Yu:

It is a hardware issue that VCN can't handle a GTT
backing stored TMZ buffer on Raven.

Move such a TMZ buffer to VRAM domain before command
submission.

v2:
    - Use patch_cs_in_place callback.

Suggested-by: Christian König 
Signed-off-by: Lang Yu 
---
    drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 68 
+++

    1 file changed, 68 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

index 7bbb9ba6b80b..810932abd3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -24,6 +24,7 @@
    #include 
    #include "amdgpu.h"
+#include "amdgpu_cs.h"
    #include "amdgpu_vcn.h"
    #include "amdgpu_pm.h"
    #include "soc15.h"
@@ -1905,6 +1906,72 @@ static const struct amd_ip_funcs 
vcn_v1_0_ip_funcs = {

    .set_powergating_state = vcn_v1_0_set_powergating_state,
    };
+/**
+ * It is a hardware issue that Raven VCN can't handle a GTT TMZ 
buffer.
+ * Move such a GTT TMZ buffer to VRAM domain before command 
submission.

+ */
+static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
+    struct amdgpu_job *job,
+    uint64_t addr)
+{
+    struct ttm_operation_ctx ctx = { false, false };
+    struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+    struct amdgpu_vm *vm = >vm;
+    struct amdgpu_bo_va_mapping *mapping;
+    struct amdgpu_bo *bo;
+    int r;
+
+    addr &= AMDGPU_GMC_HOLE_MASK;
+    if (addr & 0x7) {
+    DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+    return -EINVAL;
+    }
+
+    mapping = amdgpu_vm_bo_lookup_mapping(vm, 
addr/AMDGPU_GPU_PAGE_SIZE);

+    if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+    return -EINVAL;
+
+    bo = mapping->bo_va->base.bo;
+    if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
+    return 0;
+
+    amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+    r = ttm_bo_validate(>tbo, >placement, );
+    if (r) {
+    DRM_ERROR("Failed validating the VCN message BO 
(%d)!\n", r);

+    return r;
+    }

Well, exactly that won't work.

The message structure isn't TMZ protected because otherwise the 
driver won't

be able to stitch it together.

What is TMZ protected are the surfaces the message structure is 
pointing to.
So what you would need to do is to completely parse the structure 
and then

move on the relevant buffers into VRAM.

Leo or James, can you help with that?

  From my observations, when decoding secure contents, register
GPCOM_VCPU_DATA0 and GPCOM_VCPU_DATA1 are set to a TMZ buffer address.
And this way works when allocating TMZ buffers in GTT domain.
As far as I remember that's only the case for the decoding, encoding 
works

by putting the addresses into the message buffer.

But could be that decoding is sufficient, Leo and James need to 
comment on

this.
It seems that only decode needs TMZ buffers. Only observe 
si_vid_create_tmz_buffer()
was called in rvcn_dec_message_decode() in 
src/gallium/drivers/radeon/radeon_vcn_dec.c.


Mhm, good point. Let's wait for Leo and James to wake up, when we 
don't need encode support than that would makes things much easier.


For secure playback, the buffer required in TMZ are dpb, dt and ctx, for 
the rest esp. those for CPU access don't need that E.g. msg buffer, and 
bitstream buffer.


From radeon_vcn_dec.c, you can see the buffer for dpb and ctx, and dt 
buffer frontend/va/surface is set to PIPE_BIND_PROTECTED.



Regards,

Leo





Regards,
Christian.



Regards,
Lang


Regards,
Christian.


Regards,
Lang


Regards,
Christian.


+
+    return r;
+}
+
+static int vcn_v1_0_ring_patch_cs_in_place(struct 
amdgpu_cs_parser *p,

+   struct amdgpu_job *job,
+   struct amdgpu_ib *ib)
+{
+    uint32_t msg_lo = 0, msg_hi = 0;
+    int i, r;
+
+    for (i = 0; i < ib->length_dw; i += 2) {
+    uint32_t reg = amdgpu_ib_get_value(ib, i);
+    uint32_t val = amdgpu_ib_get_value(ib, i + 1);
+
+    if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+    msg_lo = val;
+    } else if (reg == PACKET0(p->adev->vcn.internal.data1, 
0)) {

+    msg_hi = val;
+    } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+    r = vcn_v1_0_validate_bo(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
+    if (r)
+    return r;
+    }
+    }
+
+    return 0;
+}
+
+
    static const struct amdgpu_ring_funcs 
vcn_v1_0_dec_ring_vm_funcs = {

    .type = AMDGPU_RING_TYPE_VCN_DEC,
    .align_mask = 0xf,
@@ -1914,6 +1981,7 @@ static const struct amdgpu_ring_funcs 
vcn_v1_0_dec_ring_vm_funcs = {

    .get_rptr = vcn_v1_0_dec_ring_get_rptr,
    

Re: [PATCH] drm/radeon: fix UVD suspend error

2022-01-17 Thread Leo Liu



On 2022-01-17 2:47 a.m., Qiang Ma wrote:

I encountered a bug recently; the kernel log shows:

[  330.171875] radeon :03:00.0: couldn't schedule ib
[  330.175781] [drm:radeon_uvd_suspend [radeon]] *ERROR* Error destroying UVD 
(-22)!

In radeon drivers, using UVD suspend is as follows:

if (rdev->has_uvd) {
 uvd_v1_0_fini(rdev);
 radeon_uvd_suspend(rdev);
}

In the radeon_ib_schedule function, we check the 'ring->ready' state,
but in the uvd_v1_0_fini function, we've already cleared the ready state.
So, just modify the suspend code flow to fix this error.


It seems reasonable to me. The suspend sends the destroy message if 
there are still incomplete jobs, so it should run before the fini, which 
stops the hardware.


The series are:

Reviewed-by: Leo Liu 




Signed-off-by: Qiang Ma 
---
  drivers/gpu/drm/radeon/cik.c   | 2 +-
  drivers/gpu/drm/radeon/evergreen.c | 2 +-
  drivers/gpu/drm/radeon/ni.c| 2 +-
  drivers/gpu/drm/radeon/r600.c  | 2 +-
  drivers/gpu/drm/radeon/rv770.c | 2 +-
  drivers/gpu/drm/radeon/si.c| 2 +-
  6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 81b4de7be9f2..5819737c21c6 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -8517,8 +8517,8 @@ int cik_suspend(struct radeon_device *rdev)
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
if (rdev->has_uvd) {
-   uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+   uvd_v1_0_fini(rdev);
}
if (rdev->has_vce)
radeon_vce_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen.c 
b/drivers/gpu/drm/radeon/evergreen.c
index eeb590d2dec2..455f8036aa54 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5156,8 +5156,8 @@ int evergreen_suspend(struct radeon_device *rdev)
radeon_pm_suspend(rdev);
radeon_audio_fini(rdev);
if (rdev->has_uvd) {
-   uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+   uvd_v1_0_fini(rdev);
}
r700_cp_stop(rdev);
r600_dma_stop(rdev);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 4a364ca7a1be..927e5f42e97d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2323,8 +2323,8 @@ int cayman_suspend(struct radeon_device *rdev)
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
-   uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+   uvd_v1_0_fini(rdev);
}
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ca3fcae2adb5..dd78fc499402 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3232,8 +3232,8 @@ int r600_suspend(struct radeon_device *rdev)
radeon_audio_fini(rdev);
r600_cp_stop(rdev);
if (rdev->has_uvd) {
-   uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+   uvd_v1_0_fini(rdev);
}
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index e592e57be1bb..38796af4fadd 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1894,8 +1894,8 @@ int rv770_suspend(struct radeon_device *rdev)
radeon_pm_suspend(rdev);
radeon_audio_fini(rdev);
if (rdev->has_uvd) {
-   uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+   uvd_v1_0_fini(rdev);
}
r700_cp_stop(rdev);
r600_dma_stop(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 013e44ed0f39..8d5e4b25609d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6800,8 +6800,8 @@ int si_suspend(struct radeon_device *rdev)
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
-   uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+   uvd_v1_0_fini(rdev);
}
if (rdev->has_vce)
radeon_vce_suspend(rdev);


Re: [PATCH 4/4] drm/amdgpu/vcn3.0: remove intermediate variable

2021-10-21 Thread Leo Liu

The series are:

Reviewed-by: Leo Liu 

On 2021-10-19 4:10 p.m., Alex Deucher wrote:

No need to use the id variable, just use the constant
plus instance offset directly.

Signed-off-by: Alex Deucher 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 11 ++-
  1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 57b62fb04750..da11ceba0698 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -60,11 +60,6 @@ static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN1
  };
  
-static int amdgpu_ucode_id_vcns[] = {

-   AMDGPU_UCODE_ID_VCN,
-   AMDGPU_UCODE_ID_VCN1
-};
-
  static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
  static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
  static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -1278,7 +1273,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device 
*adev)
uint32_t param, resp, expected;
uint32_t offset, cache_size;
uint32_t tmp, timeout;
-   uint32_t id;
  
  	struct amdgpu_mm_table *table = >virt.mm_table;

uint32_t *table_loc;
@@ -1322,13 +1316,12 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device 
*adev)
cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
  
  		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {

-   id = amdgpu_ucode_id_vcns[i];
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-   adev->firmware.ucode[id].tmr_mc_addr_lo);
+   adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
i].tmr_mc_addr_lo);
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-   adev->firmware.ucode[id].tmr_mc_addr_hi);
+   adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_VCPU_CACHE_OFFSET0),


Re: [PATCH 1/2] drm/amdgpu/jpeg2: move jpeg2 shared macro to header file

2021-09-30 Thread Leo Liu

The series are:

Reviewed-by: Leo Liu 

On 2021-09-29 3:57 p.m., James Zhu wrote:

Move jpeg2 shared macro to header file

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 20 
  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h | 20 
  2 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 85967a5..299de1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -32,26 +32,6 @@
  #include "vcn/vcn_2_0_0_sh_mask.h"
  #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
  
-#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET0x1bfff

-#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET   0x4029
-#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
-#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET
0x40eb
-#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
-#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET0x40d1
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET
0x40e8
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET   0x40e9
-#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET
0x40ed
-#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET0x4085
-#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
-#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET  0x4089
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET   0x401f
-
-#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR   0x18000
-
  static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
  static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
  static int jpeg_v2_0_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 15a344e..1a03baa 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -24,6 +24,26 @@
  #ifndef __JPEG_V2_0_H__
  #define __JPEG_V2_0_H__
  
+#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET0x1bfff

+#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET   0x4029
+#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
+#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET
0x40eb
+#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
+#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET0x40d1
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET
0x40e8
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET   0x40e9
+#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET
0x40ed
+#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET0x4085
+#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
+#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET  0x4089
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET   0x401f
+
+#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR   0x18000
+
  void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
  void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
  void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 
seq,


Re: [PATCH v2] drm/amd/amdgpu: Recovery vcn instance iterate.

2021-07-20 Thread Leo Liu

It looks good to me for the non-sriov part.

Regards,

Leo


On 2021-07-15 10:14 p.m., Zhou, Peng Ju wrote:

[AMD Official Use Only]

Hi @Liu, Leo

Can you help to review this patch?
Monk and Alex have reviewed it.


--
BW
Pengju Zhou




-Original Message-
From: Liu, Monk 
Sent: Thursday, July 15, 2021 7:54 AM
To: Alex Deucher ; Zhou, Peng Ju
; Liu, Leo 
Cc: amd-gfx list 
Subject: RE: [PATCH v2] drm/amd/amdgpu: Recovery vcn instance iterate.

[AMD Official Use Only]

Reviewed-by: Monk Liu 

You might need @Liu, Leo's review as well

Thanks

--
Monk Liu | Cloud-GPU Core team
--

-Original Message-
From: amd-gfx  On Behalf Of Alex
Deucher
Sent: Wednesday, July 14, 2021 10:49 PM
To: Zhou, Peng Ju 
Cc: amd-gfx list 
Subject: Re: [PATCH v2] drm/amd/amdgpu: Recovery vcn instance iterate.

On Tue, Jul 13, 2021 at 6:31 AM Peng Ju Zhou  wrote:

The previous logic recorded the number of valid vcn instances to
use them on SRIOV; this is a hard task because vcn access is
based on the index of the vcn instance.

Check if the vcn instance enabled before do instance init.

Signed-off-by: Peng Ju Zhou 

Acked-by: Alex Deucher 


---
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 33
---
  1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index c3580de3ea9c..d11fea2c9d90 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -88,9 +88,7 @@ static int vcn_v3_0_early_init(void *handle)
 int i;

 if (amdgpu_sriov_vf(adev)) {
-   for (i = 0; i < VCN_INSTANCES_SIENNA_CICHLID; i++)
-   if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 
i))
-   adev->vcn.num_vcn_inst++;
+   adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
 adev->vcn.harvest_config = 0;
 adev->vcn.num_enc_rings = 1;

@@ -151,8 +149,7 @@ static int vcn_v3_0_sw_init(void *handle)
 adev->firmware.fw_size +=
 ALIGN(le32_to_cpu(hdr->ucode_size_bytes),
PAGE_SIZE);

-   if ((adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID)

||

-   (amdgpu_sriov_vf(adev) && adev->asic_type ==

CHIP_SIENNA_CICHLID)) {

+   if (adev->vcn.num_vcn_inst ==
+ VCN_INSTANCES_SIENNA_CICHLID) {
 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id =

AMDGPU_UCODE_ID_VCN1;

 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev-
vcn.fw;
 adev->firmware.fw_size += @@ -322,18 +319,28
@@ static int vcn_v3_0_hw_init(void *handle)
 continue;

 ring = >vcn.inst[i].ring_dec;
-   ring->wptr = 0;
-   ring->wptr_old = 0;
-   vcn_v3_0_dec_ring_set_wptr(ring);
-   ring->sched.ready = true;
-
-   for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-   ring = >vcn.inst[i].ring_enc[j];
+   if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 
i))

{

+   ring->sched.ready = false;
+   dev_info(adev->dev, "ring %s is disabled by 
hypervisor\n",

ring->name);

+   } else {
 ring->wptr = 0;
 ring->wptr_old = 0;
-   vcn_v3_0_enc_ring_set_wptr(ring);
+   vcn_v3_0_dec_ring_set_wptr(ring);
 ring->sched.ready = true;
 }
+
+   for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+   ring = >vcn.inst[i].ring_enc[j];
+   if (amdgpu_vcn_is_disabled_vcn(adev,

VCN_ENCODE_RING, i)) {

+   ring->sched.ready = false;
+   dev_info(adev->dev, "ring %s is 
disabled by

hypervisor\n", ring->name);

+   } else {
+   ring->wptr = 0;
+   ring->wptr_old = 0;
+   vcn_v3_0_enc_ring_set_wptr(ring);
+   ring->sched.ready = true;
+   }
+   }
 }
 } else {
 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flist

Re: [PATCH] drm/amdgpu: Fix Gstreamer api vaapih264enc missing

2021-05-27 Thread Leo Liu


On 2021-05-26 6:49 a.m., Christian König wrote:

Am 26.05.21 um 12:13 schrieb Li, Xin (Justin):

Since the vcn decoding ring is not required, just disable it.

Cc: Alex.Deucher 
Cc: Christian.Konig 
Signed-off-by: Li.Xin.Justin 
Signed-off-by: Frank.Min 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |  6 +++---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 25 ++---
  2 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

index 524e4fe5efe8..614e6b06e94e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -427,7 +427,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device 
*adev,

  if (adev->uvd.harvest_config & (1 << i))
  continue;
  -    if (adev->vcn.inst[i].ring_dec.sched.ready)
+    if (adev->vcn.inst[i].ring_dec.sched.ready ||
+    (adev->asic_type == CHIP_NAVI12 &&
+    amdgpu_sriov_vf(adev)))


Leo needs to take a closer look, but that looks fishy to me.


The decode is explicitly disabled with sriov case with vcn2

+    ring->sched.ready = false;

and I also don't understand why the ring number is added here if it is 
already disabled. If you are trying to work around some issues, the 
changes here are a very bad hack and you probably need to find the 
real root cause.


Regards,

Leo




Why should the ring be available if it is disabled? That doesn't make 
sense.


Christian.


  ++num_rings;
  }
  ib_start_alignment = 16;
@@ -770,8 +772,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, 
void *data, struct drm_file *filp)

  dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
  if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
  dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
-    if (amdgpu_is_tmz(adev))
-    dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
    vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
  vm_size -= AMDGPU_VA_RESERVED_SIZE;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c

index 8af567c546db..dc8a36766c4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -220,17 +220,20 @@ static int vcn_v2_0_hw_init(void *handle)
  {
  struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  struct amdgpu_ring *ring = >vcn.inst->ring_dec;
-    int i, r;
+    int i, r = -1;
    adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
   ring->doorbell_index, 0);
  -    if (amdgpu_sriov_vf(adev))
+    if (amdgpu_sriov_vf(adev)) {
  vcn_v2_0_start_sriov(adev);
-
-    r = amdgpu_ring_test_helper(ring);
-    if (r)
-    goto done;
+    if (adev->asic_type == CHIP_NAVI12)
+    ring->sched.ready = false;
+    } else {
+    r = amdgpu_ring_test_helper(ring);
+    if (r)
+    goto done;
+    }
    //Disable vcn decode for sriov
  if (amdgpu_sriov_vf(adev))
@@ -245,8 +248,11 @@ static int vcn_v2_0_hw_init(void *handle)
    done:
  if (!r)
-    DRM_INFO("VCN decode and encode initialized 
successfully(under %s).\n",
-    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG 
Mode":"SPG Mode");

+    DRM_INFO("VCN %s encode initialized successfully(under %s).\n",
+    (adev->asic_type == CHIP_NAVI12 &&
+    amdgpu_sriov_vf(adev))?"":"decode and",
+    (adev->pg_flags &
+    AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
    return r;
  }
@@ -1721,9 +1727,6 @@ int vcn_v2_0_dec_ring_test_ring(struct 
amdgpu_ring *ring)

  unsigned i;
  int r;
  -    if (amdgpu_sriov_vf(adev))
-    return 0;
-
  WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
  r = amdgpu_ring_alloc(ring, 4);
  if (r)



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/7] drm/amdgpu/vcn1: add cancel_delayed_work_sync before power gate

2021-05-20 Thread Leo Liu

The series are:

Reviewed-by: Leo Liu 


On 2021-05-19 12:22 p.m., James Zhu wrote:

Add cancel_delayed_work_sync before set power gating state
to avoid race condition issue when power gating.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 6 +-
  1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 0c1beef..27b1ced 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
  {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
+	cancel_delayed_work_sync(>vcn.idle_work);

+
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-   RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+   (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+   }
  
  	return 0;

  }

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add video_codecs query support for aldebaran

2021-05-18 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2021-05-18 8:47 a.m., James Zhu wrote:

Add video_codecs query support for aldebaran.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/soc15.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 080e715..75008cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -302,6 +302,7 @@ static int soc15_query_video_codecs(struct amdgpu_device 
*adev, bool encode,
*codecs = _video_codecs_decode;
return 0;
case CHIP_ARCTURUS:
+   case CHIP_ALDEBARAN:
case CHIP_RENOIR:
if (encode)
*codecs = _video_codecs_encode;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: add cancel_delayed_work_sync before power gate

2021-05-18 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2021-05-17 4:42 p.m., James Zhu wrote:

Add cancel_delayed_work_sync before set power gating state
to avoid race condition issue when power gating.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 19 +--
  1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 779e585..360dff2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -388,6 +388,19 @@ static int vcn_v3_0_hw_fini(void *handle)
continue;
  
  		ring = >vcn.inst[i].ring_dec;

+   ring->sched.ready = false;
+
+   for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+   ring = >vcn.inst[i].ring_enc[j];
+   ring->sched.ready = false;
+   }
+   }
+
+   cancel_delayed_work_sync(>vcn.idle_work);
+
+   for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+   if (adev->vcn.harvest_config & (1 << i))
+   continue;
  
  		if (!amdgpu_sriov_vf(adev)) {

if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
@@ -396,12 +409,6 @@ static int vcn_v3_0_hw_fini(void *handle)
vcn_v3_0_set_powergating_state(adev, 
AMD_PG_STATE_GATE);
}
}
-   ring->sched.ready = false;
-
-   for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-   ring = >vcn.inst[i].ring_enc[j];
-   ring->sched.ready = false;
-   }
}
  
  	return 0;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 1/2] drm/amdgpu: enhance amdgpu_vcn_suspend

2021-05-17 Thread Leo Liu
To be accurate, the BO is mapped to the engine cache window and the 
engine stacks' runtime data, so we should save it before the poweroff.



On 2021-05-17 2:15 p.m., Leo Liu wrote:


The saved data are from the engine cache, it's the runtime of engine 
before suspend, it might be different after you have the engine 
powered off.



Regards,

Leo



On 2021-05-17 2:11 p.m., Zhu, James wrote:


[AMD Official Use Only - Internal Distribution Only]


save_bo needn't ungate vcn,  it just keeps data in memory.

Thanks & Best Regards!


James Zhu


*From:* Liu, Leo 
*Sent:* Monday, May 17, 2021 2:07 PM
*To:* Zhu, James ; Zhu, James ; 
amd-gfx@lists.freedesktop.org 

*Subject:* Re: [PATCH v2 1/2] drm/amdgpu: enhance amdgpu_vcn_suspend

Definitely, we need to move cancel_delayed_work_sync moved to before 
power gate.


Should "save_bo" be step 4 before power gate ?

Regards,

Leo


On 2021-05-17 1:59 p.m., James Zhu wrote:


Then we forgot the proposal I provided before.

I think the below sequence may fix the race condition issue that we are 
facing.


1. stop scheduling new jobs

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        ring = >vcn.inst[i].ring_dec;
        ring->sched.ready = false;

        for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
            ring = >vcn.inst[i].ring_enc[j];
            ring->sched.ready = false;
        }
    }

2. cancel_delayed_work_sync(>vcn.idle_work);

3. SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
     UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

4. amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,   AMD_PG_STATE_GATE);


5.  saved_bo

Best Regards!

James

On 2021-05-17 1:43 p.m., Leo Liu wrote:


On 2021-05-17 12:54 p.m., James Zhu wrote:
I am wondering if there are still some jobs kept in the queue, it 
is lucky to check


Yes it's possible, in this case delayed handler is set, so 
cancelling once is enough.





UVD_POWER_STATUS done, but after, fw start a new job that list in 
the queue.


To handle this situation perfectly, we need add mechanism to 
suspend fw first.


I think that should be handled by the sequence from 
vcn_v3_0_stop_dpg_mode().





Another case, if it is unlucky, that  vcn fw hung at that time, 
UVD_POWER_STATUS


always keeps busy.   then it needs force powering gate the vcn hw 
after certain time waiting.


Yep, we still need to gate VCN power after certain timeout.


Regards,

Leo





Best Regards!

James

On 2021-05-17 12:34 p.m., Leo Liu wrote:


On 2021-05-17 11:52 a.m., James Zhu wrote:

During vcn suspend, stop the rings from receiving new requests,
and try to wait for all vcn jobs to finish gracefully.

v2: Force power gating of the vcn hardware after a few waiting retries.

Signed-off-by: James Zhu  
<mailto:james@amd.com>

---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22 
+-

  1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

index 2016459..9f3a6e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -275,9 +275,29 @@ int amdgpu_vcn_suspend(struct amdgpu_device 
*adev)

  {
  unsigned size;
  void *ptr;
+    int retry_max = 6;
  int i;
  - cancel_delayed_work_sync(>vcn.idle_work);
+    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+    if (adev->vcn.harvest_config & (1 << i))
+    continue;
+    ring = >vcn.inst[i].ring_dec;
+    ring->sched.ready = false;
+
+    for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+    ring = >vcn.inst[i].ring_enc[j];
+    ring->sched.ready = false;
+    }
+    }
+
+    while (retry_max-- && 
cancel_delayed_work_sync(>vcn.idle_work))

+    mdelay(5);


I think it's possible to have one pending job unprocessed with 
VCN when suspend sequence getting here, but it shouldn't be more 
than one, cancel_delayed_work_sync probably return false after 
the first time, so calling cancel_delayed_work_sync once should 
be enough here. we probably need to wait longer from:


SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

to make sure the unprocessed job get done.


Regards,

Leo



+    if (!retry_max && !amdgpu_sriov_vf(adev)) {
+    if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) {
+    dev_warn(adev->dev, "Forced powering gate vcn 
hardware!");
+    vcn_v3_0_set_powergating_state(adev, 
AMD_PG_STATE_GATE);

+    }
+    }
    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
  if (adev->vcn.harvest_config & (1 << i))


___
amd-gfx mailing li

Re: [PATCH v2 1/2] drm/amdgpu: enhance amdgpu_vcn_suspend

2021-05-17 Thread Leo Liu
The saved data are from the engine cache, it's the runtime of engine 
before suspend, it might be different after you have the engine powered off.



Regards,

Leo



On 2021-05-17 2:11 p.m., Zhu, James wrote:


[AMD Official Use Only - Internal Distribution Only]


save_bo needn't ungate vcn,  it just keeps data in memory.

Thanks & Best Regards!


James Zhu


*From:* Liu, Leo 
*Sent:* Monday, May 17, 2021 2:07 PM
*To:* Zhu, James ; Zhu, James ; 
amd-gfx@lists.freedesktop.org 

*Subject:* Re: [PATCH v2 1/2] drm/amdgpu: enhance amdgpu_vcn_suspend

Definitely, we need to move cancel_delayed_work_sync moved to before 
power gate.


Should "save_bo" be step 4 before power gate ?

Regards,

Leo


On 2021-05-17 1:59 p.m., James Zhu wrote:


Then we forgot the proposal I provided before.

I think the below sequence may fix the race condition issue that we are 
facing.


1. stop scheduling new jobs

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        ring = >vcn.inst[i].ring_dec;
        ring->sched.ready = false;

        for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
            ring = >vcn.inst[i].ring_enc[j];
            ring->sched.ready = false;
        }
    }

2. cancel_delayed_work_sync(>vcn.idle_work);

3. SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
     UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

4. amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,   AMD_PG_STATE_GATE);


5.  saved_bo

Best Regards!

James

On 2021-05-17 1:43 p.m., Leo Liu wrote:


On 2021-05-17 12:54 p.m., James Zhu wrote:
I am wondering if there are still some jobs kept in the queue, it 
is lucky to check


Yes it's possible, in this case delayed handler is set, so 
cancelling once is enough.





UVD_POWER_STATUS done, but after, fw start a new job that list in 
the queue.


To handle this situation perfectly, we need add mechanism to 
suspend fw first.


I think that should be handled by the sequence from 
vcn_v3_0_stop_dpg_mode().





Another case, if it is unlucky, that  vcn fw hung at that time, 
UVD_POWER_STATUS


always keeps busy.   then it needs force powering gate the vcn hw 
after certain time waiting.


Yep, we still need to gate VCN power after certain timeout.


Regards,

Leo





Best Regards!

James

On 2021-05-17 12:34 p.m., Leo Liu wrote:


On 2021-05-17 11:52 a.m., James Zhu wrote:

During vcn suspend, stop the rings from receiving new requests,
and try to wait for all vcn jobs to finish gracefully.

v2: Force power gating of the vcn hardware after a few waiting retries.

Signed-off-by: James Zhu  
<mailto:james@amd.com>

---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22 
+-

  1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

index 2016459..9f3a6e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -275,9 +275,29 @@ int amdgpu_vcn_suspend(struct amdgpu_device 
*adev)

  {
  unsigned size;
  void *ptr;
+    int retry_max = 6;
  int i;
  - cancel_delayed_work_sync(>vcn.idle_work);
+    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+    if (adev->vcn.harvest_config & (1 << i))
+    continue;
+    ring = >vcn.inst[i].ring_dec;
+    ring->sched.ready = false;
+
+    for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+    ring = >vcn.inst[i].ring_enc[j];
+    ring->sched.ready = false;
+    }
+    }
+
+    while (retry_max-- && 
cancel_delayed_work_sync(>vcn.idle_work))

+    mdelay(5);


I think it's possible to have one pending job unprocessed with VCN 
when suspend sequence getting here, but it shouldn't be more than 
one, cancel_delayed_work_sync probably return false after the 
first time, so calling cancel_delayed_work_sync once should be 
enough here. we probably need to wait longer from:


SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

to make sure the unprocessed job get done.


Regards,

Leo



+    if (!retry_max && !amdgpu_sriov_vf(adev)) {
+    if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) {
+    dev_warn(adev->dev, "Forced powering gate vcn 
hardware!");
+    vcn_v3_0_set_powergating_state(adev, 
AMD_PG_STATE_GATE);

+    }
+    }
    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
  if (adev->vcn.harvest_config & (1 << i))
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 1/2] drm/amdgpu: enhance amdgpu_vcn_suspend

2021-05-17 Thread Leo Liu
Definitely, we need to move cancel_delayed_work_sync moved to before 
power gate.


Should "save_bo" be step 4 before power gate ?

Regards,

Leo


On 2021-05-17 1:59 p.m., James Zhu wrote:


Then we forgot the proposal I provided before.

I think the below sequence may fix the race condition issue that we are 
facing.


1. stop scheduling new jobs

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (adev->vcn.harvest_config & (1 << i))
            continue;

        ring = >vcn.inst[i].ring_dec;
        ring->sched.ready = false;

        for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
            ring = >vcn.inst[i].ring_enc[j];
            ring->sched.ready = false;
        }
    }

2.    cancel_delayed_work_sync(>vcn.idle_work);

3. SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
     UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

4. amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,   AMD_PG_STATE_GATE);


5.  saved_bo

Best Regards!

James

On 2021-05-17 1:43 p.m., Leo Liu wrote:


On 2021-05-17 12:54 p.m., James Zhu wrote:
I am wondering if there are still some jobs kept in the queue, it is 
lucky to check


Yes it's possible, in this case delayed handler is set, so cancelling 
once is enough.





UVD_POWER_STATUS done, but after, fw start a new job that list in 
the queue.


To handle this situation perfectly, we need add mechanism to suspend 
fw first.


I think that should be handled by the sequence from 
vcn_v3_0_stop_dpg_mode().





Another case, if it is unlucky, that  vcn fw hung at that time, 
UVD_POWER_STATUS


always keeps busy.   then it needs force powering gate the vcn hw 
after certain time waiting.


Yep, we still need to gate VCN power after certain timeout.


Regards,

Leo





Best Regards!

James

On 2021-05-17 12:34 p.m., Leo Liu wrote:


On 2021-05-17 11:52 a.m., James Zhu wrote:

During vcn suspend, stop the rings from receiving new requests,
and try to wait for all vcn jobs to finish gracefully.

v2: Force power gating of the vcn hardware after a few waiting retries.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22 +-
  1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

index 2016459..9f3a6e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -275,9 +275,29 @@ int amdgpu_vcn_suspend(struct amdgpu_device 
*adev)

  {
  unsigned size;
  void *ptr;
+    int retry_max = 6;
  int i;
  - cancel_delayed_work_sync(>vcn.idle_work);
+    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+    if (adev->vcn.harvest_config & (1 << i))
+    continue;
+    ring = >vcn.inst[i].ring_dec;
+    ring->sched.ready = false;
+
+    for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+    ring = >vcn.inst[i].ring_enc[j];
+    ring->sched.ready = false;
+    }
+    }
+
+    while (retry_max-- && 
cancel_delayed_work_sync(>vcn.idle_work))

+    mdelay(5);


I think it's possible to have one pending job unprocessed with VCN 
when suspend sequence getting here, but it shouldn't be more than 
one, cancel_delayed_work_sync probably return false after the first 
time, so calling cancel_delayed_work_sync once should be enough 
here. we probably need to wait longer from:


SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

to make sure the unprocessed job get done.


Regards,

Leo



+    if (!retry_max && !amdgpu_sriov_vf(adev)) {
+    if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) {
+    dev_warn(adev->dev, "Forced powering gate vcn 
hardware!");

+    vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+    }
+    }
    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
  if (adev->vcn.harvest_config & (1 << i))
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 1/2] drm/amdgpu: enhance amdgpu_vcn_suspend

2021-05-17 Thread Leo Liu


On 2021-05-17 12:54 p.m., James Zhu wrote:
I am wondering if there are still some jobs kept in the queue, it is 
lucky to check


Yes it's possible, in this case delayed handler is set, so cancelling 
once is enough.





UVD_POWER_STATUS done, but after, fw start a new job that list in the 
queue.


To handle this situation perfectly, we need add mechanism to suspend 
fw first.


I think that should be handled by the sequence from 
vcn_v3_0_stop_dpg_mode().





Another case, if it is unlucky, that  vcn fw hung at that time, 
UVD_POWER_STATUS


always keeps busy.   then it needs force powering gate the vcn hw 
after certain time waiting.


Yep, we still need to gate VCN power after certain timeout.


Regards,

Leo





Best Regards!

James

On 2021-05-17 12:34 p.m., Leo Liu wrote:


On 2021-05-17 11:52 a.m., James Zhu wrote:

During vcn suspend, stop the rings from receiving new requests,
and try to wait for all vcn jobs to finish gracefully.

v2: Force power gating of the vcn hardware after a few waiting retries.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22 +-
  1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

index 2016459..9f3a6e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -275,9 +275,29 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
  {
  unsigned size;
  void *ptr;
+    int retry_max = 6;
  int i;
  -    cancel_delayed_work_sync(>vcn.idle_work);
+    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+    if (adev->vcn.harvest_config & (1 << i))
+    continue;
+    ring = >vcn.inst[i].ring_dec;
+    ring->sched.ready = false;
+
+    for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+    ring = >vcn.inst[i].ring_enc[j];
+    ring->sched.ready = false;
+    }
+    }
+
+    while (retry_max-- && 
cancel_delayed_work_sync(>vcn.idle_work))

+    mdelay(5);


I think it's possible to have one pending job unprocessed with VCN 
when suspend sequence getting here, but it shouldn't be more than 
one, cancel_delayed_work_sync probably return false after the first 
time, so calling cancel_delayed_work_sync once should be enough here. 
we probably need to wait longer from:


SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

to make sure the unprocessed job get done.


Regards,

Leo



+    if (!retry_max && !amdgpu_sriov_vf(adev)) {
+    if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) {
+    dev_warn(adev->dev, "Forced powering gate vcn hardware!");
+    vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+    }
+    }
    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
  if (adev->vcn.harvest_config & (1 << i))

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 1/2] drm/amdgpu: enhance amdgpu_vcn_suspend

2021-05-17 Thread Leo Liu


On 2021-05-17 11:52 a.m., James Zhu wrote:

During vcn suspend, stop the rings from receiving new requests,
and try to wait for all vcn jobs to finish gracefully.

v2: Force power gating of the vcn hardware after a few waiting retries.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22 +-
  1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 2016459..9f3a6e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -275,9 +275,29 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
  {
unsigned size;
void *ptr;
+   int retry_max = 6;
int i;
  
-	cancel_delayed_work_sync(>vcn.idle_work);

+   for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+   if (adev->vcn.harvest_config & (1 << i))
+   continue;
+   ring = >vcn.inst[i].ring_dec;
+   ring->sched.ready = false;
+
+   for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+   ring = >vcn.inst[i].ring_enc[j];
+   ring->sched.ready = false;
+   }
+   }
+
+   while (retry_max-- && cancel_delayed_work_sync(>vcn.idle_work))
+   mdelay(5);


I think it's possible to have one pending unprocessed VCN job when the 
suspend sequence gets here, but it shouldn't be more than one; 
cancel_delayed_work_sync probably returns false after the first time, so 
calling cancel_delayed_work_sync once should be enough here. We probably 
need to wait longer from:


SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

to make sure the unprocessed job gets done.


Regards,

Leo



+   if (!retry_max && !amdgpu_sriov_vf(adev)) {
+   if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) {
+   dev_warn(adev->dev, "Forced powering gate vcn 
hardware!");
+   vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+   }
+   }
  
  	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

if (adev->vcn.harvest_config & (1 << i))

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: update vcn1.0 Non-DPG suspend sequence

2021-05-05 Thread Leo Liu

Reviewed-by: Leo Liu 


On 2021-05-04 11:10 a.m., Sathishkumar S wrote:

update suspend register settings in Non-DPG mode.

Signed-off-by: Sathishkumar S 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 13 +
  1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 51a773a37a35..0c1beefa3e49 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1119,10 +1119,10 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device 
*adev)
UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
  
-	/* put VCPU into reset */

-   WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
-   UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
-   ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+   /* stall UMC channel */
+   WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
  
  	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |

UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
@@ -1141,6 +1141,11 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device 
*adev)
UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
  
+	/* put VCPU into reset */

+   WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+   UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+   ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
  
  	vcn_v1_0_enable_clock_gating(adev);

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: Init GFX10_ADDR_CONFIG for VCN v3 in DPG mode.

2021-05-05 Thread Leo Liu



On 2021-05-05 2:17 p.m., Alex Deucher wrote:

Applied.  Thanks!  Do we need a similar fix for other VCN variants?


VCN3 is the only one missing that.

Regards,

Leo




Alex

On Tue, May 4, 2021 at 10:14 PM Leo Liu  wrote:

Reviewed-and-Tested by: Leo Liu 

On 2021-05-04 9:27 p.m., Bas Nieuwenhuizen wrote:

Otherwise tiling modes that require the values from this field
(in particular _*_X) would be corrupted upon video decode.

Copied from the VCN v2 code.

Fixes: 99541f392b4d ("drm/amdgpu: add mc resume DPG mode for VCN3.0")
Signed-off-by: Bas Nieuwenhuizen 
---
   drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 4 
   1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3f15bf34123a..cf165ab5dd26 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -589,6 +589,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, int inst_idx
   WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
   VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
   AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 
0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+ UVD, 0, mmUVD_GFX10_ADDR_CONFIG), 
adev->gfx.config.gb_addr_config, 0, indirect);
   }

   static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, 
int inst)

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu: Init GFX10_ADDR_CONFIG for VCN v3 in DPG mode.

2021-05-04 Thread Leo Liu

Reviewed-and-Tested by: Leo Liu 

On 2021-05-04 9:27 p.m., Bas Nieuwenhuizen wrote:

Otherwise tiling modes that require the values from this field
(in particular _*_X) would be corrupted upon video decode.

Copied from the VCN v2 code.

Fixes: 99541f392b4d ("drm/amdgpu: add mc resume DPG mode for VCN3.0")
Signed-off-by: Bas Nieuwenhuizen 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 4 
  1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3f15bf34123a..cf165ab5dd26 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -589,6 +589,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, int inst_idx
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 
0, indirect);
+
+   /* VCN global tiling registers */
+   WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+   UVD, 0, mmUVD_GFX10_ADDR_CONFIG), 
adev->gfx.config.gb_addr_config, 0, indirect);
  }
  
  static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/3] drm/amdgpu: make sure we unpin the UVD BO

2021-04-16 Thread Leo Liu

Acked-by: Leo Liu 

On 2021-04-16 8:54 a.m., Christian König wrote:

Ping?

Am 15.04.21 um 10:47 schrieb Christian König:

Releasing pinned BOs is illegal now.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1 +
  1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c

index 7cd67cb2ac5f..1a2bf2ca1be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -363,6 +363,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct 
amdgpu_ring *ring, long timeout)

    error:
  dma_fence_put(fence);
+    amdgpu_bo_unpin(bo);
  amdgpu_bo_unreserve(bo);
  amdgpu_bo_unref();
  return r;


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=04%7C01%7Cleo.liu%40amd.com%7C5823d278fae848e2292008d900d6bd76%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637541744618109453%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000sdata=TIn5u5%2FPS50wcKCd6%2FwTnpPm%2BgCPa8KOT1cz6r7Xgl0%3Dreserved=0 


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [BUG] VAAPI encoder cause kernel panic if encoded video in 4K

2021-04-13 Thread Leo Liu



On 2021-04-13 5:24 p.m., Mikhail Gavrilov wrote:

On Tue, 13 Apr 2021 at 04:55, Leo Liu  wrote:

It is curious why ffmpeg does not cause such issues.
For example, such a command does not cause a kernel panic:
$ ffmpeg -f x11grab -framerate 60 -video_size 3840x2160 -i :0.0 -vf
'format=nv12,hwupload' -vaapi_device /dev/dri/renderD128 -vcodec
h264_vaapi output3.mp4

What command are you using to see the issue or how can the issue be reproduced?

$ mpv output4.mp4


This is a decode command line — are you seeing the issue with encode or 
decode? You also said `ffmpeg -f x11grab -framerate 60 -video_size 
3840x2160 -i :0.0 -vf 'format=nv12,hwupload' -vaapi_device 
/dev/dri/renderD128 -vcodec h264_vaapi output3.mp4` doesn't cause such an 
issue, right? What command line can cause the issue then?





And of course, I know how it should works because when I encode video
with CPU encoder (libx264) all fine.
$ ffmpeg -f x11grab -framerate 60 -video_size 3840x2160 -i :0.0
-vcodec libx264 output3.mp4


Please file a freedesktop gitlab issue, so we can keep track of it.

Here? 
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgitlab.freedesktop.org%2Fdrm%2Famd%2F-%2Fissuesdata=04%7C01%7Cleo.liu%40amd.com%7C3cd466c3286e4303f2b108d8fec2833a%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637539458675499474%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000sdata=GbiA7%2FrkiLwh2E9js9tGhWkZyr%2B9TY57H6G6cL7ex8s%3Dreserved=0


Yes.



Also, I found that other users face the same problem.
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fbbs.archlinux.org%2Fviewtopic.php%3Fid%3D261965data=04%7C01%7Cleo.liu%40amd.com%7C3cd466c3286e4303f2b108d8fec2833a%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637539458675499474%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000sdata=YNnk%2BZnPS0DVtuDfttnTThYfHOvmP38%2BwNpNZ5voLuk%3Dreserved=0


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [BUG] VAAPI encoder cause kernel panic if encoded video in 4K

2021-04-12 Thread Leo Liu

It is curious why ffmpeg does not cause such issues.
For example, such a command does not cause a kernel panic:
$ ffmpeg -f x11grab -framerate 60 -video_size 3840x2160 -i :0.0 -vf
'format=nv12,hwupload' -vaapi_device /dev/dri/renderD128 -vcodec
h264_vaapi output3.mp4


What command are you using to see the issue or how can the issue be reproduced?
Please file a freedesktop gitlab issue, so we can keep track of it.


On 2021-04-12 6:05 p.m., Mikhail Gavrilov wrote:


Video demonstration: 
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fyoutu.be%2F3nkvUeB0GSwdata=04%7C01%7Cleo.liu%40amd.com%7C87e4541e8fe14d78058108d8fdff115f%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637538619239490302%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000sdata=CJRVsYixJlfnt5%2BkSCCi5BqP6WK9izh%2FE1ZMEsaR5rU%3Dreserved=0

How looks kernel traces.

1.
[ 7315.156460] amdgpu :0b:00.0: amdgpu: [mmhub] page fault
(src_id:0 ring:0 vmid:6 pasid:32779, for process obs pid 23963 thread
obs:cs0 pid 23977)
[ 7315.156490] amdgpu :0b:00.0: amdgpu:   in page starting at
address 0x80011fdf5000 from client 18
[ 7315.156495] amdgpu :0b:00.0: amdgpu:
MMVM_L2_PROTECTION_FAULT_STATUS:0x00641A51
[ 7315.156500] amdgpu :0b:00.0: amdgpu: Faulty UTCL2 client ID: VCN1 (0xd)
[ 7315.156503] amdgpu :0b:00.0: amdgpu: MORE_FAULTS: 0x1
[ 7315.156505] amdgpu :0b:00.0: amdgpu: WALKER_ERROR: 0x0
[ 7315.156509] amdgpu :0b:00.0: amdgpu: PERMISSION_FAULTS: 0x5
[ 7315.156510] amdgpu :0b:00.0: amdgpu: MAPPING_ERROR: 0x0
[ 7315.156513] amdgpu :0b:00.0: amdgpu: RW: 0x1
[ 7315.156545] amdgpu :0b:00.0: amdgpu: [mmhub] page fault
(src_id:0 ring:0 vmid:6 pasid:32779, for process obs pid 23963 thread
obs:cs0 pid 23977)
[ 7315.156549] amdgpu :0b:00.0: amdgpu:   in page starting at
address 0x80011fdf6000 from client 18
[ 7315.156551] amdgpu :0b:00.0: amdgpu:
MMVM_L2_PROTECTION_FAULT_STATUS:0x00641A51
[ 7315.156554] amdgpu :0b:00.0: amdgpu: Faulty UTCL2 client ID: VCN1 (0xd)
[ 7315.156556] amdgpu :0b:00.0: amdgpu: MORE_FAULTS: 0x1
[ 7315.156559] amdgpu :0b:00.0: amdgpu: WALKER_ERROR: 0x0
[ 7315.156561] amdgpu :0b:00.0: amdgpu: PERMISSION_FAULTS: 0x5
[ 7315.156564] amdgpu :0b:00.0: amdgpu: MAPPING_ERROR: 0x0
[ 7315.156566] amdgpu :0b:00.0: amdgpu: RW: 0x1

This is a harmless panic, but nevertheless VAAPI does not work and the
application that tried to use the encoder crashed.

2.
If we tries again and again encode 4K stream through VAAPI we can
encounter the next trace:
[12341.860944] [ cut here ]
[12341.860961] kernel BUG at drivers/dma-buf/dma-resv.c:287!
[12341.860968] invalid opcode:  [#1] SMP NOPTI
[12341.860972] CPU: 28 PID: 18261 Comm: kworker/28:0 Tainted: G
W- ---  5.12.0-0.rc5.180.fc35.x86_64+debug #1
[12341.860977] Hardware name: System manufacturer System Product
Name/ROG STRIX X570-I GAMING, BIOS 3402 01/13/2021
[12341.860981] Workqueue: events amdgpu_irq_handle_ih_soft [amdgpu]
[12341.861102] RIP: 0010:dma_resv_add_shared_fence+0x2ab/0x2c0
[12341.861108] Code: fd ff ff be 01 00 00 00 e8 e2 74 dc ff e9 ac fd
ff ff 48 83 c4 18 be 03 00 00 00 5b 5d 41 5c 41 5d 41 5e 41 5f e9 c5
74 dc ff <0f> 0b 31 ed e9 73 fe ff ff 66 66 2e 0f 1f 84 00 00 00 00 00
90 0f
[12341.861112] RSP: 0018:b2f084c87bb0 EFLAGS: 00010246
[12341.861115] RAX: 0002 RBX: 9f9551184998 RCX: 
[12341.861119] RDX: 0002 RSI:  RDI: 9f9551184a50
[12341.861122] RBP: 0002 R08:  R09: 
[12341.861124] R10:  R11:  R12: 9f91b9a18140
[12341.861127] R13: 9f91c9020740 R14: 9f91c9020768 R15: 
[12341.861130] FS:  () GS:9f984a20()
knlGS:
[12341.861133] CS:  0010 DS:  ES:  CR0: 80050033
[12341.861136] CR2: 144e080d8000 CR3: 00010e98c000 CR4: 00350ee0
[12341.861139] Call Trace:
[12341.861143]  amdgpu_vm_sdma_commit+0x182/0x220 [amdgpu]
[12341.861251]  amdgpu_vm_bo_update_mapping.constprop.0+0x278/0x3c0 [amdgpu]
[12341.861356]  amdgpu_vm_handle_fault+0x145/0x290 [amdgpu]
[12341.861461]  gmc_v10_0_process_interrupt+0xb3/0x250 [amdgpu]
[12341.861571]  ? _raw_spin_unlock_irqrestore+0x37/0x40
[12341.861577]  ? lock_acquire+0x179/0x3a0
[12341.861583]  ? lock_acquire+0x179/0x3a0
[12341.861587]  ? amdgpu_irq_dispatch+0xc6/0x240 [amdgpu]
[12341.861692]  amdgpu_irq_dispatch+0xc6/0x240 [amdgpu]
[12341.861796]  amdgpu_ih_process+0x90/0x110 [amdgpu]
[12341.861900]  process_one_work+0x2b0/0x5e0
[12341.861906]  worker_thread+0x55/0x3c0
[12341.861910]  ? process_one_work+0x5e0/0x5e0
[12341.861915]  kthread+0x13a/0x150
[12341.861918]  ? __kthread_bind_mask+0x60/0x60
[12341.861922]  ret_from_fork+0x22/0x30
[12341.861928] Modules linked in: uinput snd_seq_dummy rfcomm
snd_hrtimer netconsole nft_objref nf_conntrack_netbios_ns

[ANNOUNCE] libdrm 2.4.105

2021-04-07 Thread Leo Liu
Alex Deucher (1):
  amdgpu: update marketing names

Alistair Delva (1):
  xf86drm: fix null pointer deref in drmGetBufInfo

Ashutosh Dixit (1):
  intel: Keep libdrm working without pread/pwrite ioctls

Emil Velikov (3):
  xf86drm: cap number of reported devices by drmGetDevice(2)
  Revert "xf86drm: cap number of reported devices by drmGetDevice(2)"
  xf86drm: cap number of reported devices by drmGetDevice(2)

Fabio Estevam (1):
  tests/util: Add mxsfb-drm driver

Fang Tan (1):
  meson: use library() instead of shared_library().

Heiko Becker (1):
  meson: Also search for rst2man.py

James Zhu (1):
  tests/amdgpu: add vcn test support for dimgrey_cavefish

Jinzhou Su (1):
  test/amdgpu: remove static varible in Syncobj test

Lang Yu (2):
  drm/tests/amdgpu: fix Metadata test failed issue
  tests/amdgpu: fix bo eviction test issue

Leo Liu (4):
  amdgpu_drm: sync up with the latest amdgpu_drm.h based on drm-next 
(https://cgit.freedesktop.org/drm/drm)
  amdgpu: sync up amdgpu_drm.h with latest from kernel
  amdgpu: add function of INFO ioctl for querying video caps
  Bump version to 2.4.105

Simon Ser (4):
  xf86drmMode: add drmIsKMS
  xf86drm: warn about GEM handle reference counting
  xf86drmMode: introduce drmModeGetPropertyType
  xf86drmMode: set FB_MODIFIERS flag when modifiers are supplied

Sonny Jiang (1):
  tests/amdgpu/vcn: clean abundant codes

Tao Zhou (1):
  tests/amdgpu: add vcn test support for navy_flounder

Tejas Upadhyay (3):
  intel: sync i915_pciids.h with kernel
  intel: add INTEL_ADLS_IDS to the pciids list
  intel: Add support for JSL

Valentin Churavy (1):
  Use dep_rt in amdgpu/meson.build

Victor Hugo Vianna Silva (1):
  Avoid some compiler errors for tests/util/pattern.c

git tag: libdrm-2.4.105

https://dri.freedesktop.org/libdrm/libdrm-2.4.105.tar.xz
SHA256: 1d1d024b7cadc63e2b59cddaca94f78864940ab440843841113fbac6afaf2a46  
libdrm-2.4.105.tar.xz
SHA512: 
083a04af7208e58be21b89c6ebdbe2db3ba00cd29f0d271bd38bfe97dfca741edafddaaf9b5b95c20fac2c9b700434ea5b21397de26f7073169ad6f5b090f715
  libdrm-2.4.105.tar.xz
PGP:  https://dri.freedesktop.org/libdrm/libdrm-2.4.105.tar.xz.sig

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/3] drm/amdgpu: share scheduler score on VCN3 instances

2021-02-04 Thread Leo Liu

The series are:

Reviewed-and-Tested-by: Leo Liu 


On 2021-02-04 9:44 a.m., Christian König wrote:

The VCN3 instances can do both decode as well as encode.

Share the scheduler load balancing score and remove fixing encode to
only the second instance.

Signed-off-by: Christian König 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  1 +
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c   | 11 +++
  2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 13aa417f6be7..d10bc4f0a05f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -211,6 +211,7 @@ struct amdgpu_vcn_inst {
void*saved_bo;
struct amdgpu_ring  ring_dec;
struct amdgpu_ring  ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+   atomic_tsched_score;
struct amdgpu_irq_src   irq;
struct amdgpu_vcn_reg   external;
struct amdgpu_bo*dpg_sram_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 239a4eb52c61..b33f513fd2ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -171,6 +171,7 @@ static int vcn_v3_0_sw_init(void *handle)
  
  	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {

volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << i))
continue;
  
@@ -198,6 +199,8 @@ static int vcn_v3_0_sw_init(void *handle)

if (r)
return r;
  
+		atomic_set(>vcn.inst[i].sched_score, 0);

+
ring = >vcn.inst[i].ring_dec;
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
@@ -209,7 +212,8 @@ static int vcn_v3_0_sw_init(void *handle)
ring->no_scheduler = true;
sprintf(ring->name, "vcn_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, >vcn.inst[i].irq, 0,
-AMDGPU_RING_PRIO_DEFAULT, NULL);
+AMDGPU_RING_PRIO_DEFAULT,
+>vcn.inst[i].sched_score);
if (r)
return r;
  
@@ -227,11 +231,10 @@ static int vcn_v3_0_sw_init(void *handle)

} else {
ring->doorbell_index = 
(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
-   if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
-   ring->no_scheduler = true;
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, 
>vcn.inst[i].irq, 0,
-AMDGPU_RING_PRIO_DEFAULT, NULL);
+AMDGPU_RING_PRIO_DEFAULT,
+>vcn.inst[i].sched_score);
if (r)
return r;
}

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 2/2] drm/amd/amdgpu: Use IP discovery data to determine VCN enablement instead of MMSCH

2021-01-07 Thread Leo Liu



On 2021-01-05 5:54 p.m., Bokun Zhang wrote:

In the past, we used MMSCH to determine whether a VCN is enabled or not.
This is not reliable since after a FLR, MMSCH may report junk data.

It is better to use IP discovery data.

Change-Id: I8b6c32c34017b20dcaebffdaa78bb07178e9d03c
Signed-off-by: Bokun Zhang 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 73 +--
  1 file changed, 45 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index def583916294..02cac6e33219 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -27,6 +27,7 @@
  #include "amdgpu_pm.h"
  #include "soc15.h"
  #include "soc15d.h"
+#include "soc15_hw_ip.h"
  #include "vcn_v2_0.h"
  #include "mmsch_v3_0.h"
  
@@ -60,6 +61,17 @@ static int amdgpu_ucode_id_vcns[] = {

AMDGPU_UCODE_ID_VCN1
  };
  
+#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80

+#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
+#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0
+
+enum vcn_ring_type {
+   VCN_ENCODE_RING,
+   VCN_DECODE_RING,
+   VCN_UNIFIED_RING,
+};
+
+static bool vcn_v3_0_is_disabled_vcn(struct amdgpu_device *adev, enum 
vcn_ring_type type, uint32_t vcn_instance);
  static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
  static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
  static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -311,18 +323,26 @@ static int vcn_v3_0_hw_init(void *handle)
continue;
  
  			ring = >vcn.inst[i].ring_dec;

-   if (ring->sched.ready) {
+   if (vcn_v3_0_is_disabled_vcn(adev, VCN_DECODE_RING, i)) 
{
Since this is for SRIOV path only, and this doesn't apply to bare-metal, 
so please rename the function to something like xxx_sriov instead.


Regards,

Leo



+   ring->sched.ready = false;
+   dev_info(adev->dev, "ring %s is disabled by 
hypervisor\n", ring->name);
+   } else {
ring->wptr = 0;
ring->wptr_old = 0;
vcn_v3_0_dec_ring_set_wptr(ring);
+   ring->sched.ready = true;
}
  
  			for (j = 0; j < adev->vcn.num_enc_rings; ++j) {

ring = >vcn.inst[i].ring_enc[j];
-   if (ring->sched.ready) {
+   if (vcn_v3_0_is_disabled_vcn(adev, 
VCN_ENCODE_RING, i)) {
+   ring->sched.ready = false;
+   dev_info(adev->dev, "ring %s is disabled by 
hypervisor\n", ring->name);
+   } else {
ring->wptr = 0;
ring->wptr_old = 0;
vcn_v3_0_enc_ring_set_wptr(ring);
+   ring->sched.ready = true;
}
}
}
@@ -1266,6 +1286,29 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
return 0;
  }
  
+static bool vcn_v3_0_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)

+{
+   bool ret = false;
+
+   int major;
+   int minor;
+   int revision;
+
+   /* if cannot find IP data, then this VCN does not exist */
+   if (amdgpu_discovery_get_ip_version(adev, VCN_HWID, vcn_instance, , 
, ) != 0)
+   return true;
+
+   if ((type == VCN_ENCODE_RING) && (revision & 
VCN_BLOCK_ENCODE_DISABLE_MASK)) {
+   ret = true;
+   } else if ((type == VCN_DECODE_RING) && (revision & 
VCN_BLOCK_DECODE_DISABLE_MASK)) {
+   ret = true;
+   } else if ((type == VCN_UNIFIED_RING) && (revision & 
VCN_BLOCK_QUEUE_DISABLE_MASK)) {
+   ret = true;
+   }
+
+   return ret;
+}
+
  static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
  {
int i, j;
@@ -1283,8 +1326,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device 
*adev)
uint32_t table_size;
uint32_t size, size_dw;
  
-	bool is_vcn_ready;

-
struct mmsch_v3_0_cmd_direct_write
direct_wt = { {0} };
struct mmsch_v3_0_cmd_direct_read_modify_write
@@ -1476,30 +1517,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device 
*adev)
}
}
  
-	/* 6, check each VCN's init_status

-* if it remains as 0, then this VCN is not assigned to current VF
-* do not start ring for this VCN
-*/
-   size = sizeof(struct mmsch_v3_0_init_header);
-   table_loc = (uint32_t *)table->cpu_addr;
-   memcpy(, (void *)table_loc, size);
-
-   for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-   if 

Re: [PATCH v2] drm/amdgpu: fix SI UVD firmware validate resume fail

2020-11-16 Thread Leo Liu

Reviewed-by: Leo Liu 


On 2020-11-13 5:33 p.m., Jiang, Sonny wrote:


[AMD Official Use Only - Internal Distribution Only]


[AMD Official Use Only - Internal Distribution Only]


Ping.

*From:* Jiang, Sonny 
*Sent:* Monday, November 9, 2020 2:41 PM
*To:* amd-gfx@lists.freedesktop.org 
*Cc:* Jiang, Sonny 
*Subject:* [PATCH v2] drm/amdgpu: fix SI UVD firmware validate resume 
fail

The SI UVD firmware validate key is stored at the end of the firmware,
which is changed during resume while playing video. So get the key
at sw_init and store it for use during fw validation.

Signed-off-by: Sonny Jiang 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  1 +
 drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c   | 20 +++-
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h

index 5eb63288d157..edbb8194ee81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -67,6 +67,7 @@ struct amdgpu_uvd {
 unsigned    harvest_config;
 /* store image width to adjust nb memory state */
 unsigned    decode_image_width;
+   uint32_t    keyselect;
 };

 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c

index 7cf4b11a65c5..3a5dce634cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct 
amdgpu_device *adev)

  */
 static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
 {
-   void *ptr;
-   uint32_t ucode_len, i;
-   uint32_t keysel;
-
-   ptr = adev->uvd.inst[0].cpu_addr;
-   ptr += 192 + 16;
-   memcpy(_len, ptr, 4);
-   ptr += ucode_len;
-   memcpy(, ptr, 4);
+   int i;
+   uint32_t keysel = adev->uvd.keyselect;

 WREG32(mmUVD_FW_START, keysel);

@@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
 struct amdgpu_ring *ring;
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 int r;
+   void *ptr;
+   uint32_t ucode_len;

 /* UVD TRAP */
 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, 
>uvd.inst->irq);

@@ -560,6 +555,13 @@ static int uvd_v3_1_sw_init(void *handle)
 if (r)
 return r;

+   /* Retrieval firmware validate key */
+   ptr = adev->uvd.inst[0].cpu_addr;
+   ptr += 192 + 16;
+   memcpy(_len, ptr, 4);
+   ptr += ucode_len;
+   memcpy(>uvd.keyselect, ptr, 4);
+
 ring = >uvd.inst->ring;
 sprintf(ring->name, "uvd");
 r = amdgpu_ring_init(adev, ring, 512, >uvd.inst->irq, 0,
--
2.25.1


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=04%7C01%7Cleo.liu%40amd.com%7C695a4d51b81b4a08dbe208d888242256%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637409036097906257%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C1000sdata=8u1TK45gYH0hQ8dn5x1A9ckXIYQjGCedmkp8mpt4EW4%3Dreserved=0
___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] amd/amdgpu: Disable VCN DPG mode for Picasso

2020-10-30 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2020-10-30 1:10 p.m., veerabadhran.gopalakrish...@amd.com wrote:

From: Veerabadhran Gopalakrishnan 

Concurrent operation of VCN and JPEG decoder in DPG mode is
causing ring timeout due to power state.

Signed-off-by: Veerabadhran Gopalakrishnan 
---
  drivers/gpu/drm/amd/amdgpu/soc15.c | 3 +--
  1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index ed7342bbf801..f57c5f57efa8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1195,8 +1195,7 @@ static int soc15_common_early_init(void *handle)
  
  			adev->pg_flags = AMD_PG_SUPPORT_SDMA |

AMD_PG_SUPPORT_MMHUB |
-   AMD_PG_SUPPORT_VCN |
-   AMD_PG_SUPPORT_VCN_DPG;
+   AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/1] drm/amdgpu: disable gpu-sched load balance for uvd

2020-08-31 Thread Leo Liu


On 2020-08-31 5:53 p.m., Alex Deucher wrote:

On Mon, Aug 31, 2020 at 5:50 PM Leo Liu  wrote:


On 2020-08-31 1:39 p.m., Alex Deucher wrote:

On Mon, Aug 31, 2020 at 10:55 AM Nirmoy  wrote:

Hi Alex,

On 8/31/20 4:17 PM, Alex Deucher wrote:

On Mon, Aug 31, 2020 at 6:41 AM Nirmoy Das  wrote:

UVD dependent jobs should run on the same udv instance.
This patch disables gpu scheduler's load balancer for
a context which binds jobs from same the context to a udv
instance.

typos: udv -> uvd
With that fixed:
Reviewed-by: Alex Deucher 

Does VCE need a similar fix?  What about UVD_ENC?

I am not sure, can you please confirm this.

@Leo Liu can you confirm?

Vega20 have 2 UVDs and 1 VCE, so UVD_ENC(AMDGPU_HW_IP_UVD_ENC) should
need the same fix.

What about other chips?  Didn't CZ and tonga have two VCE instances?
I guess any engine with hw contexts needs this.


Vega20's 2 UVDs are identical and separate; each exposes its own set of 
rings to the driver and has its own scheduler.


For CZ and Tonga, there are 2 VCE instances internally, but only one set 
of rings is exposed to the driver, i.e. the instances will receive the IB 
through the same ring, and the IB structures will decide whether one 
instance or two instances will be used.


Regards,

Leo



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/1] drm/amdgpu: disable gpu-sched load balance for uvd

2020-08-31 Thread Leo Liu



On 2020-08-31 1:39 p.m., Alex Deucher wrote:

On Mon, Aug 31, 2020 at 10:55 AM Nirmoy  wrote:

Hi Alex,

On 8/31/20 4:17 PM, Alex Deucher wrote:

On Mon, Aug 31, 2020 at 6:41 AM Nirmoy Das  wrote:

UVD dependent jobs should run on the same udv instance.
This patch disables gpu scheduler's load balancer for
a context which binds jobs from same the context to a udv
instance.

typos: udv -> uvd
With that fixed:
Reviewed-by: Alex Deucher 

Does VCE need a similar fix?  What about UVD_ENC?


I am not sure, can you please confirm this.

@Leo Liu can you confirm?


Vega20 have 2 UVDs and 1 VCE, so UVD_ENC(AMDGPU_HW_IP_UVD_ENC) should 
need the same fix.


Regards,

Leo





Alex



Nirmoy




Alex



Signed-off-by: Nirmoy Das 
---
   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 +++-
   1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 59032c26fc82..7cd398d25498 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -114,7 +114,9 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, 
u32 hw_ip,
  scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
  num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

-   if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+   if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
+   hw_ip == AMDGPU_HW_IP_VCN_DEC ||
+   hw_ip == AMDGPU_HW_IP_UVD) {
  sched = drm_sched_pick_best(scheds, num_scheds);
  scheds = 
  num_scheds = 1;
--
2.28.0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cleo.liu%40amd.com%7Caaf07c8faa4d46f0b35f08d84dd4e875%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637344924116323889sdata=kvny71irAythb60XTzLHokUD7ZifHAKkTND8zSNs3T0%3Dreserved=0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/jpeg: remove redundant check when it returns

2020-08-14 Thread Leo Liu
Fix warning from kernel test robot
v2: remove the local variable as well

Signed-off-by: Leo Liu 
Reviewed-by: Alex Deucher 
---
 drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index c41e5590a701..3a0dff53654d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -460,15 +460,10 @@ static bool jpeg_v3_0_is_idle(void *handle)
 static int jpeg_v3_0_wait_for_idle(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-   int ret;
 
-   ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
+   return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
-   if (ret)
-   return ret;
-
-   return ret;
 }
 
 static int jpeg_v3_0_set_clockgating_state(void *handle,
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/jpeg: remove redundant check when it returns

2020-08-13 Thread Leo Liu
Fix warning from kernel test robot

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index c41e5590a701..f4ba423af051 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -465,8 +465,6 @@ static int jpeg_v3_0_wait_for_idle(void *handle)
ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
-   if (ret)
-   return ret;
 
return ret;
 }
-- 
2.25.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 5/5] drm/amd/sriov skip vcn powergating and dec_ring_test

2020-07-14 Thread Leo Liu



On 2020-07-13 10:47 p.m., Jack Zhang wrote:

1.Skip decode_ring test in VF, because VCN in SRIOV does not
support direct register read/write.

2.Skip powergating configuration in hw fini because
VCN3.0 SRIOV doesn't support powergating.

Signed-off-by: Jack Zhang 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c |  4 
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c   | 17 +++--
  2 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 15ff30c53e24..92a55e40bc48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -421,6 +421,10 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
  
+	/* VCN in SRIOV does not support direct register read/write */

+   if (amdgpu_sriov_vf(adev))
+   return 0;
+
WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 0a0ca10bf55b..8e5de9ed64f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -354,10 +354,13 @@ static int vcn_v3_0_hw_fini(void *handle)
  
  		ring = >vcn.inst[i].ring_dec;
  
-		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||

+   if (amdgpu_sriov_vf(adev)) {
+   /* for SRIOV, VM does not control PG state */
+   }


It looks odd to me that there are only comments inside the brackets. Can you 
combine it with the "else if" line?




else if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
-   RREG32_SOC15(VCN, i, mmUVD_STATUS)))
+   RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+   }
  
  		ring->sched.ready = false;
  
@@ -1208,6 +1211,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)

return 0;
  }
  
+


Why do you add an empty line here?

Regards,

Leo




  static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
  {
int i, j;
@@ -1861,6 +1865,15 @@ static int vcn_v3_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
  
+	/* for SRIOV, guest should not control VCN Power-gating

+* MMSCH FW should control Power-gating and clock-gating
+* guest should avoid touching CGC and PG
+*/
+   if (amdgpu_sriov_vf(adev)) {
+   adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+   return 0;
+   }
+
if(state == adev->vcn.cur_state)
return 0;
  

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/5] drm/amd/sriov porting sriov cap to vcn3.0

2020-07-14 Thread Leo Liu

This patch is:

Reviewed-by: Leo Liu 

On 2020-07-13 10:47 p.m., Jack Zhang wrote:

1.In early_init and for sriov, hardcode
   harvest_config=0, enc_num=1

2.sw_init/fini
   alloc & free mm_table for sriov
   doorbell setting for sriov

3.hw_init/fini
   Under sriov, add start_sriov to config mmsch
   Skip ring_test to avoid mmio in VF, but need to initialize wptr for vcn 
rings.

4.Implementation for vcn_v3_0_start_sriov

V2: Clean up some unnecessary function declarations.

Signed-off-by: Jack Zhang 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 350 +++---
  1 file changed, 318 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 90fe95f345e3..0a0ca10bf55b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -28,6 +28,7 @@
  #include "soc15.h"
  #include "soc15d.h"
  #include "vcn_v2_0.h"
+#include "mmsch_v3_0.h"
  
  #include "vcn/vcn_3_0_0_offset.h"

  #include "vcn/vcn_3_0_0_sh_mask.h"
@@ -48,6 +49,17 @@
  
  #define VCN_INSTANCES_SIENNA_CICHLID	 2
  
+static int amdgpu_ih_clientid_vcns[] = {

+   SOC15_IH_CLIENTID_VCN,
+   SOC15_IH_CLIENTID_VCN1
+};
+
+static int amdgpu_ucode_id_vcns[] = {
+   AMDGPU_UCODE_ID_VCN,
+   AMDGPU_UCODE_ID_VCN1
+};
+
+static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
  static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
  static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
  static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -56,10 +68,8 @@ static int vcn_v3_0_set_powergating_state(void *handle,
  static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
  
-static int amdgpu_ih_clientid_vcns[] = {

-   SOC15_IH_CLIENTID_VCN,
-   SOC15_IH_CLIENTID_VCN1
-};
+static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
+static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
  
  /**

   * vcn_v3_0_early_init - set function pointers
@@ -71,25 +81,33 @@ static int amdgpu_ih_clientid_vcns[] = {
  static int vcn_v3_0_early_init(void *handle)
  {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-   if (adev->asic_type == CHIP_SIENNA_CICHLID) {
-   u32 harvest;
-   int i;
  
+	if (amdgpu_sriov_vf(adev)) {

adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
-   for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-   harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
-   if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
-   adev->vcn.harvest_config |= 1 << i;
-   }
+   adev->vcn.harvest_config = 0;
+   adev->vcn.num_enc_rings = 1;
  
-		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |

-AMDGPU_VCN_HARVEST_VCN1))
-   /* both instances are harvested, disable the block */
-   return -ENOENT;
-   } else
-   adev->vcn.num_vcn_inst = 1;
+   } else {
+   if (adev->asic_type == CHIP_SIENNA_CICHLID) {
+   u32 harvest;
+   int i;
+
+   adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
+   for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+   harvest = RREG32_SOC15(VCN, i, 
mmCC_UVD_HARVESTING);
+   if (harvest & 
CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+   adev->vcn.harvest_config |= 1 << i;
+   }
  
-	adev->vcn.num_enc_rings = 2;

+   if (adev->vcn.harvest_config == 
(AMDGPU_VCN_HARVEST_VCN0 |
+   AMDGPU_VCN_HARVEST_VCN1))
+   /* both instances are harvested, disable the 
block */
+   return -ENOENT;
+   } else
+   adev->vcn.num_vcn_inst = 1;
+
+   adev->vcn.num_enc_rings = 2;
+   }
  
  	vcn_v3_0_set_dec_ring_funcs(adev);

vcn_v3_0_set_enc_ring_funcs(adev);
@@ -109,6 +127,7 @@ static int vcn_v3_0_sw_init(void *handle)
  {
struct amdgpu_ring *ring;
int i, j, r;
+   int vcn_doorbell_index = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
  	r = amdgpu_vcn_sw_init(adev);

@@ -136,6 +155,12 @@ static int vcn_v3_0_sw_init(void *handle)
if (r)
return r;
  
+	if (amdgpu_sriov_vf(adev)) {

+   vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
+   /* get DWORD offset */
+   

Re: [PATCH] drm/amdgpu/jpeg: fix race condition issue for jpeg start

2020-06-11 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2020-06-10 12:36 p.m., James Zhu wrote:

Fix race condition issue when multiple jpeg starts are called.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 16 
  drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h |  2 ++
  2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index d31d65e..8996cb4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -37,6 +37,8 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct 
*work);
  int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
  {
INIT_DELAYED_WORK(>jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
+   mutex_init(>jpeg.jpeg_pg_lock);
+   atomic_set(>jpeg.total_submission_cnt, 0);
  
  	return 0;

  }
@@ -54,6 +56,8 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(>jpeg.inst[i].ring_dec);
}
  
+	mutex_destroy(>jpeg.jpeg_pg_lock);

+
return 0;
  }
  
@@ -83,7 +87,7 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)

fences += 
amdgpu_fence_count_emitted(>jpeg.inst[i].ring_dec);
}
  
-	if (fences == 0)

+   if (!fences && !atomic_read(>jpeg.total_submission_cnt))
amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_JPEG,
   AMD_PG_STATE_GATE);
else
@@ -93,15 +97,19 @@ static void amdgpu_jpeg_idle_work_handler(struct 
work_struct *work)
  void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring)
  {
struct amdgpu_device *adev = ring->adev;
-   bool set_clocks = !cancel_delayed_work_sync(>jpeg.idle_work);
  
-	if (set_clocks)

-   amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_JPEG,
+   atomic_inc(>jpeg.total_submission_cnt);
+   cancel_delayed_work_sync(>jpeg.idle_work);
+
+   mutex_lock(>jpeg.jpeg_pg_lock);
+   amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
   AMD_PG_STATE_UNGATE);
+   mutex_unlock(>jpeg.jpeg_pg_lock);
  }
  
  void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring)

  {
+   atomic_dec(>adev->jpeg.total_submission_cnt);
schedule_delayed_work(>adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
  }
  
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h

index 5131a0a..55fbff2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -46,6 +46,8 @@ struct amdgpu_jpeg {
unsigned harvest_config;
struct delayed_work idle_work;
enum amd_powergating_state cur_state;
+   struct mutex jpeg_pg_lock;
+   atomic_t total_submission_cnt;
  };
  
  int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/vcn2.5: wait for tiles off after unpause

2020-04-28 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2020-04-27 4:05 p.m., James Zhu wrote:

Wait for tiles off after unpause to fix transcode timeout issue.
It is a work around.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 0fa1c5c..38ca4a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1404,7 +1404,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device 
*adev,
  {
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
-   int ret_code;
+   int ret_code = 0;
  
  	/* pause/unpause if state is changed */

if (adev->vcn.inst[inst_idx].pause_state.fw_based != 
new_state->fw_based) {
@@ -1414,7 +1414,6 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device 
*adev,
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
  
  		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {

-   ret_code = 0;
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 
0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, 
ret_code);
  
@@ -1469,9 +1468,10 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,

   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, 
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
-   /* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+   SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 
0x1,
+   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, 
ret_code);
}
adev->vcn.inst[inst_idx].pause_state.fw_based = 
new_state->fw_based;
}

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/5] drm/amdgpu/vcn: Add firmware share memory support

2020-03-30 Thread Leo Liu



On 2020-03-30 8:13 a.m., James Zhu wrote:

Added firmware share memory support for VCN. Current multiple
queue mode is enabled only.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 13 +
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 24 
  2 files changed, 37 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 7a0b074..328b6ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -182,6 +182,14 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return r;
}
}
+
+   r = amdgpu_bo_create_kernel(adev, 
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+   PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 
>vcn.inst[i].fw_shared_bo,
+   >vcn.inst[i].fw_shared_gpu_addr, 
>vcn.inst[i].fw_shared_cpu_addr);
+   if (r) {
+   dev_err(adev->dev, "VCN %d (%d) failed to allocate fimware 
shared bo\n", i, r);
+   return r;
+   }
}
  
  	return 0;

@@ -196,6 +204,11 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+
+   amdgpu_bo_free_kernel(>vcn.inst[j].fw_shared_bo,
+ >vcn.inst[j].fw_shared_gpu_addr,
+ (void 
**)>vcn.inst[j].fw_shared_cpu_addr);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(>vcn.inst[j].dpg_sram_bo,
  
>vcn.inst[j].dpg_sram_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index e913de8..853f0cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -179,11 +179,14 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src   irq;
struct amdgpu_vcn_reg   external;
struct amdgpu_bo*dpg_sram_bo;
+   struct amdgpu_bo*fw_shared_bo;
struct dpg_pause_state  pause_state;
void*dpg_sram_cpu_addr;
uint64_tdpg_sram_gpu_addr;
uint32_t*dpg_sram_curr_addr;
atomic_tdpg_enc_submission_cnt;
+   void*fw_shared_cpu_addr;
+   uint64_tfw_shared_gpu_addr;
  };
  
  struct amdgpu_vcn {

@@ -209,6 +212,27 @@ struct amdgpu_vcn {
int inst_idx, struct dpg_pause_state *new_state);
  };
  
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG(1 << 8)

+
+enum fw_queue_mode {
+   fw_queue_ring_reset = 1,
+   fw_queue_dpg_hold_off = 2,
+};


Please move the define and enum to the top as others. With that fixed, 
the series are


Reviewed-by: Leo Liu 




+
+struct amdgpu_fw_shared_multi_queue {
+   uint8_t decode_queue_mode;
+   uint8_t encode_generalpurpose_queue_mode;
+   uint8_t encode_lowlatency_queue_mode;
+   uint8_t encode_realtime_queue_mode;
+   uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+   uint32_t present_flag_0;
+   uint8_t pad[53];
+   struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
  int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
  int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
  int amdgpu_vcn_suspend(struct amdgpu_device *adev);

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v2 4/4] drm/amdgpu/vcn2.5: add sync when WPTR/RPTR reset

2020-03-10 Thread Leo Liu



On 2020-03-10 3:58 p.m., James Zhu wrote:

Add vcn hardware and firmware synchronization to fix race condition
issue among vcn driver, hardware and firmware

v2: WA: Add scratch 3 to sync with vcn firmware during W/R pointer reset

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 12 
  1 file changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 2d64ba1..9480039 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1034,6 +1034,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
  
+		/* Set scratch3 to start dec/enc registers reset */

+   WREG32_SOC15(UVD, i, mmUVD_SCRATCH3, 1);
+
/* programm the RB_BASE for ring buffer */
WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
@@ -1059,6 +1062,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, 
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+
+   /* Clear scratch3 to finish dec/enc registers reset */
+   WREG32_SOC15(UVD, i, mmUVD_SCRATCH3, 0);
}
  
  	return 0;

@@ -1388,8 +1394,11 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device 
*adev,
   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
   
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
  
+/* Stall DPG before WPTR/RPTR reset */

+   WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, 
mmUVD_POWER_STATUS), UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, 
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);


Could you wrap the line to make it shorter? With that fixed, this patch is

Reviewed-by: Leo Liu 


Leo



/* Restore */
ring = >vcn.inst[inst_idx].ring_enc[0];
+   ring->wptr = 0;
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, 
ring->gpu_addr);
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, 
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, 
ring->ring_size / 4);
@@ -1397,6 +1406,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device 
*adev,
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, 
lower_32_bits(ring->wptr));
  
  ring = >vcn.inst[inst_idx].ring_enc[1];

+   ring->wptr = 0;
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, 
ring->gpu_addr);
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, 
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, 
ring->ring_size / 4);
@@ -1405,6 +1415,8 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device 
*adev,
  
  WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,

   RREG32_SOC15(UVD, inst_idx, 
mmUVD_SCRATCH2) & 0x7FFF);
+   /* Unstall DPG */
+   WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, 
mmUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
  
  SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,

   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, 
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [enable VCN2.0 for NV12 SRIOV 6/6] drm/amdgpu: clear warning on unused var

2020-03-05 Thread Leo Liu

Is this warning introduced by your patch 4?

On 2020-03-05 8:33 a.m., Monk Liu wrote:

Signed-off-by: Monk Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 --
  1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index ae9754f..a41272f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -493,7 +493,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct 
amdgpu_ring *ring, uint32_t han
  
  int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)

  {
-   struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
  
@@ -655,7 +654,6 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
  
  int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)

  {
-   struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = NULL;
long r;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [enable VCN2.0 for NV12 SRIOV 5/6] drm/amdgpu: disable clock/power gating for SRIOV

2020-03-05 Thread Leo Liu

This patch is:

Acked-by: Leo Liu 

On 2020-03-05 8:33 a.m., Monk Liu wrote:

and disable MC resum in VCN2.0 as well

those are not concerned by VF driver

Signed-off-by: Monk Liu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 23 +++
  1 file changed, 23 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index dd500d1..f2745fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -320,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
  
+	if (amdgpu_sriov_vf(adev))

+   return;
+
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -464,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct 
amdgpu_device *adev)
  {
uint32_t data;
  
+	if (amdgpu_sriov_vf(adev))

+   return;
+
/* UVD disable CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -622,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct 
amdgpu_device *adev)
  {
uint32_t data = 0;
  
+	if (amdgpu_sriov_vf(adev))

+   return;
+
/* enable UVD CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -674,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct 
amdgpu_device *adev)
uint32_t data = 0;
int ret;
  
+	if (amdgpu_sriov_vf(adev))

+   return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -721,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct 
amdgpu_device *adev)
uint32_t data = 0;
int ret;
  
+	if (amdgpu_sriov_vf(adev))

+   return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
/* Before power off, this indicator has to be turned on */
data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
@@ -1231,6 +1246,9 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
  
+	if (amdgpu_sriov_vf(adev))

+   return 0;
+
if (enable) {
/* wait for STATUS to clear */
if (vcn_v2_0_is_idle(handle))
@@ -1686,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle,
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
+	if (amdgpu_sriov_vf(adev)) {

+   adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+   return 0;
+   }
+
if (state == adev->vcn.cur_state)
return 0;
  

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [enable VCN2.0 for NV12 SRIOV 4/6] drm/amdgpu: cleanup ring/ib test for SRIOV vcn2.0

2020-03-05 Thread Leo Liu

This patch is:

Reviewed-by: Leo Liu 

On 2020-03-05 8:33 a.m., Monk Liu wrote:

support IB test on dec/enc ring
disable ring test on dec/enc ring (MMSCH limitation)

Signed-off-by: Monk Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 11 +++
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   |  3 +++
  2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f96464e..ae9754f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -497,10 +497,6 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, 
long timeout)
struct dma_fence *fence;
long r;
  
-	/* temporarily disable ib test for sriov */

-   if (amdgpu_sriov_vf(adev))
-   return 0;
-
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -527,6 +523,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
  
+	if (amdgpu_sriov_vf(adev))

+   return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
@@ -661,10 +660,6 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, 
long timeout)
struct amdgpu_bo *bo = NULL;
long r;
  
-	/* temporarily disable ib test for sriov */

-   if (amdgpu_sriov_vf(adev))
-   return 0;
-
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
  AMDGPU_GEM_DOMAIN_VRAM,
  , NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 421e5bf..dd500d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1647,6 +1647,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
  
+	if (amdgpu_sriov_vf(adev))

+   return 0;
+
WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 4);
if (r)

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [enable VCN2.0 for NV12 SRIOV 3/6] drm/amdgpu: implement initialization part on VCN2.0 for SRIOV

2020-03-05 Thread Leo Liu



On 2020-03-05 8:33 a.m., Monk Liu wrote:

one dec ring and one enc ring

It seems more than that, you might add more messages.




Signed-off-by: Monk Liu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 231 +-
  1 file changed, 228 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index c387c81..421e5bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -29,6 +29,7 @@
  #include "soc15d.h"
  #include "amdgpu_pm.h"
  #include "amdgpu_psp.h"
+#include "mmsch_v2_0.h"
  
  #include "vcn/vcn_2_0_0_offset.h"

  #include "vcn/vcn_2_0_0_sh_mask.h"
@@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
  static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state 
*new_state);
-
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);


Please keep the empty line here.



  /**
   * vcn_v2_0_early_init - set function pointers
   *
@@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
  	adev->vcn.num_vcn_inst = 1;

-   adev->vcn.num_enc_rings = 2;
+   if (amdgpu_sriov_vf(adev))
+   adev->vcn.num_enc_rings = 1;
+   else
+   adev->vcn.num_enc_rings = 2;
  
  	vcn_v2_0_set_dec_ring_funcs(adev);

vcn_v2_0_set_enc_ring_funcs(adev);
@@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = >vcn.inst->ring_enc[i];
ring->use_doorbell = true;
-   ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 
1) + 2 + i;
+   if (!amdgpu_sriov_vf(adev))
+   ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 
<< 1) + 2 + i;
+   else
+   ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 
<< 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, >vcn.inst->irq, 0);
if (r)
@@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle)
  
  	adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
  
+	r = amdgpu_virt_alloc_mm_table(adev);

+   if (r)
+   return r;
+


This is not needed for bare metal.



return 0;
  }
  
@@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle)

int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
+	amdgpu_virt_free_mm_table(adev);

+


same as above here.


Regards,

Leo




r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
 ring->doorbell_index, 0);
  
+	if (amdgpu_sriov_vf(adev))

+   vcn_v2_0_start_sriov(adev);
+
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -1680,6 +1696,215 @@ static int vcn_v2_0_set_powergating_state(void *handle,
return ret;
  }
  
+static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,

+   struct amdgpu_mm_table *table)
+{
+   uint32_t data = 0, loop;
+   uint64_t addr = table->gpu_addr;
+   struct mmsch_v2_0_init_header *header;
+   uint32_t size;
+   int i;
+
+   header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
+   size = header->header_size + header->vcn_table_size;
+
+   /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+* of memory descriptor location
+*/
+   WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+   WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+   /* 2, update vmid of descriptor */
+   data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+   data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+   /* use domain0 for MM scheduler */
+   data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+   WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+   /* 3, notify mmsch about the size of this descriptor */
+   WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+   /* 4, set resp to zero */
+   WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+   adev->vcn.inst->ring_dec.wptr = 0;
+   adev->vcn.inst->ring_dec.wptr_old = 0;
+   vcn_v2_0_dec_ring_set_wptr(>vcn.inst->ring_dec);
+
+   for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+   adev->vcn.inst->ring_enc[i].wptr = 0;
+   adev->vcn.inst->ring_enc[i].wptr_old = 0;
+   vcn_v2_0_enc_ring_set_wptr(>vcn.inst->ring_enc[i]);
+   }
+
+   /* 5, kick off 

Re: [enable VCN2.0 for NV12 SRIOV 2/6] drm/amdgpu: disable jpeg block for SRIOV

2020-03-05 Thread Leo Liu


On 2020-03-05 8:39 a.m., Liu, Monk wrote:

This is not supported by MMSCH FW...


With this added to commit message, this patch is:

Reviewed-by: Leo Liu 





_
Monk Liu|GPU Virtualization Team |AMD


-Original Message-
From: Christian König 
Sent: Thursday, March 5, 2020 9:38 PM
To: Liu, Monk ; amd-gfx@lists.freedesktop.org
Subject: Re: [enable VCN2.0 for NV12 SRIOV 2/6] drm/amdgpu: disable jpeg block 
for SRIOV

A commit message explaining why we disable it and if it could be enabled again 
or if this is permanent would be nice to have.

Christian.

Am 05.03.20 um 14:33 schrieb Monk Liu:

Signed-off-by: Monk Liu 
---
   drivers/gpu/drm/amd/amdgpu/nv.c | 3 ++-
   1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c
b/drivers/gpu/drm/amd/amdgpu/nv.c index 2d1bebd..033cbbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, _v11_0_ip_block);
amdgpu_device_ip_block_add(adev, _v2_0_ip_block);
-   amdgpu_device_ip_block_add(adev, _v2_0_ip_block);
+   if (!amdgpu_sriov_vf(adev))
+   amdgpu_device_ip_block_add(adev, _v2_0_ip_block);
break;
default:
return -EINVAL;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cleo.liu%40amd.com%7C73f28c93e88241bc4c5908d7c10a96e3%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637190123555130366sdata=CVcac9dEWc3mR0oNkcrkOTtxXqvdtjzEN78c%2FBYty8E%3Dreserved=0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/vcn2.5: fix warning

2020-02-10 Thread Leo Liu

Acked-by: Leo Liu 

On 2020-02-07 8:17 a.m., James Zhu wrote:

Fix warning during switching to dpg pause mode for
VCN firmware Version ENC: 1.1 DEC: 1 VEP: 0 Revision: 16

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 97ab44c..2d64ba1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1407,7 +1407,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device 
*adev,
   RREG32_SOC15(UVD, inst_idx, 
mmUVD_SCRATCH2) & 0x7FFF);
  
  SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,

-  0x0, 
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+  UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, 
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
/* unpause dpg, no need to wait */

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/vcn2.5: fix DPG mode power off issue on instance 1

2020-02-05 Thread Leo Liu



On 2020-02-05 9:45 a.m., James Zhu wrote:

Support pause_state for multiple instance, and it will fix vcn2.5 DPG mode
power off issue on instance 1.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  3 +--
  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 14 --
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   |  6 +++---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   |  6 +++---
  4 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d6deb0e..fb3dfe3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -179,6 +179,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src   irq;
struct amdgpu_vcn_reg   external;
struct amdgpu_bo*dpg_sram_bo;
+   struct dpg_pause_state pause_state;


Can this variable be aligned with other variables in the structure? With 
that fixed, the patch is


Reviewed-by: Leo Liu 



void*dpg_sram_cpu_addr;
uint64_tdpg_sram_gpu_addr;
uint32_t*dpg_sram_curr_addr;
@@ -190,8 +191,6 @@ struct amdgpu_vcn {
const struct firmware   *fw;/* VCN firmware */
unsignednum_enc_rings;
enum amd_powergating_state cur_state;
-   struct dpg_pause_state pause_state;
-
boolindirect_sram;
  
  	uint8_t	num_vcn_inst;

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1a24fad..71f61af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1207,9 +1207,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device 
*adev,
struct amdgpu_ring *ring;
  
  	/* pause/unpause if state is changed */

-   if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+   if (adev->vcn.inst[inst_idx].pause_state.fw_based != 
new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
-   adev->vcn.pause_state.fw_based, 
adev->vcn.pause_state.jpeg,
+   adev->vcn.inst[inst_idx].pause_state.fw_based,
+   adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
  
  		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &

@@ -1258,13 +1259,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device 
*adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
-   adev->vcn.pause_state.fw_based = new_state->fw_based;
+   adev->vcn.inst[inst_idx].pause_state.fw_based = 
new_state->fw_based;
}
  
  	/* pause/unpause if state is changed */

-   if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+   if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
-   adev->vcn.pause_state.fw_based, 
adev->vcn.pause_state.jpeg,
+   adev->vcn.inst[inst_idx].pause_state.fw_based,
+   adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
  
  		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &

@@ -1318,7 +1320,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device 
*adev,
reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
-   adev->vcn.pause_state.jpeg = new_state->jpeg;
+   adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
}
  
  	return 0;

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 4f72167..c387c81 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1137,9 +1137,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device 
*adev,
int ret_code;
  
  	/* pause/unpause if state is changed */

-   if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+   if (adev->vcn.inst[inst_idx].pause_state.fw_based != 
new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
-   adev->vcn.pause_state.fw_based,  
new_state->fw_based);
+   adev->vcn.inst[inst_idx].pause_state.fw_based,   
new_state->fw_based);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
  
@@ -1185,7 +1185,7 @@ static int vcn_v2_0_pause_dpg_mo

Re: [PATCH v2 3/5] drm/amdgpu/vcn: fix vcn2.5 instance issue

2020-01-21 Thread Leo Liu



On 2020-01-21 5:21 p.m., James Zhu wrote:

Fix vcn2.5 instance issue, vcn0 and vcn1 have same register offset

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  2 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   | 88 -
  2 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c4984c5..bf7f2aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -91,7 +91,7 @@
uint32_t internal_reg_offset, addr; 
\
bool video_range, aon_range;
\

\
-   addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + 
reg);\
+   addr = (adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg);  
 \


Why do you still have hard coded here? please have it reverted. With 
that fixed the patch is:


Reviewed-by: Leo Liu 




addr <<= 2; 
  \
video_range = 0xF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) 
&&   \
((0xF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 
0x2600); \
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 740a291..f513c6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -435,88 +435,88 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct 
amdgpu_device *adev, int inst_idx
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
inst_idx].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
inst_idx].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, 
indirect);
+   UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, 
indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 
0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 
0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, 
indirect);
+   UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, 
indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, 
indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, 
indirect);
offset = size;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
+   UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
  
  	if (!indirect)

WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE

Re: [PATCH v2 4/5] drm/amdgpu/vcn: fix typo error

2020-01-21 Thread Leo Liu

Patch v2 4 and 5 are:

Reviewed-by: Leo Liu 

On 2020-01-21 4:39 p.m., James Zhu wrote:

Fix typo error, should be inst_idx instead of inst.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 60fe3c4..ff4f4f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -111,7 +111,7 @@
  
  #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) 	\

({  
\
-   WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL,  
\
+   WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,  
\
(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |
  \
mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |
  \
offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));
  \

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/4] drm/amdgpu/vcn: use inst_idx relacing inst

2020-01-21 Thread Leo Liu


On 2020-01-21 3:58 p.m., James Zhu wrote:
Actually RREG32_SOC15_DPG_MODE_2_0 has a bug inside: it mistakenly uses inst 
where it should use inst_idx,


If there is a bug, you should clearly specify the bug in the commit 
message and fix it in a patch, and then if you think the naming 
convention needs to be fixed, you should have that in a separate patch.


Leo




So I want to clean up the header first to use inst_idx for 
abbreviation of instance index.


James

On 2020-01-21 3:30 p.m., Leo Liu wrote:


On 2020-01-21 12:50 p.m., James Zhu wrote:


On 2020-01-21 12:43 p.m., Leo Liu wrote:


On 2020-01-21 11:19 a.m., James Zhu wrote:

Use inst_idx replacing inst in the SOC15_DPG_MODE macro

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 22 +++---
  1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

index 60fe3c4..98c1893 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -65,23 +65,23 @@
  /* 1 second timeout */
  #define VCN_IDLE_TIMEOUT    msecs_to_jiffies(1000)
  -#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, 
sram_sel) \
-    ({    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, 
mask); \

-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, 
sram_sel) \
+    ({    WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, 
mask); \
+    WREG32_SOC15(ip, inst_idx, 
mmUVD_DPG_LMA_CTL, \


I have only seen you using inst_idx to replace inst, haven't 
you? This is not necessary, because we are using inst as the idx.


Since we use inst for instance in struct amdgpu_vcn, to avoid 
confusion, I created this patch,


If only variable name changes, please drop these patch, we are using 
inst all the places.


Leo






James



Leo




UVD_DPG_LMA_CTL__MASK_EN_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + 
reg) \
  << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) 
| \
  (sram_sel << 
UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
-    RREG32_SOC15(ip, inst, 
mmUVD_DPG_LMA_DATA); \
+    RREG32_SOC15(ip, inst_idx, 
mmUVD_DPG_LMA_DATA); \

  })
  -#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, 
sram_sel) \
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, 
sram_sel) \

  do { \
-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, 
value); \
-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, 
mask); \

-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+    WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, 
value); \
+    WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, 
mask); \
+    WREG32_SOC15(ip, inst_idx, 
mmUVD_DPG_LMA_CTL, \

  UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + 
reg) \
  << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) 
| \
  (sram_sel << 
UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \

  } while (0)
@@ -111,7 +111,7 @@
    #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, 
mask_en) \

  ({    \
-    WREG32_SOC15(VCN, inst, 
mmUVD_DPG_LMA_CTL, \
+    WREG32_SOC15(VCN, inst_idx, 
mmUVD_DPG_LMA_CTL, \
  (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 
|    \
  mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT 
|    \
  offset << 
UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));    \

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/4] drm/amdgpu/vcn: fix vcn2.5 instance issue

2020-01-21 Thread Leo Liu


On 2020-01-21 3:55 p.m., James Zhu wrote:
Since SOC15_DPG_MODE_OFFSET is always the same for all instances, we 
should not put [inst]


in the argument list. It will easily introduce bugs in the future.


Like being said, we have the consistent format throughout the entire 
driver for the offset as 
"adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg",


so the format should be kept here as well

Leo




James

On 2020-01-21 3:23 p.m., Leo Liu wrote:


On 2020-01-21 12:48 p.m., James Zhu wrote:


On 2020-01-21 12:40 p.m., Leo Liu wrote:


On 2020-01-21 11:19 a.m., James Zhu wrote:

Fix vcn2.5 instance issue, vcn0 and vcn1 have same register offset

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  4 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 86 

  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   | 88 
-

  3 files changed, 89 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

index c4984c5..60fe3c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -86,12 +86,12 @@
  (sram_sel << 
UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \

  } while (0)
  -#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, 
reg) \
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, 
reg) \

  ({    \
  uint32_t internal_reg_offset, 
addr;    \

  bool video_range, aon_range;    \
  \
-    addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] 
+ reg);    \


This is based the soc15_common.h

#define SOC15_REG_OFFSET(ip, inst, reg) 
(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)


You are saying that is not right offset for 2nd instance?


Yes, DPG mode is executed by individual instance, so the register 
offset  is the same.


Then you should use inst idx as 0 for the 2nd instance as well, 
instead of changing the Macro.


Leo





James




Leo


+    addr = (adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + 
reg);    \

  addr <<= 2; \
  video_range = 0xF & addr) >= 
(VCN_VID_SOC_ADDRESS_2_0)) && \
  ((0xF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 
0x2600);    \
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c

index e2ad5afe..ad11c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -352,88 +352,88 @@ static void 
vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec

  if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
  if (!indirect) {
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+    UVD, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
  } else {
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 
0, indirect);
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 
0, indirect);
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+    UVD, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
  }
  offset = 0;
  } else {
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
  offset = size;
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    

Re: [PATCH 4/4] drm/amdgpu/vcn: use inst_idx relacing inst

2020-01-21 Thread Leo Liu


On 2020-01-21 12:50 p.m., James Zhu wrote:


On 2020-01-21 12:43 p.m., Leo Liu wrote:


On 2020-01-21 11:19 a.m., James Zhu wrote:

Use inst_idx replacing inst in the SOC15_DPG_MODE macro

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 22 +++---
  1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

index 60fe3c4..98c1893 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -65,23 +65,23 @@
  /* 1 second timeout */
  #define VCN_IDLE_TIMEOUT    msecs_to_jiffies(1000)
  -#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, 
sram_sel) \
-    ({    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, 
mask); \

-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, 
sram_sel) \
+    ({    WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, 
mask); \
+    WREG32_SOC15(ip, inst_idx, 
mmUVD_DPG_LMA_CTL, \


I have only seen you using inst_idx to replace inst, haven't you? 
This is not necessary, because we are using inst as the idx.


Since we use inst for instance in struct amdgpu_vcn, to avoid 
confusion, I created this patch,


If only variable name changes, please drop these patch, we are using 
inst all the places.


Leo






James



Leo




UVD_DPG_LMA_CTL__MASK_EN_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
  << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) 
| \
  (sram_sel << 
UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \

-    RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \
+    RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \
  })
  -#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, 
sram_sel) \
+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, 
sram_sel) \

  do { \
-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, 
value); \
-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, 
mask); \

-    WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \
+    WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, 
value); \
+    WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, 
mask); \
+    WREG32_SOC15(ip, inst_idx, 
mmUVD_DPG_LMA_CTL, \

  UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
- ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
  << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) 
| \
  (sram_sel << 
UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \

  } while (0)
@@ -111,7 +111,7 @@
    #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, 
mask_en) \

  ({    \
-    WREG32_SOC15(VCN, inst, 
mmUVD_DPG_LMA_CTL, \
+    WREG32_SOC15(VCN, inst_idx, 
mmUVD_DPG_LMA_CTL, \
  (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 
|    \
  mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT 
|    \
  offset << 
UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));    \

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/4] drm/amdgpu/vcn: fix vcn2.5 instance issue

2020-01-21 Thread Leo Liu


On 2020-01-21 12:48 p.m., James Zhu wrote:


On 2020-01-21 12:40 p.m., Leo Liu wrote:


On 2020-01-21 11:19 a.m., James Zhu wrote:

Fix vcn2.5 instance issue, vcn0 and vcn1 have same register offset

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  4 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 86 

  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   | 88 
-

  3 files changed, 89 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

index c4984c5..60fe3c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -86,12 +86,12 @@
  (sram_sel << 
UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \

  } while (0)
  -#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, 
reg) \
+#define SOC15_DPG_MODE_OFFSET_2_0(ip, 
reg) \

  ({    \
  uint32_t internal_reg_offset, addr;    \
  bool video_range, aon_range;    \
  \
-    addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + 
reg);    \


This is based the soc15_common.h

#define SOC15_REG_OFFSET(ip, inst, reg) 
(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)


You are saying that is not right offset for 2nd instance?


Yes, DPG mode is executed by individual instance, so the register 
offset  is the same.


Then you should use inst idx as 0 for the 2nd instance as well, instead 
of changing the Macro.


Leo





James




Leo


+    addr = (adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + 
reg);    \

  addr <<= 2; \
  video_range = 0xF & addr) >= 
(VCN_VID_SOC_ADDRESS_2_0)) && \
  ((0xF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 
0x2600);    \
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c

index e2ad5afe..ad11c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -352,88 +352,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, bool indirec

  if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
  if (!indirect) {
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+    UVD, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
  } else {
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, 
indirect);
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, 
indirect);
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, 
indirect);

  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+    UVD, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
  }
  offset = 0;
  } else {
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
  lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+    UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
  upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
  offset = size;
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+    UVD, mmUVD_VCPU_CACHE_OFFSET0),
  AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
  }
    if (!indirect)
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-    UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+    UVD, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
  else
  WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OF

Re: [PATCH 2/4] drm/amdgpu/vcn2.5: fix a bug for the 2nd vcn instance

2020-01-21 Thread Leo Liu


On 2020-01-21 12:47 p.m., James Zhu wrote:


On 2020-01-21 12:29 p.m., Leo Liu wrote:


On 2020-01-21 11:19 a.m., James Zhu wrote:

Fix a bug for the 2nd vcn instance at start and stop.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 12 
  1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c

index c351d1a..740a291 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -891,8 +891,10 @@ static int vcn_v2_5_start(struct amdgpu_device 
*adev)

  for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
  if (adev->vcn.harvest_config & (1 << i))
  continue;
-    if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-    return vcn_v2_5_start_dpg_mode(adev, i, 
adev->vcn.indirect_sram);

+    if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+    r = vcn_v2_5_start_dpg_mode(adev, i, 
adev->vcn.indirect_sram);

+    continue;
+    }


"r" is not being considered, and after the loop, it will be going to 
the code below, is it correct?
Since DPG mode start/stop always returns 0, I have added code to return 
0 below under DPG mode.


Then you should move the "return 0" here instead of adding two more 
unnecessary lines.







  /* disable register anti-hang mechanism */
  WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
@@ -903,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device 
*adev)

  WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
  }
  +    if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+    return 0;
+
  /*SW clock gating */
  vcn_v2_5_disable_clock_gating(adev);
  @@ -1294,10 +1299,9 @@ static int vcn_v2_5_stop(struct 
amdgpu_device *adev)

  for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
  if (adev->vcn.harvest_config & (1 << i))
  continue;
-
  if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
  r = vcn_v2_5_stop_dpg_mode(adev, i);
-    goto power_off;
+    continue;
  }


same problem as above, don't go through the code that isn't necessary.


should be fine under DPG mode.


It's about clean implementation. If it's not necessary, why do we need to add 
it?


Leo





James



Regards,

Leo



    /* wait for vcn idle */

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 4/4] drm/amdgpu/vcn: use inst_idx relacing inst

2020-01-21 Thread Leo Liu



On 2020-01-21 11:19 a.m., James Zhu wrote:

Use inst_idx replacing inst in the SOC15_DPG_MODE macro

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 22 +++---
  1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 60fe3c4..98c1893 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -65,23 +65,23 @@
  /* 1 second timeout */
  #define VCN_IDLE_TIMEOUT  msecs_to_jiffies(1000)
  
-#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \

-   ({  WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask);   
\
-   WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL,   
\
+#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel)   
\
+   ({  WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);   
\
+   WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,   
\


I have only seen you using inst_idx to replace inst, haven't you? 
This is not necessary, because we are using inst as the idx.


Leo




UVD_DPG_LMA_CTL__MASK_EN_MASK | 
\
-   ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + 
reg)   \
+   ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] 
+ reg)   \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |   
  \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));
  \
-   RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); 
\
+   RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); 
\
})
  
-#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) 			\

+#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel)
\
do {
\
-   WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value);  
\
-   WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask);   
\
-   WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL,   
\
+   WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value);  
\
+   WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);   
\
+   WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,   
\
UVD_DPG_LMA_CTL__READ_WRITE_MASK |  
\
-   ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + 
reg)   \
+   ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] 
+ reg)   \
<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |   
  \
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));
  \
} while (0)
@@ -111,7 +111,7 @@
  
  #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) 	\

({  
\
-   WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL,  
\
+   WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,  
\
(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |
  \
mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |
  \
offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));
  \

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 3/4] drm/amdgpu/vcn: fix vcn2.5 instance issue

2020-01-21 Thread Leo Liu



On 2020-01-21 11:19 a.m., James Zhu wrote:

Fix vcn2.5 instance issue, vcn0 and vcn1 have same register offset

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  4 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 86 
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   | 88 -
  3 files changed, 89 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index c4984c5..60fe3c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -86,12 +86,12 @@
(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));
  \
} while (0)
  
-#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) 		\

+#define SOC15_DPG_MODE_OFFSET_2_0(ip, reg) 
\
({  
\
uint32_t internal_reg_offset, addr; 
\
bool video_range, aon_range;
\

\
-   addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + 
reg);\


This is based the soc15_common.h

#define SOC15_REG_OFFSET(ip, inst, reg) 
(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)


You are saying that is not right offset for 2nd instance?


Leo



+   addr = (adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg);  
 \
addr <<= 2; 
  \
video_range = 0xF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) 
&&   \
((0xF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 
0x2600); \
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index e2ad5afe..ad11c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -352,88 +352,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct 
amdgpu_device *adev, bool indirec
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+   UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),

(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+   UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),

(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, 
indirect);
+   UVD, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 
0, indirect);
+   UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, 
indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 
0, 0, indirect);
+   UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 
0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, 
indirect);
+   UVD, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+   UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+   UVD, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
offset = size;
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
-   UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+   UVD, 

Re: [PATCH 2/4] drm/amdgpu/vcn2.5: fix a bug for the 2nd vcn instance

2020-01-21 Thread Leo Liu



On 2020-01-21 11:19 a.m., James Zhu wrote:

Fix a bug for the 2nd vcn instance at start and stop.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 12 
  1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index c351d1a..740a291 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -891,8 +891,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
-   if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-   return vcn_v2_5_start_dpg_mode(adev, i, 
adev->vcn.indirect_sram);
+   if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+   r = vcn_v2_5_start_dpg_mode(adev, i, 
adev->vcn.indirect_sram);
+   continue;
+   }
  


"r" is not being checked, and after the loop it will fall through to the
code below — is that correct?




/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
@@ -903,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
}
  
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)

+   return 0;
+
/*SW clock gating */
vcn_v2_5_disable_clock_gating(adev);
  
@@ -1294,10 +1299,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)

for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
-
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
r = vcn_v2_5_stop_dpg_mode(adev, i);
-   goto power_off;
+   continue;
}


same problem as above, don't go through the code that isn't necessary.

Regards,

Leo


  
  		/* wait for vcn idle */

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/4] drm/amdgpu/vcn: Share vcn_v2_0_dec_ring_test_ring to vcn2.5

2020-01-21 Thread Leo Liu

This patch is

Reviewed-by: Leo Liu 

On 2020-01-21 11:19 a.m., James Zhu wrote:

Share vcn_v2_0_dec_ring_test_ring to vcn2.5 to support
vcn software ring.

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h | 1 +
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2 +-
  3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index f4db8af6..e2ad5afe 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1624,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct 
amdgpu_device *adev,
return 0;
  }
  
-static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)

+int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
  {
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
index ef749b0..6c9de18 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
@@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct 
amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
  extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val);
+extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring);
  
  extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring);

  extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index c8b63d5..c351d1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1488,7 +1488,7 @@ static const struct amdgpu_ring_funcs 
vcn_v2_5_dec_ring_vm_funcs = {
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-   .test_ring = amdgpu_vcn_dec_ring_test_ring,
+   .test_ring = vcn_v2_0_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
.insert_start = vcn_v2_0_dec_ring_insert_start,

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/2] drm/amdgpu/vcn1.0: add 1_0 to Macro and fix build warning

2020-01-15 Thread Leo Liu
Fixes: 2f60d5f2bc4 "drm/amdgpu/vcn: move macro from vcn2.0 to share amdgpu_vcn"

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index a70351f2740c..e654938f6cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -39,10 +39,10 @@
 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
 #include "jpeg_v1_0.h"
 
-#define mmUVD_RBC_XX_IB_REG_CHECK  0x05ab
-#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
-#define mmUVD_REG_XX_MASK  
0x05ac
-#define mmUVD_REG_XX_MASK_BASE_IDX 1
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0  0x05ab
+#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX 1
+#define mmUVD_REG_XX_MASK_1_0  0x05ac
+#define mmUVD_REG_XX_MASK_1_0_BASE_IDX 1
 
 static int vcn_v1_0_stop(struct amdgpu_device *adev);
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
@@ -835,9 +835,9 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device 
*adev)
 
vcn_v1_0_mc_resume_spg_mode(adev);
 
-   WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10);
-   WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK,
-   RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3);
+   WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
+   WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
+   RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);
 
/* enable VCPU clock */
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amdgpu: fix the instance loop and build warning

2020-01-15 Thread Leo Liu
Fixes: 8ae1e132 "drm/amdgpu/vcn: support multiple instance direct SRAM read and 
write"

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index ab51f0e9539c..f96464e2c157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -193,9 +193,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
if (adev->vcn.indirect_sram) {
-   amdgpu_bo_free_kernel(>vcn.inst[i].dpg_sram_bo,
- 
>vcn.inst[i].dpg_sram_gpu_addr,
- (void 
**)>vcn.inst[i].dpg_sram_cpu_addr);
+   amdgpu_bo_free_kernel(>vcn.inst[j].dpg_sram_bo,
+ 
>vcn.inst[j].dpg_sram_gpu_addr,
+ (void 
**)>vcn.inst[j].dpg_sram_cpu_addr);
}
kvfree(adev->vcn.inst[j].saved_bo);
 
-- 
2.20.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v3 0/5] support Arcturus IFM workaround

2020-01-15 Thread Leo Liu



On 2020-01-14 5:23 p.m., James Zhu wrote:

Add vcn2.5 dpg mode/dpg pause mode/dpg sram mode to support
acturus IFM(instruction fetch monitor) work around.

v2: Correct firmware ucode index in vcn_v2_5_mc_resume_dpg_mode
v3: Share multiple instance indirect DPG SRAM mode support for vcn2


With those fixed in patch2&3, the series are:

Reviewed-by: Leo Liu 


Regards,

Leo





James Zhu (5):
   drm/amdgpu/vcn: support multiple-instance dpg pause mode
   drm/amdgpu/vcn2.5: support multiple instance direct SRAM read and
 write
   drm/amdgpu/vcn2.5: add DPG mode start and stop
   drm/amdgpu/vcn2.5: add dpg pause mode
   drm/amdgpu/vcn2.5: implement indirect DPG SRAM mode

  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c |  34 +--
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  48 ++--
  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   |   8 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   |  98 
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c   | 394 +++-
  5 files changed, 487 insertions(+), 95 deletions(-)


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH v3 3/5] drm/amdgpu/vcn2.5: add DPG mode start and stop

2020-01-15 Thread Leo Liu



On 2020-01-14 5:23 p.m., James Zhu wrote:

Add DPG mode start and stop functions for vcn2.5

v2: Correct firmware ucode index in vcn_v2_5_mc_resume_dpg_mode

Signed-off-by: James Zhu
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 295 +-
  1 file changed, 293 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index fa90249..b3ddf68 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -48,6 +48,11 @@
  #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET   0x3b5
  #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c
  
+#define mmUVD_RBC_XX_IB_REG_CHECK 	0x026b

+#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
+#define mmUVD_REG_XX_MASK  0x026c
+#define mmUVD_REG_XX_MASK_BASE_IDX 1
+


If these are the same as VCN2.0, please move them to amdgpu_vcn.h




  #define VCN25_MAX_HW_INSTANCES_ARCTURUS   2
  
  static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);

@@ -286,7 +291,8 @@ static int vcn_v2_5_hw_init(void *handle)
  
  done:

if (!r)
-   DRM_INFO("VCN decode and encode initialized successfully.\n");
+   DRM_INFO("VCN decode and encode initialized successfully(under 
%s).\n",
+   (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG 
Mode");
  
  	return r;

  }
@@ -309,7 +315,9 @@ static int vcn_v2_5_hw_fini(void *handle)
continue;
ring = >vcn.inst[i].ring_dec;
  
-		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))

+   if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+   (adev->vcn.cur_state != AMD_PG_STATE_GATE &&


Should it add some indentation here?

Regards,

Leo



+RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
  
  		ring->sched.ready = false;

@@ -418,6 +426,78 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
}
  }
  
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)

+{
+   uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+   uint32_t offset;
+
+   /* cache window 0: fw */
+   if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+   (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
inst_idx].tmr_mc_addr_lo), 0, indirect);
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+   (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
inst_idx].tmr_mc_addr_hi), 0, indirect);
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, 
indirect);
+   offset = 0;
+   } else {
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+   lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, 
indirect);
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+   upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, 
indirect);
+   offset = size;
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
+   AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
+   }
+
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+
+   /* cache window 1: stack */
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+   lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, 
indirect);
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+   upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, 
indirect);
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+
+   WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+   UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 
0, indirect);
+
+   /* cache window 2: context */
+   

Re: [PATCH v3 2/5] drm/amdgpu/vcn2.5: support multiple instance direct SRAM read and write

2020-01-15 Thread Leo Liu



On 2020-01-14 5:23 p.m., James Zhu wrote:

Add multiple instance direct SRAM read and write support for vcn2.5

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 27 +-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 46 
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 94 -
  3 files changed, 83 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 99df693..ca62d99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -165,15 +165,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", 
r);
return r;
}
-   }
  
-	if (adev->vcn.indirect_sram) {

-   r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
-   AMDGPU_GEM_DOMAIN_VRAM, >vcn.dpg_sram_bo,
-   >vcn.dpg_sram_gpu_addr, 
>vcn.dpg_sram_cpu_addr);
-   if (r) {
-   dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", 
r);
-   return r;
+   if (adev->vcn.indirect_sram) {
+   r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+   AMDGPU_GEM_DOMAIN_VRAM, 
>vcn.inst[i].dpg_sram_bo,
+   >vcn.inst[i].dpg_sram_gpu_addr, 
>vcn.inst[i].dpg_sram_cpu_addr);
+   if (r) {
+   dev_err(adev->dev, "VCN %d (%d) failed to allocate 
DPG bo\n", i, r);
+   return r;
+   }
}
}
  
@@ -186,15 +186,14 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
  
  	cancel_delayed_work_sync(>vcn.idle_work);
  
-	if (adev->vcn.indirect_sram) {

-   amdgpu_bo_free_kernel(>vcn.dpg_sram_bo,
- >vcn.dpg_sram_gpu_addr,
- (void **)>vcn.dpg_sram_cpu_addr);
-   }
-
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+   if (adev->vcn.indirect_sram) {
+   amdgpu_bo_free_kernel(>vcn.inst[i].dpg_sram_bo,
+ 
>vcn.inst[i].dpg_sram_gpu_addr,
+ (void 
**)>vcn.inst[i].dpg_sram_cpu_addr);
+   }
kvfree(adev->vcn.inst[j].saved_bo);
  
  		amdgpu_bo_free_kernel(>vcn.inst[j].vcpu_bo,

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 26c6623..63c55bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -104,27 +104,27 @@
internal_reg_offset >>= 2;  
  \
})
  
-#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) 		\

-   ({  
\
-   WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, 
\
-   (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |
  \
-   mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |
  \
-   offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));
  \
-   RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA);   
\
+#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en)   
\
+   ({  
\
+   WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL,  
\
+   (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |
  \
+   mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |
  \
+   offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));
  \
+   RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);
\


Please keep the backslash aligned with those above and below.


Regards,

Leo




})
  
-#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect)\

-   do {
\
-   if (!indirect) {
\
-   WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value);
\
-   WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, 

Re: [PATCH 4/6] drm/amdgpu/vcn2.5: add dpg pause mode

2020-01-14 Thread Leo Liu



On 2020-01-14 12:58 p.m., James Zhu wrote:

Add dpg pause mode support for vcn2.5

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 70 +++
  1 file changed, 70 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index ea70aa8..8de51c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -60,6 +60,8 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device 
*adev);
  static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
  static int vcn_v2_5_set_powergating_state(void *handle,
enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
+   int inst_idx, struct dpg_pause_state 
*new_state);
  static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
  
  static int amdgpu_ih_clientid_vcns[] = {

@@ -217,6 +219,9 @@ static int vcn_v2_5_sw_init(void *handle)
return r;
}
  
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)

+   adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
+
return 0;
  }
  
@@ -1327,6 +1332,67 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)

return 0;
  }
  
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,

+   int inst_idx, struct dpg_pause_state *new_state)


I think here is the same thing, add instance to v2_0, and avoid the 
duplication.



Regards,

Leo



+{
+   struct amdgpu_ring *ring;
+   uint32_t reg_data = 0;
+   int ret_code;
+
+   /* pause/unpause if state is changed */
+   if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+   DRM_DEBUG("dpg pause state changed %d -> %d",
+   adev->vcn.pause_state.fw_based,  
new_state->fw_based);
+   reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+   (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
+
+   if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
+   ret_code = 0;
+   SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 
0x1,
+   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, 
ret_code);
+
+   if (!ret_code) {
+   /* pause DPG */
+   reg_data |= 
UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+   WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, 
reg_data);
+
+   /* wait for ACK */
+   SOC15_WAIT_ON_RREG(UVD, inst_idx, 
mmUVD_DPG_PAUSE,
+  UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
+  
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+
+   /* Restore */
+   ring = >vcn.inst[inst_idx].ring_enc[0];
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, 
ring->gpu_addr);
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, 
upper_32_bits(ring->gpu_addr));
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, 
ring->ring_size / 4);
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, 
lower_32_bits(ring->wptr));
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, 
lower_32_bits(ring->wptr));
+
+   ring = >vcn.inst[inst_idx].ring_enc[1];
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, 
ring->gpu_addr);
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, 
upper_32_bits(ring->gpu_addr));
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, 
ring->ring_size / 4);
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, 
lower_32_bits(ring->wptr));
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, 
lower_32_bits(ring->wptr));
+
+   WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+  RREG32_SOC15(UVD, inst_idx, 
mmUVD_SCRATCH2) & 0x7FFF);
+
+   SOC15_WAIT_ON_RREG(UVD, inst_idx, 
mmUVD_POWER_STATUS,
+  0x0, 
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+   }
+   } else {
+   /* unpause dpg, no need to wait */
+   reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+   WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+   }
+   adev->vcn.pause_state.fw_based = new_state->fw_based;
+   }
+
+   return 0;
+}
+
  /**
   * vcn_v2_5_dec_ring_get_rptr - get read pointer
   *
@@ -1369,6 +1435,10 @@ 

Re: [PATCH 2/6] drm/amdgpu/vcn2.5: add direct SRAM read and write

2020-01-14 Thread Leo Liu
I think you can avoid the duplication by adding an instance parameter to
"RREG32(WREG)_SOC15_DPG_MODE_2_0(offset, mask_en)", just like the instance
was added to other parts of the code.


Regards,

Leo


On 2020-01-14 12:58 p.m., James Zhu wrote:

Add direct SRAM read and write MACRO for vcn2.5

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 18 ++
  1 file changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 26c6623..d3d75ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -127,6 +127,24 @@
}   
\
} while (0)
  
+#define RREG32_SOC15_DPG_MODE_2_5(inst_idx, offset, mask_en) 		\

+   ({  
\
+   WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL,  
\
+   (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |
  \
+   mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |
  \
+   offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));
  \
+   RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);
\
+   })
+
+#define WREG32_SOC15_DPG_MODE_2_5(inst_idx, offset, value, mask_en, indirect)  
\
+   do {
\
+   WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); 
\
+   WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,  
\
+   (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |
  \
+mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |   
  \
+offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));   
  \
+   } while (0)
+
  enum engine_status_constants {
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x20,
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0x0,

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH 1/6] drm/amdgpu/vcn: support multiple-instance dpg pause mode

2020-01-14 Thread Leo Liu

Reviewed-by: Leo Liu 

On 2020-01-14 12:58 p.m., James Zhu wrote:

Add multiple-instance dpg pause mode support for VCN2.5

Signed-off-by: James Zhu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4 ++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 2 +-
  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 8 
  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c   | 4 ++--
  4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index ed106d9..99df693 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -298,7 +298,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct 
*work)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
  
-			adev->vcn.pause_dpg_mode(adev, _state);

+   adev->vcn.pause_dpg_mode(adev, j, _state);
}
  
  		fence[j] += amdgpu_fence_count_emitted(>vcn.inst[j].ring_dec);

@@ -341,7 +341,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
  
-		adev->vcn.pause_dpg_mode(adev, _state);

+   adev->vcn.pause_dpg_mode(adev, ring->me, _state);
}
  }
  
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

index e6dee82..26c6623 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -199,7 +199,7 @@ struct amdgpu_vcn {
  
  	unsigned	harvest_config;

int (*pause_dpg_mode)(struct amdgpu_device *adev,
-   struct dpg_pause_state *new_state);
+   int inst_idx, struct dpg_pause_state *new_state);
  };
  
  int amdgpu_vcn_sw_init(struct amdgpu_device *adev);

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 3b025a3..a70351f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -50,7 +50,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device 
*adev);
  static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
  static int vcn_v1_0_set_powergating_state(void *handle, enum 
amd_powergating_state state);
  static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
-   struct dpg_pause_state *new_state);
+   int inst_idx, struct dpg_pause_state 
*new_state);
  
  static void vcn_v1_0_idle_work_handler(struct work_struct *work);
  
@@ -1199,7 +1199,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev)

  }
  
  static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,

-   struct dpg_pause_state *new_state)
+   int inst_idx, struct dpg_pause_state *new_state)
  {
int ret_code;
uint32_t reg_data = 0;
@@ -1786,7 +1786,7 @@ static void vcn_v1_0_idle_work_handler(struct work_struct 
*work)
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
  
-		adev->vcn.pause_dpg_mode(adev, _state);

+   adev->vcn.pause_dpg_mode(adev, 0, _state);
}
  
  	fences += amdgpu_fence_count_emitted(>jpeg.inst->ring_dec);

@@ -1840,7 +1840,7 @@ void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
new_state.jpeg = VCN_DPG_STATE__PAUSE;
  
-		adev->vcn.pause_dpg_mode(adev, _state);

+   adev->vcn.pause_dpg_mode(adev, 0, _state);
}
  }
  
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c

index d76ece3..dcdc7ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -58,7 +58,7 @@ static void vcn_v2_0_set_irq_funcs(struct amdgpu_device 
*adev);
  static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
  static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
-   struct dpg_pause_state *new_state);
+   int inst_idx, struct dpg_pause_state 
*new_state);
  
  /**

   * vcn_v2_0_early_init - set function pointers
@@ -1135,7 +1135,7 @@ static int vcn_v2_0_stop(struct amdgpu_device *adev)
  }
  
  static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,

-   struct dpg_pause_state *new_state)
+   int inst_idx, struct dpg_pause_state *new_state)
  {
struct amdgpu_ring *ring;
uint32_t reg_data = 0;

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amdgpu: enable VCN2.5 IP block for Arcturus

2020-01-07 Thread Leo Liu
With default PSP FW loading

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/soc15.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c 
b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 714cf4dfd0a7..e4a7245939c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -834,8 +834,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (likely(adev->firmware.load_type == 
AMDGPU_FW_LOAD_PSP))
amdgpu_device_ip_block_add(adev, 
_v2_5_ip_block);
} else {
-   if (unlikely(adev->firmware.load_type == 
AMDGPU_FW_LOAD_DIRECT))
-   amdgpu_device_ip_block_add(adev, 
_v2_5_ip_block);
+   amdgpu_device_ip_block_add(adev, _v2_5_ip_block);
}
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, _v2_5_ip_block);
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/2] drm/amdgpu/vcn2.5: fix PSP FW loading for the second instance

2020-01-07 Thread Leo Liu
ucodes for instances are from different location

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 4ea8e20ed15d..fa9024988918 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -384,9 +384,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
-   
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+   (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
i].tmr_mc_addr_lo));
WREG32_SOC15(UVD, i, 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
-   
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+   (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 
i].tmr_mc_addr_hi));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/vcn: remove unnecessary included headers

2019-12-16 Thread Leo Liu
Esp. VCN1.0 headers should not be here

v2: add back the  to keep consistent.

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 6 --
 1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e522025430c7..623b9f9ef1ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -28,16 +28,10 @@
 #include 
 #include 
 
-#include 
-
 #include "amdgpu.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_vcn.h"
 #include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"
 
 /* Firmware Names */
 #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/vcn: remove unnecessary included headers

2019-12-16 Thread Leo Liu

Hi Alex,

I searched and found why it get built okay:

amdgpu.h includes amdgpu_mode.h, and that include linux/i2c.h.

And linux/i2c.h includes linux/acpi.h and that includes linux/modules.h.

I tested it by commenting out linux/module.h from linux/acpi.h; the
build of amdgpu.ko then failed at the MODULE_FIRMWARE for VCN.


So in order to keep it consistent, I will send a v2 that keeps the include.



Regards,

Leo


On 2019-12-16 11:50 a.m., Leo Liu wrote:


On 2019-12-16 11:36 a.m., Alex Deucher wrote:

On Mon, Dec 16, 2019 at 11:06 AM Leo Liu  wrote:

Esp. VCN1.0 headers should not be here

Signed-off-by: Leo Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 ---
  1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

index e522025430c7..371f55de42dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -25,19 +25,12 @@
   */

  #include 
-#include 

Don't we still need module.h for MODULE_FIRMWARE()?


It got built okay with it removed. I will keep it anyway.

Regards,
Leo





Alex


  #include 

-#include 
-
  #include "amdgpu.h"
  #include "amdgpu_pm.h"
  #include "amdgpu_vcn.h"
  #include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"

  /* Firmware Names */
  #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cleo.liu%40amd.com%7C041e573a661b498216ea08d782481d8e%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637121119112744072sdata=TtjVEgl%2BB2sprSvHOJvCpZsxXD5jw9WSreusaDoeKmU%3Dreserved=0 


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cleo.liu%40amd.com%7C041e573a661b498216ea08d782481d8e%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637121119112744072sdata=TtjVEgl%2BB2sprSvHOJvCpZsxXD5jw9WSreusaDoeKmU%3Dreserved=0 


___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


Re: [PATCH] drm/amdgpu/vcn: remove unnecessary included headers

2019-12-16 Thread Leo Liu



On 2019-12-16 11:36 a.m., Alex Deucher wrote:

On Mon, Dec 16, 2019 at 11:06 AM Leo Liu  wrote:

Esp. VCN1.0 headers should not be here

Signed-off-by: Leo Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 ---
  1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e522025430c7..371f55de42dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -25,19 +25,12 @@
   */

  #include 
-#include 

Don't we still need module.h for MODULE_FIRMWARE()?


It got built okay with it removed. I will keep it anyway.

Regards,
Leo





Alex


  #include 

-#include 
-
  #include "amdgpu.h"
  #include "amdgpu_pm.h"
  #include "amdgpu_vcn.h"
  #include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"

  /* Firmware Names */
  #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Flists.freedesktop.org%2Fmailman%2Flistinfo%2Famd-gfxdata=02%7C01%7Cleo.liu%40amd.com%7C471b86fd37a94fdefe8008d7824612b5%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C637121109780682361sdata=7rC8gPrHsi5xa8Zsis%2B%2FXH0URESxBb6AQaMgppVGDJs%3Dreserved=0

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH] drm/amdgpu/vcn: remove unnecessary included headers

2019-12-16 Thread Leo Liu
Esp. VCN1.0 headers should not be here

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 ---
 1 file changed, 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e522025430c7..371f55de42dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -25,19 +25,12 @@
  */
 
 #include 
-#include 
 #include 
 
-#include 
-
 #include "amdgpu.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_vcn.h"
 #include "soc15d.h"
-#include "soc15_common.h"
-
-#include "vcn/vcn_1_0_offset.h"
-#include "vcn/vcn_1_0_sh_mask.h"
 
 /* Firmware Names */
 #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/2] drm/amdgpu/vcn1.0: use its own idle handler and begin use funcs

2019-12-13 Thread Leo Liu
Because VCN1.0 power management and DPG mode are managed together with
JPEG1.0 under both HW and FW, so separated them from general VCN code.
Also the multiple instances case got removed, since VCN1.0 HW has just
a single instance.

v2: override work func with vcn1.0's own

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c |  3 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  3 +
 drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c  |  3 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 90 -
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h   |  2 +
 5 files changed, 95 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 428cfd58b37d..717f0a218c5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -39,9 +39,6 @@
 #include "vcn/vcn_1_0_offset.h"
 #include "vcn/vcn_1_0_sh_mask.h"
 
-/* 1 second timeout */
-#define VCN_IDLE_TIMEOUT   msecs_to_jiffies(1000)
-
 /* Firmware Names */
 #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
 #define FIRMWARE_PICASSO   "amdgpu/picasso_vcn.bin"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 402a5046b985..3484ead62046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -56,6 +56,9 @@
 #define VCN_VID_IP_ADDRESS_2_0 0x0
 #define VCN_AON_IP_ADDRESS_2_0 0x3
 
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT   msecs_to_jiffies(1000)
+
 #define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel)   
\
({  WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask);   
\
WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL,   
\
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index a141408dfb23..0debfd9f428c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -25,6 +25,7 @@
 #include "amdgpu_jpeg.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "vcn_v1_0.h"
 
 #include "vcn/vcn_1_0_offset.h"
 #include "vcn/vcn_1_0_sh_mask.h"
@@ -561,7 +562,7 @@ static const struct amdgpu_ring_funcs 
jpeg_v1_0_decode_ring_vm_funcs = {
.insert_start = jpeg_v1_0_decode_ring_insert_start,
.insert_end = jpeg_v1_0_decode_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
-   .begin_use = amdgpu_vcn_ring_begin_use,
+   .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 652cecc030b3..3b025a3f8c7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
 #include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
@@ -51,6 +52,8 @@ static int vcn_v1_0_set_powergating_state(void *handle, enum 
amd_powergating_sta
 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct dpg_pause_state *new_state);
 
+static void vcn_v1_0_idle_work_handler(struct work_struct *work);
+
 /**
  * vcn_v1_0_early_init - set function pointers
  *
@@ -105,6 +108,9 @@ static int vcn_v1_0_sw_init(void *handle)
if (r)
return r;
 
+   /* Override the work func */
+   adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -1758,6 +1764,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
return ret;
 }
 
+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+   struct amdgpu_device *adev =
+   container_of(work, struct amdgpu_device, vcn.idle_work.work);
+   unsigned int fences = 0, i;
+
+   for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+   fences += 
amdgpu_fence_count_emitted(>vcn.inst->ring_enc[i]);
+
+   if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+   struct dpg_pause_state new_state;
+
+   if (fences)
+   new_state.fw_based = VCN_DPG_STATE__PAUSE;
+   else
+   new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+   if (amdgpu_fence_count_emitted(>jpeg.inst->ring_dec))
+   new_state.jpeg = VCN_DPG_STATE__PAUSE;
+  

Re: [PATCH] drm/amdgpu: add JPEG check to VCN idle handler and begin use

2019-12-12 Thread Leo Liu


On 2019-12-12 3:18 a.m., Christian König wrote:

Am 11.12.19 um 20:48 schrieb Leo Liu:

Since it's only needed with VCN1.0, where the HW has no
JPEG HW IP block of its own


Wouldn't it be simpler/cleaner to just define a 
vcn_v1_0_ring_begin_use() and vcn_v1_0_idle_work_handler() instead?


Yeah, this way should be cleaner, even though the changes got bigger, 
the new set will be sent shortly.


Thanks,

Leo





Regards,
Christian.



Signed-off-by: Leo Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 29 +++--
  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  2 ++
  2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c

index 428cfd58b37d..95ac721f2de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -186,6 +186,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
  }
  }
  +    adev->vcn.has_jpeg_block = 
(amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?

+    true : false;
+
  return 0;
  }
  @@ -306,15 +309,17 @@ static void 
amdgpu_vcn_idle_work_handler(struct work_struct *work)

  else
  new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
  -    if 
(amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec))

-    new_state.jpeg = VCN_DPG_STATE__PAUSE;
-    else
-    new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
+    if (!adev->vcn.has_jpeg_block) {
+    if 
(amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec))

+    new_state.jpeg = VCN_DPG_STATE__PAUSE;
+    else
+    new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+    }
  adev->vcn.pause_dpg_mode(adev, _state);
  }
  -    fence[j] += 
amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec);

+    if (!adev->vcn.has_jpeg_block)
+    fence[j] += 
amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec);
  fence[j] += 
amdgpu_fence_count_emitted(>vcn.inst[j].ring_dec);

  fences += fence[j];
  }
@@ -358,14 +363,16 @@ void amdgpu_vcn_ring_begin_use(struct 
amdgpu_ring *ring)

  else
  new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
  -    if 
(amdgpu_fence_count_emitted(>jpeg.inst[ring->me].ring_dec))

-    new_state.jpeg = VCN_DPG_STATE__PAUSE;
-    else
-    new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+    if (!adev->vcn.has_jpeg_block) {
+    if 
(amdgpu_fence_count_emitted(>jpeg.inst[ring->me].ring_dec))

+    new_state.jpeg = VCN_DPG_STATE__PAUSE;
+    else
+    new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+    }
    if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
  new_state.fw_based = VCN_DPG_STATE__PAUSE;
-    else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+    else if (!adev->vcn.has_jpeg_block && ring->funcs->type == 
AMDGPU_RING_TYPE_VCN_JPEG)

  new_state.jpeg = VCN_DPG_STATE__PAUSE;
    adev->vcn.pause_dpg_mode(adev, _state);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

index 402a5046b985..9a2381d006c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -192,6 +192,8 @@ struct amdgpu_vcn {
  unsigned    harvest_config;
  int (*pause_dpg_mode)(struct amdgpu_device *adev,
  struct dpg_pause_state *new_state);
+
+    bool has_jpeg_block;
  };
    int amdgpu_vcn_sw_init(struct amdgpu_device *adev);



___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amdgpu/vcn: remove JPEG related code from idle handler and begin use

2019-12-12 Thread Leo Liu
For VCN2.0 and above, VCN has been separated from JPEG

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 28 +
 1 file changed, 5 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e962c87d04cf..2ff04d0047ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -293,6 +293,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct 
*work)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
fence[j] += 
amdgpu_fence_count_emitted(>vcn.inst[j].ring_enc[i]);
}
@@ -305,26 +306,17 @@ static void amdgpu_vcn_idle_work_handler(struct 
work_struct *work)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-   if 
(amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec))
-   new_state.jpeg = VCN_DPG_STATE__PAUSE;
-   else
-   new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
adev->vcn.pause_dpg_mode(adev, _state);
}
 
-   fence[j] += 
amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec);
fence[j] += 
amdgpu_fence_count_emitted(>vcn.inst[j].ring_dec);
fences += fence[j];
}
 
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
-   if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
-   amdgpu_dpm_enable_uvd(adev, false);
-   else
-   amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,
-  
AMD_PG_STATE_GATE);
+   amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,
+  AMD_PG_STATE_GATE);
} else {
schedule_delayed_work(>vcn.idle_work, VCN_IDLE_TIMEOUT);
}
@@ -337,11 +329,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 
if (set_clocks) {
amdgpu_gfx_off_ctrl(adev, false);
-   if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled)
-   amdgpu_dpm_enable_uvd(adev, true);
-   else
-   amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,
-  
AMD_PG_STATE_UNGATE);
+   amdgpu_device_ip_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_VCN,
+  AMD_PG_STATE_UNGATE);
}
 
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG){
@@ -357,15 +346,8 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-   if 
(amdgpu_fence_count_emitted(>jpeg.inst[ring->me].ring_dec))
-   new_state.jpeg = VCN_DPG_STATE__PAUSE;
-   else
-   new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
-   else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
-   new_state.jpeg = VCN_DPG_STATE__PAUSE;
 
adev->vcn.pause_dpg_mode(adev, _state);
}
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 1/2] drm/amdgpu/vcn1.0: use its own idle handler and begin use funcs

2019-12-12 Thread Leo Liu
Because VCN1.0 power management and DPG mode are managed together with
JPEG1.0 under both HW and FW, so separated them from general VCN code.
Also the multiple instances case got removed, since VCN1.0 HW has just
a single instance.

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c |  7 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  3 +
 drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c  |  3 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c   | 88 -
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h   |  2 +
 5 files changed, 96 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 428cfd58b37d..e962c87d04cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -39,9 +39,6 @@
 #include "vcn/vcn_1_0_offset.h"
 #include "vcn/vcn_1_0_sh_mask.h"
 
-/* 1 second timeout */
-#define VCN_IDLE_TIMEOUT   msecs_to_jiffies(1000)
-
 /* Firmware Names */
 #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
 #define FIRMWARE_PICASSO   "amdgpu/picasso_vcn.bin"
@@ -71,7 +68,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
unsigned char fw_check;
int i, r;
 
-   INIT_DELAYED_WORK(>vcn.idle_work, amdgpu_vcn_idle_work_handler);
+   /* For VCN2.0 and above */
+   if (adev->asic_type >= CHIP_ARCTURUS)
+   INIT_DELAYED_WORK(>vcn.idle_work, 
amdgpu_vcn_idle_work_handler);
 
switch (adev->asic_type) {
case CHIP_RAVEN:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 402a5046b985..3484ead62046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -56,6 +56,9 @@
 #define VCN_VID_IP_ADDRESS_2_0 0x0
 #define VCN_AON_IP_ADDRESS_2_0 0x3
 
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT   msecs_to_jiffies(1000)
+
 #define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel)   
\
({  WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask);   
\
WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL,   
\
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index a141408dfb23..0debfd9f428c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -25,6 +25,7 @@
 #include "amdgpu_jpeg.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "vcn_v1_0.h"
 
 #include "vcn/vcn_1_0_offset.h"
 #include "vcn/vcn_1_0_sh_mask.h"
@@ -561,7 +562,7 @@ static const struct amdgpu_ring_funcs 
jpeg_v1_0_decode_ring_vm_funcs = {
.insert_start = jpeg_v1_0_decode_ring_insert_start,
.insert_end = jpeg_v1_0_decode_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
-   .begin_use = amdgpu_vcn_ring_begin_use,
+   .begin_use = vcn_v1_0_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 652cecc030b3..7395286540e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
 
 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
 #include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
@@ -51,6 +52,8 @@ static int vcn_v1_0_set_powergating_state(void *handle, enum 
amd_powergating_sta
 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct dpg_pause_state *new_state);
 
+static void vcn_v1_0_idle_work_handler(struct work_struct *work);
+
 /**
  * vcn_v1_0_early_init - set function pointers
  *
@@ -101,6 +104,7 @@ static int vcn_v1_0_sw_init(void *handle)
return r;
}
 
+   INIT_DELAYED_WORK(>vcn.idle_work, vcn_v1_0_idle_work_handler);
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
@@ -1758,6 +1762,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
return ret;
 }
 
+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+   struct amdgpu_device *adev =
+   container_of(work, struct amdgpu_device, vcn.idle_work.work);
+   unsigned int fences = 0, i;
+
+   for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+   fences += 
amdgpu_fence_count_emitted(>vcn.inst->ring_enc[i]);
+
+   if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+   struct dpg_pause_state new_state;
+
+   if (fences)
+   new_state.fw_based = VCN_DPG_STATE__PAUSE;
+  

[PATCH] drm/amdgpu: add JPEG check to VCN idle handler and begin use

2019-12-11 Thread Leo Liu
Since it's only needed with VCN1.0, where the HW has no
JPEG HW IP block of its own

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 29 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  2 ++
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 428cfd58b37d..95ac721f2de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -186,6 +186,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
}
 
+   adev->vcn.has_jpeg_block = (amdgpu_device_ip_get_ip_block(adev, 
AMD_IP_BLOCK_TYPE_JPEG)) ?
+   true : false;
+
return 0;
 }
 
@@ -306,15 +309,17 @@ static void amdgpu_vcn_idle_work_handler(struct 
work_struct *work)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-   if 
(amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec))
-   new_state.jpeg = VCN_DPG_STATE__PAUSE;
-   else
-   new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
-
+   if (!adev->vcn.has_jpeg_block) {
+   if 
(amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec))
+   new_state.jpeg = VCN_DPG_STATE__PAUSE;
+   else
+   new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+   }
adev->vcn.pause_dpg_mode(adev, _state);
}
 
-   fence[j] += 
amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec);
+   if (!adev->vcn.has_jpeg_block)
+   fence[j] += 
amdgpu_fence_count_emitted(>jpeg.inst[j].ring_dec);
fence[j] += 
amdgpu_fence_count_emitted(>vcn.inst[j].ring_dec);
fences += fence[j];
}
@@ -358,14 +363,16 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-   if 
(amdgpu_fence_count_emitted(>jpeg.inst[ring->me].ring_dec))
-   new_state.jpeg = VCN_DPG_STATE__PAUSE;
-   else
-   new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+   if (!adev->vcn.has_jpeg_block) {
+   if 
(amdgpu_fence_count_emitted(>jpeg.inst[ring->me].ring_dec))
+   new_state.jpeg = VCN_DPG_STATE__PAUSE;
+   else
+   new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+   }
 
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
-   else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+   else if (!adev->vcn.has_jpeg_block && ring->funcs->type == 
AMDGPU_RING_TYPE_VCN_JPEG)
new_state.jpeg = VCN_DPG_STATE__PAUSE;
 
adev->vcn.pause_dpg_mode(adev, _state);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 402a5046b985..9a2381d006c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -192,6 +192,8 @@ struct amdgpu_vcn {
unsignedharvest_config;
int (*pause_dpg_mode)(struct amdgpu_device *adev,
struct dpg_pause_state *new_state);
+
+   bool has_jpeg_block;
 };
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


[PATCH 2/2] drm/amdgpu: fix JPEG instance checking when ctx init

2019-12-09 Thread Leo Liu
Fixes: 0388aee76("drm/amdgpu: use the JPEG structure for
general driver support")

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a0d3d7b756eb..db4b6283c28c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -170,7 +170,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VCN_JPEG:
for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
-   if (adev->vcn.harvest_config & (1 << j))
+   if (adev->jpeg.harvest_config & (1 << j))
continue;
rings[num_rings++] = 
>jpeg.inst[j].ring_dec;
}
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 1/2] drm/amdgpu: fix VCN2.x number of irq types

2019-12-09 Thread Leo Liu
The JPEG irq type has been moved to its own structure

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 5649190cb629..d76ece38c97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1788,7 +1788,7 @@ static const struct amdgpu_irq_src_funcs 
vcn_v2_0_irq_funcs = {
 
 static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-   adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
+   adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst->irq.funcs = _v2_0_irq_funcs;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 42d6b9f0553b..f67fca38c1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1138,7 +1138,7 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device 
*adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
-   adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2;
+   adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = _v2_5_irq_funcs;
}
 }
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH] drm/amdgpu/vcn2.5: fix the enc loop with hw fini

2019-11-15 Thread Leo Liu


On 2019-11-15 5:15 p.m., Leo Liu wrote:

Fixes: 9072c584 (drm/amdgpu: move JPEG2.5 out from VCN2.5)


Just checked the bug was existing before the changes above.


Regards,

Leo




Signed-off-by: Leo Liu 
---
  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 451dc814d845..42d6b9f0553b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -268,7 +268,7 @@ static int vcn_v2_5_hw_fini(void *handle)
  {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
-   int i;
+   int i, j;
  
  	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {

if (adev->vcn.harvest_config & (1 << i))
@@ -280,8 +280,8 @@ static int vcn_v2_5_hw_fini(void *handle)
  
  		ring->sched.ready = false;
  
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {

-   ring = >vcn.inst[i].ring_enc[i];
+   for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+   ring = >vcn.inst[i].ring_enc[j];
ring->sched.ready = false;
}
}

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH] drm/amdgpu/vcn2.5: fix the enc loop with hw fini

2019-11-15 Thread Leo Liu
Fixes: 9072c584 (drm/amdgpu: move JPEG2.5 out from VCN2.5)

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 451dc814d845..42d6b9f0553b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -268,7 +268,7 @@ static int vcn_v2_5_hw_fini(void *handle)
 {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
-   int i;
+   int i, j;
 
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -280,8 +280,8 @@ static int vcn_v2_5_hw_fini(void *handle)
 
ring->sched.ready = false;
 
-   for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-   ring = >vcn.inst[i].ring_enc[i];
+   for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+   ring = >vcn.inst[i].ring_enc[j];
ring->sched.ready = false;
}
}
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Re: [PATCH 16/21] drm/amdgpu: enable JPEG2.0 dpm

2019-11-14 Thread Leo Liu


On 2019-11-14 11:03 a.m., Alex Deucher wrote:

On Tue, Nov 12, 2019 at 1:04 PM Leo Liu  wrote:

By using its own enabling function

Signed-off-by: Leo Liu 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c |  8 
  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h |  1 +
  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 10 +-
  3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f205f56e3358..b7150171e8d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2718,6 +2718,14 @@ void amdgpu_pm_print_power_states(struct amdgpu_device 
*adev)

  }

+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+{

I would add an "if (is_support_sw_smu(adev))" check here in case this
ever gets called on older asics.


Thanks Alex for the reviews.

I got "is_support_sw_smu()" in place in the beginning, and later has it 
removed.


because from "is_support_sw_smu",

"if (adev->asic_type == CHIP_VEGA20)
        return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS)
        return true;
else
        return false;"

JPEG2.0 and above fits >=CHIP_ARCTURUS, so it always returns true.

But I will add it back as you suggested, since it has no harm, also in 
case when condition changed.


Thanks again.

Leo



Other than that, the series (with
the v2 patches) is:
Reviewed-by: Alex Deucher 


+   int ret = smu_dpm_set_power_gate(>smu, AMD_IP_BLOCK_TYPE_JPEG, 
enable);
+   if (ret)
+   DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. 
\n",
+ enable ? "true" : "false", ret);
+}
+
  int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
  {
 int ret = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index ef31448ee8d8..3da1da277805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -41,5 +41,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
  void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
  void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
  void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);

  #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 3869730b2331..a78292d84854 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -333,6 +333,9 @@ static int jpeg_v2_0_start(struct amdgpu_device *adev)
 struct amdgpu_ring *ring = >jpeg.inst->ring_dec;
 int r;

+   if (adev->pm.dpm_enabled)
+   amdgpu_dpm_enable_jpeg(adev, true);
+
 /* disable power gating */
 r = jpeg_v2_0_disable_power_gating(adev);
 if (r)
@@ -388,8 +391,13 @@ static int jpeg_v2_0_stop(struct amdgpu_device *adev)

 /* enable power gating */
 r = jpeg_v2_0_enable_power_gating(adev);
+   if (r)
+   return r;

-   return r;
+   if (adev->pm.dpm_enabled)
+   amdgpu_dpm_enable_jpeg(adev, false);
+
+   return 0;
  }

  /**
--
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 14/21] drm/amd/powerplay: add JPEG power control for Renoir

2019-11-13 Thread Leo Liu
By using its own JPEG PowerUp and PowerDown messages

v2: add argument to PowerDownJpeg message

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 26 ++
 1 file changed, 26 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 492a201554e8..784903a313b7 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -301,6 +301,31 @@ static int renoir_dpm_set_uvd_enable(struct smu_context 
*smu, bool enable)
return ret;
 }
 
+static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+   struct smu_power_context *smu_power = >smu_power;
+   struct smu_power_gate *power_gate = _power->power_gate;
+   int ret = 0;
+
+   if (enable) {
+   if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_PowerUpJpeg, 0);
+   if (ret)
+   return ret;
+   }
+   power_gate->jpeg_gated = false;
+   } else {
+   if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+   ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_PowerDownJpeg, 0);
+   if (ret)
+   return ret;
+   }
+   power_gate->jpeg_gated = true;
+   }
+
+   return ret;
+}
+
 static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
 {
int ret = 0, i = 0;
@@ -683,6 +708,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.print_clk_levels = renoir_print_clk_levels,
.get_current_power_state = renoir_get_current_power_state,
.dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+   .dpm_set_jpeg_enable = renoir_dpm_set_jpeg_enable,
.force_dpm_limit_value = renoir_force_dpm_limit_value,
.unforce_dpm_levels = renoir_unforce_dpm_levels,
.get_workload_type = renoir_get_workload_type,
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 13/21] drm/amd/powerplay: add Powergate JPEG for Renoir

2019-11-13 Thread Leo Liu
Similar to SDMA, VCN etc.

v2: add argument to both PowerUpJpeg and PowerDownJpeg messages

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c|  2 ++
 drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h |  2 ++
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c|  1 +
 drivers/gpu/drm/amd/powerplay/smu_internal.h  |  2 ++
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 11 +++
 5 files changed, 18 insertions(+)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 69243a858dd5..211934521d37 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1229,6 +1229,7 @@ static int smu_hw_init(void *handle)
if (adev->flags & AMD_IS_APU) {
smu_powergate_sdma(>smu, false);
smu_powergate_vcn(>smu, false);
+   smu_powergate_jpeg(>smu, false);
smu_set_gfx_cgpg(>smu, true);
}
 
@@ -1287,6 +1288,7 @@ static int smu_hw_fini(void *handle)
if (adev->flags & AMD_IS_APU) {
smu_powergate_sdma(>smu, true);
smu_powergate_vcn(>smu, true);
+   smu_powergate_jpeg(>smu, true);
}
 
ret = smu_stop_thermal_control(smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 9b9f5df0911c..1745e0146fba 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -58,6 +58,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool 
gate);
 
 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
 
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate);
+
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
 
 uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c 
b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 04daf7e9fe05..492a201554e8 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -697,6 +697,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.check_fw_version = smu_v12_0_check_fw_version,
.powergate_sdma = smu_v12_0_powergate_sdma,
.powergate_vcn = smu_v12_0_powergate_vcn,
+   .powergate_jpeg = smu_v12_0_powergate_jpeg,
.send_smc_msg = smu_v12_0_send_msg,
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
.read_smc_arg = smu_v12_0_read_arg,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h 
b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 8bcda7871309..70c4d66721cd 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -42,6 +42,8 @@
((smu)->ppt_funcs->powergate_sdma ? 
(smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
 #define smu_powergate_vcn(smu, gate) \
((smu)->ppt_funcs->powergate_vcn ? 
(smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+#define smu_powergate_jpeg(smu, gate) \
+   ((smu)->ppt_funcs->powergate_jpeg ? 
(smu)->ppt_funcs->powergate_jpeg((smu), (gate)) : 0)
 
 #define smu_get_vbios_bootup_values(smu) \
((smu)->ppt_funcs->get_vbios_bootup_values ? 
(smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c 
b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 139dd737eaa5..18b24f954380 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -203,6 +203,17 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool 
gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
 }
 
+int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
+{
+   if (!(smu->adev->flags & AMD_IS_APU))
+   return 0;
+
+   if (gate)
+   return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 
0);
+   else
+   return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+}
+
 int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
 {
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[PATCH 12/21] drm/amd/powerplay: add JPEG power control for Navi1x

2019-11-13 Thread Leo Liu
By separating the JPEG power feature, and using its
own PowerUp and PowerDown messages

v2: remove PowerUpJpeg message argument

Signed-off-by: Leo Liu 
---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 32 --
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c 
b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index aeb9c1e341c7..dce6f76ecbe5 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -384,8 +384,10 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
 
if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
-   *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT)
-   | FEATURE_MASK(FEATURE_JPEG_PG_BIT);
+   *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
+
+   if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
+   *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);
 
/* disable DPM UCLK and DS SOCCLK on navi10 A0 secure board */
if (is_asic_secure(smu)) {
@@ -665,6 +667,31 @@ static int navi10_dpm_set_uvd_enable(struct smu_context 
*smu, bool enable)
return ret;
 }
 
+static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+   struct smu_power_context *smu_power = &smu->smu_power;
+   struct smu_power_gate *power_gate = &smu_power->power_gate;
+   int ret = 0;
+
+   if (enable) {
+   if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+   ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+   if (ret)
+   return ret;
+   }
+   power_gate->jpeg_gated = false;
+   } else {
+   if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+   ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+   if (ret)
+   return ret;
+   }
+   power_gate->jpeg_gated = true;
+   }
+
+   return ret;
+}
+
 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
   enum smu_clk_type clk_type,
   uint32_t *value)
@@ -1996,6 +2023,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
.dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
+   .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
.get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
.print_clk_levels = navi10_print_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
-- 
2.17.1

___
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

  1   2   3   >