Register the aqua vanjaram VCN poison irq and add the VCN poison handler.

Signed-off-by: Stanley.Yang <[email protected]>
Reviewed-by: Hawking Zhang <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 67 +++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h |  6 +++
 2 files changed, 73 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 139c83bd165e..fbf9695cc62f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -168,6 +168,12 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
        if (r)
                return r;
 
+       /* VCN POISON TRAP */
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+               VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+       if (r)
+               return r;
+
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 
                r = amdgpu_vcn_sw_init(adev, i);
@@ -386,6 +390,9 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
                        vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
        }
 
+       if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+               amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
+
        return 0;
 }
 
@@ -1813,11 +1820,24 @@ static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
        return 0;
 }
 
+static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned int type,
+                                       enum amdgpu_interrupt_state state)
+{
+       return 0;
+}
+
 static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
        .set = vcn_v4_0_3_set_interrupt_state,
        .process = vcn_v4_0_3_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
+       .set = vcn_v4_0_3_set_ras_interrupt_state,
+       .process = amdgpu_vcn_process_poison_irq,
+};
+
 /**
  * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
  *
@@ -1833,6 +1853,9 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
                adev->vcn.inst->irq.num_types++;
        }
        adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
+
+       adev->vcn.inst->ras_poison_irq.num_types = 1;
+       adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
 }
 
 static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
@@ -1980,9 +2003,44 @@ static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
                vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
 }
 
+static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+                       uint32_t instance, uint32_t sub_block)
+{
+       uint32_t poison_stat = 0, reg_value = 0;
+
+       switch (sub_block) {
+       case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
+               reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+               poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+               break;
+       default:
+               break;
+       }
+
+       if (poison_stat)
+               dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+                       instance, sub_block);
+
+       return poison_stat;
+}
+
+static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
+{
+       uint32_t inst, sub;
+       uint32_t poison_stat = 0;
+
+       for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+               for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
+                       poison_stat +=
+                       vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+       return !!poison_stat;
+}
+
 static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
        .query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
        .reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
+       .query_poison_status = vcn_v4_0_3_query_poison_status,
 };
 
 static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
@@ -2058,6 +2116,13 @@ static int vcn_v4_0_3_ras_late_init(struct amdgpu_device *adev, struct ras_commo
        if (r)
                return r;
 
+       if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+               adev->vcn.inst->ras_poison_irq.funcs) {
+               r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+               if (r)
+                       goto late_fini;
+       }
+
        r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
                                &vcn_v4_0_3_aca_info, NULL);
        if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 03572a1d0c9c..aeab89853a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -24,6 +24,12 @@
 #ifndef __VCN_V4_0_3_H__
 #define __VCN_V4_0_3_H__
 
+enum amdgpu_vcn_v4_0_3_sub_block {
+       AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0,
+
+       AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK,
+};
+
 extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
 
 void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
-- 
2.25.1
