Re: [PATCH 2/2] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm

2019-12-17 Thread Felix Kuehling

On 2019-12-17 12:28, Jonathan Kim wrote:

The DF routines to arm xGMI performance will attempt to re-arm both on
performance monitoring start and read on initial failure to arm.

Signed-off-by: Jonathan Kim 
---
  drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 153 ---
  1 file changed, 117 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c 
b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..af445054305f 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -162,25 +162,45 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device 
*adev,
  }
  
  /*

- * df_v3_6_perfmon_wreg - write to perfmon lo and hi
- *
- * required to be atomic.  no mmio method provided so subsequent reads after
- * data writes cannot occur to preserve data fabrics finite state machine.
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
   */
-static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
-   uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
+#define ARM_RETRY_USEC_TIMEOUT 1000
+#define ARM_RETRY_USEC_INTERVAL 100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t  hi_val)
  {
unsigned long flags, address, data;
+   uint32_t lo_val_rb, hi_val_rb;
+   int countdown = ARM_RETRY_USEC_TIMEOUT;
  
  	address = adev->nbio.funcs->get_pcie_index_offset(adev);

data = adev->nbio.funcs->get_pcie_data_offset(adev);
  
  	spin_lock_irqsave(&adev->pcie_idx_lock, flags);

-   WREG32(address, lo_addr);
-   WREG32(data, lo_val);
-   WREG32(address, hi_addr);
-   WREG32(data, hi_val);
+
+   while (countdown) {
+   WREG32(address, lo_addr);
+   WREG32(data, lo_val);
+   WREG32(address, hi_addr);
+   WREG32(data, hi_val);
+
+   WREG32(address, lo_addr);
+   lo_val_rb = RREG32(data);
+   WREG32(address, hi_addr);
+   hi_val_rb = RREG32(data);
+
+   if (lo_val == lo_val_rb && hi_val == hi_val_rb)
+   break;
+
+   countdown -= ARM_RETRY_USEC_INTERVAL;
+   udelay(ARM_RETRY_USEC_INTERVAL);
+   }
+
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);


I don't think it's a good idea to hold the spin lock for the entire 
duration of this retry loop. Maybe put that inside the loop and release 
the lock while waiting in udelay.




+
+   return countdown > 0 ? 0 : -ETIME;
  }
  
  /* get the number of df counters available */

@@ -334,20 +354,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device 
*adev,
switch (target_cntr) {
  
  	case 0:

-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
break;
case 1:
-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
break;
case 2:
-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
break;
case 3:
-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
break;
  
  	}

@@ -422,6 +442,42 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
return -ENOSPC;
  }
  
+#define DEFERRED_ARM_MASK	(1 << 31)

+static int df_v3_6_pmc_defer_cntr(struct amdgpu_device *adev,
+ uint64_t config, int err)


Consider renaming this function. I found its usage confusing because 
it's used to defer arming as well as clearing the deferred flag. Maybe 
df_v3_6_pmc_set_deferred. The "err" parameter could be named "defer" to 
better indicate its meaning and maybe make it bool, since that's what's 
returned by the counterpart df_v3_6_pmc_is_deferred.




+{
+   int target_cntr;

[PATCH 2/2] drm/amdgpu: attempt xgmi perfmon re-arm on failed arm

2019-12-17 Thread Jonathan Kim
The DF routines to arm xGMI performance will attempt to re-arm both on
performance monitoring start and read on initial failure to arm.

Signed-off-by: Jonathan Kim 
---
 drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 153 ---
 1 file changed, 117 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c 
b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 4043ebcea5de..af445054305f 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -162,25 +162,45 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device 
*adev,
 }
 
 /*
- * df_v3_6_perfmon_wreg - write to perfmon lo and hi
- *
- * required to be atomic.  no mmio method provided so subsequent reads after
- * data writes cannot occur to preserve data fabrics finite state machine.
+ * retry arming counters every 100 usecs within 1 millisecond interval.
+ * if retry fails after time out, return error.
  */
-static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
-   uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val)
+#define ARM_RETRY_USEC_TIMEOUT 1000
+#define ARM_RETRY_USEC_INTERVAL 100
+static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev,
+ uint32_t lo_addr, uint32_t lo_val,
+ uint32_t hi_addr, uint32_t  hi_val)
 {
unsigned long flags, address, data;
+   uint32_t lo_val_rb, hi_val_rb;
+   int countdown = ARM_RETRY_USEC_TIMEOUT;
 
address = adev->nbio.funcs->get_pcie_index_offset(adev);
data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-   WREG32(address, lo_addr);
-   WREG32(data, lo_val);
-   WREG32(address, hi_addr);
-   WREG32(data, hi_val);
+
+   while (countdown) {
+   WREG32(address, lo_addr);
+   WREG32(data, lo_val);
+   WREG32(address, hi_addr);
+   WREG32(data, hi_val);
+
+   WREG32(address, lo_addr);
+   lo_val_rb = RREG32(data);
+   WREG32(address, hi_addr);
+   hi_val_rb = RREG32(data);
+
+   if (lo_val == lo_val_rb && hi_val == hi_val_rb)
+   break;
+
+   countdown -= ARM_RETRY_USEC_INTERVAL;
+   udelay(ARM_RETRY_USEC_INTERVAL);
+   }
+
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+   return countdown > 0 ? 0 : -ETIME;
 }
 
 /* get the number of df counters available */
@@ -334,20 +354,20 @@ static void df_v3_6_pmc_get_addr(struct amdgpu_device 
*adev,
switch (target_cntr) {
 
case 0:
-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4;
break;
case 1:
-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5;
break;
case 2:
-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6;
break;
case 3:
-   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3;
-   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3;
+   *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7;
+   *hi_base_addr = is_ctrl ? smnPerfMonCtlHi7 : smnPerfMonCtrHi7;
break;
 
}
@@ -422,6 +442,42 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
return -ENOSPC;
 }
 
+#define DEFERRED_ARM_MASK  (1 << 31)
+static int df_v3_6_pmc_defer_cntr(struct amdgpu_device *adev,
+ uint64_t config, int err)
+{
+   int target_cntr;
+
+   target_cntr = df_v3_6_pmc_config_2_cntr(adev, config);
+
+   if (target_cntr < 0)
+   return -EINVAL;
+
+   if (err)
+   adev->df_perfmon_config_assign_mask[target_cntr] |=
+   DEFERRED_ARM_MASK;
+   else
+   adev->df_perfmon_config_assign_mask[target_cntr] &=
+   ~DEFERRED_ARM_MASK;
+
+   return 0;
+}
+
+static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
+