Re: [Intel-gfx] [PATCH 13/23] drm/i915/mtl: memory latency data from LATENCY_LPX_LPY for WM

2022-08-10 Thread Jani Nikula
On Wed, 10 Aug 2022, Jani Nikula  wrote:
> On Tue, 02 Aug 2022, Matt Roper  wrote:
>> On Wed, Jul 27, 2022 at 06:34:10PM -0700, Radhakrishna Sripada wrote:
>>> Since Xe LPD+, Memory latency data are in LATENCY_LPX_LPY registers
>>> instead of GT driver mailbox.
>>> 
>>> Bspec: 64608
>>> 
>>> Cc: Matt Roper 
>>> Original Author: Caz Yokoyama
>>> Signed-off-by: Radhakrishna Sripada 
>>> ---
>>>  drivers/gpu/drm/i915/i915_reg.h |   7 +++
>>>  drivers/gpu/drm/i915/intel_pm.c | 105 +++-
>>>  2 files changed, 71 insertions(+), 41 deletions(-)
>>> 
>>> diff --git a/drivers/gpu/drm/i915/i915_reg.h 
>>> b/drivers/gpu/drm/i915/i915_reg.h
>>> index 6087d40eed70..23b50d671550 100644
>>> --- a/drivers/gpu/drm/i915/i915_reg.h
>>> +++ b/drivers/gpu/drm/i915/i915_reg.h
>>> @@ -8754,4 +8754,11 @@ enum skl_power_gate {
>>>  #define GEN12_STATE_ACK_DEBUG  _MMIO(0x20BC)
>>>  
>>>  #define MTL_MEDIA_GSI_BASE 0x38
>>> +
>>> +#define MTL_LATENCY_LP0_LP1_MMIO(0x45780)
>>> +#define MTL_LATENCY_LP2_LP3_MMIO(0x45784)
>>> +#define MTL_LATENCY_LP4_LP5_MMIO(0x45788)
>>> +#define  MTL_LATENCY_LEVEL0_2_4_MASK   REG_GENMASK(12, 0)
>>> +#define  MTL_LATENCY_LEVEL1_3_5_MASK   REG_GENMASK(28, 16)
>>> +
>>>  #endif /* _I915_REG_H_ */
>>> diff --git a/drivers/gpu/drm/i915/intel_pm.c 
>>> b/drivers/gpu/drm/i915/intel_pm.c
>>> index ef7553b494ea..fac565d23d57 100644
>>> --- a/drivers/gpu/drm/i915/intel_pm.c
>>> +++ b/drivers/gpu/drm/i915/intel_pm.c
>>> @@ -2861,16 +2861,75 @@ static void ilk_compute_wm_level(const struct 
>>> drm_i915_private *dev_priv,
>>> result->enable = true;
>>>  }
>>>  
>>> +static void
>>> +adjust_wm_latency(u16 wm[], int max_level, int read_latency,
>>> + bool wm_lv_0_adjust_needed)
>>
>> The refactoring to separate the adjustment from the readout should
>> probably be a separate patch before you add the MTL-specific changes on
>> top.
>
> Agreed.

And to elaborate, this kind of stuff should happen upstream months
before anyone's even heard of the new platform!

BR,
Jani.

>
>>
>>
>> Matt
>>
>>> +{
>>> +   int i, level;
>>> +
>>> +   /*
>>> +* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
>>> +* need to be disabled. We make sure to sanitize the values out
>>> +* of the punit to satisfy this requirement.
>>> +*/
>>> +   for (level = 1; level <= max_level; level++) {
>>> +   if (wm[level] == 0) {
>>> +   for (i = level + 1; i <= max_level; i++)
>>> +   wm[i] = 0;
>>> +
>>> +   max_level = level - 1;
>>> +   break;
>>> +   }
>>> +   }
>>> +
>>> +   /*
>>> +* WaWmMemoryReadLatency
>>> +*
>>> +* punit doesn't take into account the read latency so we need
>>> +* to add proper adjustment to each valid level we retrieve
>>> +* from the punit when level 0 response data is 0us.
>>> +*/
>>> +   if (wm[0] == 0) {
>>> +   for (level = 0; level <= max_level; level++)
>>> +   wm[level] += read_latency;
>>> +   }
>>> +
>>> +   /*
>>> +* WA Level-0 adjustment for 16GB DIMMs: SKL+
>>> +* If we could not get dimm info enable this WA to prevent from
>>> +* any underrun. If not able to get Dimm info assume 16GB dimm
>>> +* to avoid any underrun.
>>> +*/
>>> +   if (wm_lv_0_adjust_needed)
>>> +   wm[0] += 1;
>>> +}
>>> +
>>>  static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
>>>   u16 wm[])
>>>  {
>>> 	struct intel_uncore *uncore = &dev_priv->uncore;
>>> +   int max_level = ilk_wm_max_level(dev_priv);
>>>  
>>> -   if (DISPLAY_VER(dev_priv) >= 9) {
>>> +   if (DISPLAY_VER(dev_priv) >= 14) {
>>> u32 val;
>>> -   int ret, i;
>>> -   int level, max_level = ilk_wm_max_level(dev_priv);
>>> +
>>> +   val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
>>> +   wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
>>> +   wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
>>> +   val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
>>> +   wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
>>> +   wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
>>> +   val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
>>> +   wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
>>> +   wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
>>> +
>>> +   adjust_wm_latency(wm, max_level, 6,
>>> + dev_priv->dram_info.wm_lv_0_adjust_needed);
>>> +   } else if (DISPLAY_VER(dev_priv) >= 9) {
>>> +   int read_latency = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
>>> int mult = IS_DG2(dev_priv) ? 2 : 1;
>>> +   u32 val;
>>> +   int ret;
>>>  
>>> /* read the first set of memory latencies[0:3] */
>>>  

Re: [Intel-gfx] [PATCH 13/23] drm/i915/mtl: memory latency data from LATENCY_LPX_LPY for WM

2022-08-10 Thread Jani Nikula
On Tue, 02 Aug 2022, Matt Roper  wrote:
> On Wed, Jul 27, 2022 at 06:34:10PM -0700, Radhakrishna Sripada wrote:
>> Since Xe LPD+, Memory latency data are in LATENCY_LPX_LPY registers
>> instead of GT driver mailbox.
>> 
>> Bspec: 64608
>> 
>> Cc: Matt Roper 
>> Original Author: Caz Yokoyama
>> Signed-off-by: Radhakrishna Sripada 
>> ---
>>  drivers/gpu/drm/i915/i915_reg.h |   7 +++
>>  drivers/gpu/drm/i915/intel_pm.c | 105 +++-
>>  2 files changed, 71 insertions(+), 41 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/i915/i915_reg.h 
>> b/drivers/gpu/drm/i915/i915_reg.h
>> index 6087d40eed70..23b50d671550 100644
>> --- a/drivers/gpu/drm/i915/i915_reg.h
>> +++ b/drivers/gpu/drm/i915/i915_reg.h
>> @@ -8754,4 +8754,11 @@ enum skl_power_gate {
>>  #define GEN12_STATE_ACK_DEBUG   _MMIO(0x20BC)
>>  
>>  #define MTL_MEDIA_GSI_BASE  0x38
>> +
>> +#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
>> +#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784)
>> +#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788)
>> +#define  MTL_LATENCY_LEVEL0_2_4_MASKREG_GENMASK(12, 0)
>> +#define  MTL_LATENCY_LEVEL1_3_5_MASKREG_GENMASK(28, 16)
>> +
>>  #endif /* _I915_REG_H_ */
>> diff --git a/drivers/gpu/drm/i915/intel_pm.c 
>> b/drivers/gpu/drm/i915/intel_pm.c
>> index ef7553b494ea..fac565d23d57 100644
>> --- a/drivers/gpu/drm/i915/intel_pm.c
>> +++ b/drivers/gpu/drm/i915/intel_pm.c
>> @@ -2861,16 +2861,75 @@ static void ilk_compute_wm_level(const struct 
>> drm_i915_private *dev_priv,
>>  result->enable = true;
>>  }
>>  
>> +static void
>> +adjust_wm_latency(u16 wm[], int max_level, int read_latency,
>> +  bool wm_lv_0_adjust_needed)
>
> The refactoring to separate the adjustment from the readout should
> probably be a separate patch before you add the MTL-specific changes on
> top.

Agreed.

>
>
> Matt
>
>> +{
>> +int i, level;
>> +
>> +/*
>> + * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
>> + * need to be disabled. We make sure to sanitize the values out
>> + * of the punit to satisfy this requirement.
>> + */
>> +for (level = 1; level <= max_level; level++) {
>> +if (wm[level] == 0) {
>> +for (i = level + 1; i <= max_level; i++)
>> +wm[i] = 0;
>> +
>> +max_level = level - 1;
>> +break;
>> +}
>> +}
>> +
>> +/*
>> + * WaWmMemoryReadLatency
>> + *
>> + * punit doesn't take into account the read latency so we need
>> + * to add proper adjustment to each valid level we retrieve
>> + * from the punit when level 0 response data is 0us.
>> + */
>> +if (wm[0] == 0) {
>> +for (level = 0; level <= max_level; level++)
>> +wm[level] += read_latency;
>> +}
>> +
>> +/*
>> + * WA Level-0 adjustment for 16GB DIMMs: SKL+
>> + * If we could not get dimm info enable this WA to prevent from
>> + * any underrun. If not able to get Dimm info assume 16GB dimm
>> + * to avoid any underrun.
>> + */
>> +if (wm_lv_0_adjust_needed)
>> +wm[0] += 1;
>> +}
>> +
>>  static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
>>u16 wm[])
>>  {
>>  struct intel_uncore *uncore = &dev_priv->uncore;
>> +int max_level = ilk_wm_max_level(dev_priv);
>>  
>> -if (DISPLAY_VER(dev_priv) >= 9) {
>> +if (DISPLAY_VER(dev_priv) >= 14) {
>>  u32 val;
>> -int ret, i;
>> -int level, max_level = ilk_wm_max_level(dev_priv);
>> +
>> +val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
>> +wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
>> +wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
>> +val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
>> +wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
>> +wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
>> +val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
>> +wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
>> +wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
>> +
>> +adjust_wm_latency(wm, max_level, 6,
>> +  dev_priv->dram_info.wm_lv_0_adjust_needed);
>> +} else if (DISPLAY_VER(dev_priv) >= 9) {
>> +int read_latency = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
>>  int mult = IS_DG2(dev_priv) ? 2 : 1;
>> +u32 val;
>> +int ret;
>>  
>>  /* read the first set of memory latencies[0:3] */
>>  val = 0; /* data0 to be programmed to 0 for first set */
>> @@ -2909,44 +2968,8 @@ static void intel_read_wm_latency(struct 
>> drm_i915_private *dev_priv,
>>  wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
>> 

Re: [Intel-gfx] [PATCH 13/23] drm/i915/mtl: memory latency data from LATENCY_LPX_LPY for WM

2022-08-02 Thread Matt Roper
On Wed, Jul 27, 2022 at 06:34:10PM -0700, Radhakrishna Sripada wrote:
> Since Xe LPD+, Memory latency data are in LATENCY_LPX_LPY registers
> instead of GT driver mailbox.
> 
> Bspec: 64608
> 
> Cc: Matt Roper 
> Original Author: Caz Yokoyama
> Signed-off-by: Radhakrishna Sripada 
> ---
>  drivers/gpu/drm/i915/i915_reg.h |   7 +++
>  drivers/gpu/drm/i915/intel_pm.c | 105 +++-
>  2 files changed, 71 insertions(+), 41 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 6087d40eed70..23b50d671550 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -8754,4 +8754,11 @@ enum skl_power_gate {
>  #define GEN12_STATE_ACK_DEBUG_MMIO(0x20BC)
>  
>  #define MTL_MEDIA_GSI_BASE   0x38
> +
> +#define MTL_LATENCY_LP0_LP1  _MMIO(0x45780)
> +#define MTL_LATENCY_LP2_LP3  _MMIO(0x45784)
> +#define MTL_LATENCY_LP4_LP5  _MMIO(0x45788)
> +#define  MTL_LATENCY_LEVEL0_2_4_MASK REG_GENMASK(12, 0)
> +#define  MTL_LATENCY_LEVEL1_3_5_MASK REG_GENMASK(28, 16)
> +
>  #endif /* _I915_REG_H_ */
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index ef7553b494ea..fac565d23d57 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -2861,16 +2861,75 @@ static void ilk_compute_wm_level(const struct 
> drm_i915_private *dev_priv,
>   result->enable = true;
>  }
>  
> +static void
> +adjust_wm_latency(u16 wm[], int max_level, int read_latency,
> +   bool wm_lv_0_adjust_needed)

The refactoring to separate the adjustment from the readout should
probably be a separate patch before you add the MTL-specific changes on
top.


Matt

> +{
> + int i, level;
> +
> + /*
> +  * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
> +  * need to be disabled. We make sure to sanitize the values out
> +  * of the punit to satisfy this requirement.
> +  */
> + for (level = 1; level <= max_level; level++) {
> + if (wm[level] == 0) {
> + for (i = level + 1; i <= max_level; i++)
> + wm[i] = 0;
> +
> + max_level = level - 1;
> + break;
> + }
> + }
> +
> + /*
> +  * WaWmMemoryReadLatency
> +  *
> +  * punit doesn't take into account the read latency so we need
> +  * to add proper adjustment to each valid level we retrieve
> +  * from the punit when level 0 response data is 0us.
> +  */
> + if (wm[0] == 0) {
> + for (level = 0; level <= max_level; level++)
> + wm[level] += read_latency;
> + }
> +
> + /*
> +  * WA Level-0 adjustment for 16GB DIMMs: SKL+
> +  * If we could not get dimm info enable this WA to prevent from
> +  * any underrun. If not able to get Dimm info assume 16GB dimm
> +  * to avoid any underrun.
> +  */
> + if (wm_lv_0_adjust_needed)
> + wm[0] += 1;
> +}
> +
>  static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
> u16 wm[])
>  {
>   struct intel_uncore *uncore = &dev_priv->uncore;
> + int max_level = ilk_wm_max_level(dev_priv);
>  
> - if (DISPLAY_VER(dev_priv) >= 9) {
> + if (DISPLAY_VER(dev_priv) >= 14) {
>   u32 val;
> - int ret, i;
> - int level, max_level = ilk_wm_max_level(dev_priv);
> +
> + val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
> + wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
> + wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
> + val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
> + wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
> + wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
> + val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
> + wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
> + wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
> +
> + adjust_wm_latency(wm, max_level, 6,
> +   dev_priv->dram_info.wm_lv_0_adjust_needed);
> + } else if (DISPLAY_VER(dev_priv) >= 9) {
> + int read_latency = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
>   int mult = IS_DG2(dev_priv) ? 2 : 1;
> + u32 val;
> + int ret;
>  
>   /* read the first set of memory latencies[0:3] */
>   val = 0; /* data0 to be programmed to 0 for first set */
> @@ -2909,44 +2968,8 @@ static void intel_read_wm_latency(struct 
> drm_i915_private *dev_priv,
>   wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
>   GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
>  
> - /*
> -  * If a level n (n > 1) has a 0us latency, all 

[Intel-gfx] [PATCH 13/23] drm/i915/mtl: memory latency data from LATENCY_LPX_LPY for WM

2022-07-27 Thread Radhakrishna Sripada
Since Xe LPD+, Memory latency data are in LATENCY_LPX_LPY registers
instead of GT driver mailbox.

Bspec: 64608

Cc: Matt Roper 
Original Author: Caz Yokoyama
Signed-off-by: Radhakrishna Sripada 
---
 drivers/gpu/drm/i915/i915_reg.h |   7 +++
 drivers/gpu/drm/i915/intel_pm.c | 105 +++-
 2 files changed, 71 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6087d40eed70..23b50d671550 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8754,4 +8754,11 @@ enum skl_power_gate {
 #define GEN12_STATE_ACK_DEBUG  _MMIO(0x20BC)
 
 #define MTL_MEDIA_GSI_BASE 0x38
+
+#define MTL_LATENCY_LP0_LP1_MMIO(0x45780)
+#define MTL_LATENCY_LP2_LP3_MMIO(0x45784)
+#define MTL_LATENCY_LP4_LP5_MMIO(0x45788)
+#define  MTL_LATENCY_LEVEL0_2_4_MASK   REG_GENMASK(12, 0)
+#define  MTL_LATENCY_LEVEL1_3_5_MASK   REG_GENMASK(28, 16)
+
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ef7553b494ea..fac565d23d57 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2861,16 +2861,75 @@ static void ilk_compute_wm_level(const struct 
drm_i915_private *dev_priv,
result->enable = true;
 }
 
+static void
+adjust_wm_latency(u16 wm[], int max_level, int read_latency,
+ bool wm_lv_0_adjust_needed)
+{
+   int i, level;
+
+   /*
+* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+* need to be disabled. We make sure to sanitize the values out
+* of the punit to satisfy this requirement.
+*/
+   for (level = 1; level <= max_level; level++) {
+   if (wm[level] == 0) {
+   for (i = level + 1; i <= max_level; i++)
+   wm[i] = 0;
+
+   max_level = level - 1;
+   break;
+   }
+   }
+
+   /*
+* WaWmMemoryReadLatency
+*
+* punit doesn't take into account the read latency so we need
+* to add proper adjustment to each valid level we retrieve
+* from the punit when level 0 response data is 0us.
+*/
+   if (wm[0] == 0) {
+   for (level = 0; level <= max_level; level++)
+   wm[level] += read_latency;
+   }
+
+   /*
+* WA Level-0 adjustment for 16GB DIMMs: SKL+
+* If we could not get dimm info enable this WA to prevent from
+* any underrun. If not able to get Dimm info assume 16GB dimm
+* to avoid any underrun.
+*/
+   if (wm_lv_0_adjust_needed)
+   wm[0] += 1;
+}
+
 static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
  u16 wm[])
 {
struct intel_uncore *uncore = &dev_priv->uncore;
+   int max_level = ilk_wm_max_level(dev_priv);
 
-   if (DISPLAY_VER(dev_priv) >= 9) {
+   if (DISPLAY_VER(dev_priv) >= 14) {
u32 val;
-   int ret, i;
-   int level, max_level = ilk_wm_max_level(dev_priv);
+
+   val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
+   wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
+   wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
+   val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
+   wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
+   wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
+   val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
+   wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL0_2_4_MASK, val);
+   wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL1_3_5_MASK, val);
+
+   adjust_wm_latency(wm, max_level, 6,
+ dev_priv->dram_info.wm_lv_0_adjust_needed);
+   } else if (DISPLAY_VER(dev_priv) >= 9) {
+   int read_latency = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
int mult = IS_DG2(dev_priv) ? 2 : 1;
+   u32 val;
+   int ret;
 
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
@@ -2909,44 +2968,8 @@ static void intel_read_wm_latency(struct 
drm_i915_private *dev_priv,
wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
 
-   /*
-* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
-* need to be disabled. We make sure to sanitize the values out
-* of the punit to satisfy this requirement.
-*/
-   for (level = 1; level <= max_level; level++) {
-   if (wm[level] == 0) {
-   for (i =