Add a check during the atomic crtc check phase to ensure that the
programmed VRR guardband is sufficient to cover the latencies introduced
by enabled features such as DSC, PSR/Panel Replay, scalers, and DP SDPs.

Currently the guardband is programmed to match the vblank length, so the
existing checks in skl_is_vblank_too_short() are valid. However, upcoming
changes will optimize the guardband independently of the vblank length,
which would make those checks incorrect.

Introduce an explicit guardband check now, to prepare for the updates
that will drop the vblank-length checks and later program an optimized
guardband.
v2: Use new helper for PSR2/Panel Replay latency.
Signed-off-by: Ankit Nautiyal <ankit.k.nauti...@intel.com>
---
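For context, a minimal sketch of what the new check boils down to; all
names and units here are illustrative only (the real helpers are the ones
added in the diff below). The idea is that the guardband, expressed in
scanlines, must cover the sum of the feature latencies:

  /*
   * Illustrative sketch only, not i915 code: the programmed guardband
   * (in scanlines) must hide the worst-case prefill/wake latencies of
   * the enabled features.
   */
  #include <stdbool.h>

  struct guardband_budget {
          int guardband_lines;      /* programmed VRR guardband, scanlines */
          int linetime_us;          /* duration of one scanline, usecs */
          int wake_latency_us;      /* PSR2/Panel Replay wake latency */
          int sdp_latency_lines;    /* lines taken by DP SDP transmission */
          int scaler_prefill_lines; /* scaler prefill, scanlines */
          int dsc_prefill_lines;    /* DSC prefill, scanlines */
  };

  static bool guardband_is_sufficient(const struct guardband_budget *b)
  {
          /* convert the time-based wake latency to scanlines, rounding up */
          int wake_lines = (b->wake_latency_us + b->linetime_us - 1) /
                           b->linetime_us;

          /* the individual latencies add up before the frame can start */
          int required_lines = wake_lines + b->sdp_latency_lines +
                               b->scaler_prefill_lines + b->dsc_prefill_lines;

          return b->guardband_lines >= required_lines;
  }

Checking this budget against the guardband, rather than against the vblank
length, is what lets a later change shrink the guardband below the full
vblank without invalidating the check.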
drivers/gpu/drm/i915/display/intel_display.c | 138 +++++++++++++++++++
drivers/gpu/drm/i915/display/skl_watermark.c | 2 +-
drivers/gpu/drm/i915/display/skl_watermark.h | 1 +
3 files changed, 140 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index af4d54672d0d..c542a3110051 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -4227,6 +4227,138 @@ static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
return 0;
}
+static int
+cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ const struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state)) {
+ drm_WARN_ON(display->drm, PTR_ERR(cdclk_state));
+ return 1;
+ }
+
+ return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
+ 2 * intel_cdclk_logical(cdclk_state)));
+}
+
+static int
+dsc_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int chroma_downscaling_factor =
+ skl_scaler_chroma_downscale_factor(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int num_scaler_users = hweight32(scaler_state->scaler_users);
+ u64 hscale_k[ARRAY_SIZE(scaler_state->scalers)];
+ u64 vscale_k[ARRAY_SIZE(scaler_state->scalers)];
+ u32 dsc_prefill_latency = 0;
+
+ if (!crtc_state->dsc.compression_enable ||
+ !num_scaler_users ||
+ num_scaler_users > crtc->num_scalers ||
+ num_scaler_users > ARRAY_SIZE(scaler_state->scalers))
+ return dsc_prefill_latency;
+
+ for (int i = 0; i < num_scaler_users; i++) {
+ hscale_k[i] = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale,
+ 1000) >> 16);
+ vscale_k[i] = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale,
+ 1000) >> 16);
+ }
+
+ dsc_prefill_latency =
+ intel_display_dsc_prefill_latency(num_scaler_users, hscale_k, vscale_k,
+ chroma_downscaling_factor,
+ cdclk_prefill_adjustment(crtc_state),
+ linetime);
+
+ return dsc_prefill_latency;
+}
+
+static int
+scaler_prefill_latency(const struct intel_crtc_state *crtc_state, int linetime)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int chroma_downscaling_factor =
+ skl_scaler_chroma_downscale_factor(crtc_state);
+ int num_scaler_users = hweight32(scaler_state->scaler_users);
+ u64 hscale_k = 1000, vscale_k = 1000;
+ int scaler_prefill_latency = 0;
+
+ if (!num_scaler_users)
+ return scaler_prefill_latency;
+
+ if (num_scaler_users > 1) {
+ hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale,
+ 1000) >> 16);
+ vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale,
+ 1000) >> 16);
+ }
+
+ scaler_prefill_latency =
+ intel_display_scaler_prefill_latency(num_scaler_users, hscale_k, vscale_k,
+ chroma_downscaling_factor,
+ cdclk_prefill_adjustment(crtc_state),
+ linetime);
+
+ return scaler_prefill_latency;
+}
+
+static int intel_crtc_check_guardband(struct intel_crtc_state *crtc_state)