From: Ben Skeggs <[email protected]>

Programming -1 (the vc_start_slot value left behind when payload allocation
fails) into HW probably isn't the best idea.

Check the return value of drm_dp_add_payload_part1() instead, and program
an idle (all-zero) VCPI configuration when allocation fails or the payload
is being removed.

Signed-off-by: Ben Skeggs <[email protected]>
---
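Note for reviewers: drm_dp_add_payload_part1() leaves payload->vc_start_slot
at -1 when slot allocation fails, which the old code then handed straight to
nvif_outp_dp_mst_vcpi(). A minimal sketch of the control flow after this patch
(kernel context only, not standalone; all names are as in the hunk below, and
the comments are mine):

	int ret = 0;

	if (msto->disabled) {
		/* Payload is going away; route to the idle programming below. */
		drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
		ret = 1;
	} else {
		if (msto->enabled)
			/* May fail, leaving payload->vc_start_slot == -1. */
			ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
	}

	if (ret == 0) {
		/* Allocation succeeded: program the real slot/PBN values. */
		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
				      payload->vc_start_slot, payload->time_slots,
				      payload->pbn, payload->time_slots * mst_state->pbn_div);
	} else {
		/* Disabled, or allocation failed: program zeroes, not -1. */
		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
	}
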
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 4e7c9c353c51..2911167bf22a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -912,6 +912,7 @@ nv50_msto_prepare(struct drm_atomic_state *state,
        struct nv50_mstm *mstm = mstc->mstm;
        struct drm_dp_mst_topology_state *old_mst_state;
        struct drm_dp_mst_atomic_payload *payload, *old_payload;
+       int ret = 0;
 
        NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
 
@@ -920,18 +921,20 @@ nv50_msto_prepare(struct drm_atomic_state *state,
        payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
        old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
 
-       // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
        if (msto->disabled) {
                drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
-
-               nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
+               ret = 1;
        } else {
                if (msto->enabled)
-                       drm_dp_add_payload_part1(mgr, mst_state, payload);
+                       ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
+       }
 
+       if (ret == 0) {
                nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
                                      payload->vc_start_slot, payload->time_slots,
                                      payload->pbn, payload->time_slots * mst_state->pbn_div);
+       } else {
+               nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
        }
 }
 
-- 
2.41.0
