Replace the custom dpu_rm_hw_iter helpers with the standard linked list
iterator, list_for_each_entry(). The wrapper only walked the per-type
block list while skipping entries that are already in use, which each
call site can now do directly, dropping the iterator boilerplate.
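
For reference, the per-call-site change looks roughly like the sketch
below (all identifiers appear in dpu_rm.c; locking and error handling
are omitted):

        /* before: opaque iterator wrapper */
        struct dpu_rm_hw_iter iter;

        _dpu_rm_init_hw_iter(&iter, DPU_HW_BLK_LM);
        while (_dpu_rm_get_hw_locked(rm, &iter)) {
                /* use iter.blk */
        }

        /* after: plain list walk that skips reserved blocks */
        struct dpu_rm_hw_blk *blk;

        list_for_each_entry(blk, &rm->hw_blks[DPU_HW_BLK_LM], list) {
                if (blk->in_use)
                        continue;
                /* use blk */
        }

The in_use filter moves into each loop body, and the only iteration
state left is the cursor pointer that list_for_each_entry() already
provides.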

Signed-off-by: Jeykumar Sankaran <jsa...@codeaurora.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 120 +++++++++++++--------------------
 1 file changed, 46 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 1234991..a79456c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -45,49 +45,6 @@ struct dpu_rm_hw_blk {
        struct dpu_hw_blk *hw;
 };
 
-/**
- * struct dpu_rm_hw_iter - iterator for use with dpu_rm
- * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
- * @type: Hardware Block Type client wishes to search for.
- */
-struct dpu_rm_hw_iter {
-       struct dpu_rm_hw_blk *blk;
-       enum dpu_hw_blk_type type;
-};
-
-static void _dpu_rm_init_hw_iter(
-               struct dpu_rm_hw_iter *iter,
-               enum dpu_hw_blk_type type)
-{
-       memset(iter, 0, sizeof(*iter));
-       iter->type = type;
-}
-
-static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
-{
-       struct list_head *blk_list;
-
-       if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
-               DPU_ERROR("invalid rm\n");
-               return false;
-       }
-
-       blk_list = &rm->hw_blks[i->type];
-
-       if (i->blk && (&i->blk->list == blk_list)) {
-               DPU_DEBUG("attempt resume iteration past last\n");
-               return false;
-       }
-
-       i->blk = list_prepare_entry(i->blk, blk_list, list);
-
-       list_for_each_entry_continue(i->blk, blk_list, list)
-               if (!i->blk->in_use)
-                       return true;
-
-       return false;
-}
-
 static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
 {
        switch (type) {
@@ -301,7 +258,8 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
                struct dpu_rm_hw_blk *primary_lm)
 {
        const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
-       struct dpu_rm_hw_iter iter;
+       struct dpu_rm_hw_blk *iter;
+       struct list_head *blk_list = &rm->hw_blks[DPU_HW_BLK_PINGPONG];
 
        *pp = NULL;
 
@@ -320,10 +278,12 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
                }
        }
 
-       _dpu_rm_init_hw_iter(&iter, DPU_HW_BLK_PINGPONG);
-       while (_dpu_rm_get_hw_locked(rm, &iter)) {
-               if (iter.blk->hw->id == lm_cfg->pingpong) {
-                       *pp = iter.blk;
+       list_for_each_entry(iter, blk_list, list) {
+               if (iter->in_use)
+                       continue;
+
+               if (iter->hw->id == lm_cfg->pingpong) {
+                       *pp = iter;
                        break;
                }
        }
@@ -343,7 +303,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
 {
        struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
        struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
-       struct dpu_rm_hw_iter iter_i, iter_j;
+       struct dpu_rm_hw_blk *iter_i, *iter_j;
+       struct list_head *blk_list = &rm->hw_blks[DPU_HW_BLK_LM];
        int lm_count = 0;
        int i, rc = 0;
 
@@ -353,14 +314,18 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
        }
 
        /* Find a primary mixer */
-       _dpu_rm_init_hw_iter(&iter_i, DPU_HW_BLK_LM);
-       while (lm_count != reqs->topology.num_lm &&
-                       _dpu_rm_get_hw_locked(rm, &iter_i)) {
+       list_for_each_entry(iter_i, blk_list, list) {
+               if (iter_i->in_use)
+                       continue;
+
+               if (lm_count == reqs->topology.num_lm)
+                       break;
+
                memset(&lm, 0, sizeof(lm));
                memset(&pp, 0, sizeof(pp));
 
                lm_count = 0;
-               lm[lm_count] = iter_i.blk;
+               lm[lm_count] = iter_i;
 
                if (!_dpu_rm_check_lm_and_get_connected_blks(
                                rm, reqs, lm[lm_count],
@@ -370,19 +335,22 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
                ++lm_count;
 
                /* Valid primary mixer found, find matching peers */
-               _dpu_rm_init_hw_iter(&iter_j, DPU_HW_BLK_LM);
+               list_for_each_entry(iter_j, blk_list, list) {
+                       if (iter_j->in_use)
+                               continue;
 
-               while (lm_count != reqs->topology.num_lm &&
-                               _dpu_rm_get_hw_locked(rm, &iter_j)) {
-                       if (iter_i.blk == iter_j.blk)
+                       if (lm_count == reqs->topology.num_lm)
+                               break;
+
+                       if (iter_i == iter_j)
                                continue;
 
                        if (!_dpu_rm_check_lm_and_get_connected_blks(
-                                       rm, reqs, iter_j.blk,
-                                       &pp[lm_count], iter_i.blk))
+                                       rm, reqs, iter_j,
+                                       &pp[lm_count], iter_i))
                                continue;
 
-                       lm[lm_count] = iter_j.blk;
+                       lm[lm_count] = iter_j;
                        ++lm_count;
                }
        }
@@ -417,7 +385,9 @@ static int _dpu_rm_reserve_ctls(
                const struct msm_display_topology *top)
 {
        struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
-       struct dpu_rm_hw_iter iter;
+       struct dpu_rm_hw_blk *iter;
+       struct list_head *blk_list = &rm->hw_blks[DPU_HW_BLK_CTL];
+
        int i = 0, num_ctls = 0;
        bool needs_split_display = false;
 
@@ -428,21 +398,23 @@ static int _dpu_rm_reserve_ctls(
 
        needs_split_display = _dpu_rm_needs_split_display(top);
 
-       _dpu_rm_init_hw_iter(&iter, DPU_HW_BLK_CTL);
-       while (_dpu_rm_get_hw_locked(rm, &iter)) {
-               const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+       list_for_each_entry(iter, blk_list, list) {
+               const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter->hw);
                unsigned long features = ctl->caps->features;
                bool has_split_display;
 
+               if (iter->in_use)
+                       continue;
+
                has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
 
-               DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->hw->id, features);
+               DPU_DEBUG("ctl %d caps 0x%lX\n", iter->hw->id, features);
 
                if (needs_split_display != has_split_display)
                        continue;
 
-               ctls[i] = iter.blk;
-               DPU_DEBUG("ctl %d match\n", iter.blk->hw->id);
+               ctls[i] = iter;
+               DPU_DEBUG("ctl %d match\n", iter->hw->id);
 
                if (++i == num_ctls)
                        break;
@@ -468,26 +440,28 @@ static struct dpu_rm_hw_blk *_dpu_rm_reserve_intf(
                uint32_t id,
                enum dpu_hw_blk_type type)
 {
-       struct dpu_rm_hw_iter iter;
+       struct dpu_rm_hw_blk *iter;
+       struct dpu_rm_hw_blk *blk = NULL;
+       struct list_head *blk_list = &rm->hw_blks[type];
 
        /* Find the block entry in the rm, and note the reservation */
-       _dpu_rm_init_hw_iter(&iter, type);
-       while (_dpu_rm_get_hw_locked(rm, &iter)) {
-               if (iter.blk->hw->id != id)
+       list_for_each_entry(iter, blk_list, list) {
+               if (iter->hw->id != id || iter->in_use)
                        continue;
 
-               trace_dpu_rm_reserve_intf(iter.blk->hw->id, DPU_HW_BLK_INTF);
+               trace_dpu_rm_reserve_intf(iter->hw->id, DPU_HW_BLK_INTF);
+               blk = iter;
 
                break;
        }
 
        /* Shouldn't happen since intfs are fixed at probe */
-       if (!iter.blk) {
+       if (!blk) {
                DPU_ERROR("couldn't find type %d id %d\n", type, id);
                return NULL;
        }
 
-       return iter.blk;
+       return blk;
 }
 
 static int _dpu_rm_reserve_intf_related_hw(
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
