In preparation for reworking the IRQ indices, move irq_tbl access to
a separate helper.
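
To illustrate the intended pattern (a minimal sketch based only on the
hunks below; dpu_core_irq_get_entry and the call sites shown are the
ones introduced by this patch), callers stop indexing irq_tbl directly
and instead look the entry up once through the helper:

	/* before: open-coded irq_tbl indexing at every call site */
	atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);

	/* after: fetch the entry once via the helper, then use it */
	struct dpu_hw_intr_entry *irq_entry =
		dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);

	atomic_inc(&irq_entry->count);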

Reviewed-by: Marijn Suijten <marijn.suij...@somainline.org>
Signed-off-by: Dmitry Baryshkov <dmitry.barysh...@linaro.org>
---
 .../gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 48 +++++++++++++------
 .../gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 12 +++--
 2 files changed, 41 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 81d03b6c67d1..14d374de30c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -206,6 +206,12 @@ static inline bool dpu_core_irq_is_valid(struct dpu_hw_intr *intr,
        return irq_idx >= 0 && irq_idx < intr->total_irqs;
 }
 
+static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
+                                                              int irq_idx)
+{
+       return &intr->irq_tbl[irq_idx];
+}
+
 /**
  * dpu_core_irq_callback_handler - dispatch core interrupts
  * @dpu_kms:           Pointer to DPU's KMS structure
@@ -213,17 +219,19 @@ static inline bool dpu_core_irq_is_valid(struct dpu_hw_intr *intr,
  */
 static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
 {
+       struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+
        VERB("irq_idx=%d\n", irq_idx);
 
-       if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
+       if (!irq_entry->cb)
                DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
 
-       atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
+       atomic_inc(&irq_entry->count);
 
        /*
         * Perform registered function callback
         */
-       dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg);
+       irq_entry->cb(irq_entry->arg);
 }
 
 irqreturn_t dpu_core_irq(struct msm_kms *kms)
@@ -510,6 +518,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
                void (*irq_cb)(void *arg),
                void *irq_arg)
 {
+       struct dpu_hw_intr_entry *irq_entry;
        unsigned long irq_flags;
        int ret;
 
@@ -527,15 +536,16 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 
        spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
-       if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+       irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+       if (unlikely(WARN_ON(irq_entry->cb))) {
                spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
                return -EBUSY;
        }
 
        trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
-       dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
-       dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+       irq_entry->arg = irq_arg;
+       irq_entry->cb = irq_cb;
 
        ret = dpu_hw_intr_enable_irq_locked(
                                dpu_kms->hw_intr,
@@ -552,6 +562,7 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
 
 int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
 {
+       struct dpu_hw_intr_entry *irq_entry;
        unsigned long irq_flags;
        int ret;
 
@@ -570,8 +581,9 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
                DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
                                        irq_idx, ret);
 
-       dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
-       dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+       irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
+       irq_entry->cb = NULL;
+       irq_entry->arg = NULL;
 
        spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
@@ -584,14 +596,16 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
 static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 {
        struct dpu_kms *dpu_kms = s->private;
+       struct dpu_hw_intr_entry *irq_entry;
        unsigned long irq_flags;
        int i, irq_count;
        void *cb;
 
        for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
                spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
-               irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
-               cb = dpu_kms->hw_intr->irq_tbl[i].cb;
+               irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+               irq_count = atomic_read(&irq_entry->count);
+               cb = irq_entry->cb;
                spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
 
                if (irq_count || cb)
@@ -614,6 +628,7 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
 void dpu_core_irq_preinstall(struct msm_kms *kms)
 {
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+       struct dpu_hw_intr_entry *irq_entry;
        int i;
 
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -621,22 +636,27 @@ void dpu_core_irq_preinstall(struct msm_kms *kms)
        dpu_disable_all_irqs(dpu_kms);
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
-       for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
-               atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
+       for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+               irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+               atomic_set(&irq_entry->count, 0);
+       }
 }
 
 void dpu_core_irq_uninstall(struct msm_kms *kms)
 {
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+       struct dpu_hw_intr_entry *irq_entry;
        int i;
 
        if (!dpu_kms->hw_intr)
                return;
 
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
-       for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
-               if (dpu_kms->hw_intr->irq_tbl[i].cb)
+       for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+               irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
+               if (irq_entry->cb)
                        DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+       }
 
        dpu_clear_irqs(dpu_kms);
        dpu_disable_all_irqs(dpu_kms);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index e2b00dd32619..391fb268ad90 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -38,6 +38,12 @@ enum dpu_hw_intr_reg {
 
 #define DPU_IRQ_IDX(reg_idx, offset)   (reg_idx * 32 + offset)
 
+struct dpu_hw_intr_entry {
+       void (*cb)(void *arg);
+       void *arg;
+       atomic_t count;
+};
+
 /**
  * struct dpu_hw_intr: hw interrupts handling data structure
  * @hw:               virtual address mapping
@@ -57,11 +63,7 @@ struct dpu_hw_intr {
        unsigned long irq_mask;
        const struct dpu_intr_reg *intr_set;
 
-       struct {
-               void (*cb)(void *arg);
-               void *arg;
-               atomic_t count;
-       } irq_tbl[];
+       struct dpu_hw_intr_entry irq_tbl[];
 };
 
 /**
-- 
2.39.2
