The current IRQ helpers do not guarantee mutual exclusion covering the
entire transaction of accessing the mask member and modifying the mask
register.
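
For example, a helper that does a plain read-modify-write inside the
handler macro (illustrative only, not code from this patch):

	pirq->mask |= mask;
	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask);

Without a lock, these two lines can interleave with the suspend path's
write of 0 to xxx_INT_MASK, unmasking interrupts on a device that is
about to be suspended and leaving pirq->mask and the register out of
sync.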
This makes it hard, if not impossible, to implement mask modification
helpers that may change one of these outside the normal
suspend/resume/isr code paths.

Add a spinlock to struct panthor_irq that protects both the mask member
and the register. Acquire it in all code paths that access these.

Then, add the aforementioned new helpers: mask_enable, mask_disable,
and resume_restore. The first two work by ORing in and ANDing out the
given mask bits, and the last relies on the new behaviour that
panthor_irq::mask is no longer set to 0 on suspend.

panthor_irq::suspended remains an atomic, as it's necessarily written
to outside the mask_lock in the suspend path.

Signed-off-by: Nicolas Frattaroli <[email protected]>
---
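A usage sketch of the new helpers, placed here rather than in the
commit message since it is illustrative only. It assumes the gpu IRQ
instance generated by PANTHOR_IRQ_HANDLER(gpu, GPU, ...) in
panthor_gpu.c and the GPU_IRQ_CLEAN_CACHES_COMPLETED bit from
panthor_regs.h; the caller is hypothetical, only the helper semantics
come from this patch:

	/* Clears the bit in pirq->mask under mask_lock, and writes
	 * GPU_INT_MASK only if the IRQ is not suspended.
	 */
	panthor_gpu_irq_mask_disable(&ptdev->gpu->irq,
				     GPU_IRQ_CLEAN_CACHES_COMPLETED);

	/* ... run with this interrupt source masked ... */

	/* Sets the bit again. If a suspend happened in between, the
	 * bit is kept in pirq->mask and resume_restore replays it into
	 * GPU_INT_MASK on the next resume, so the disable above is not
	 * silently undone.
	 */
	panthor_gpu_irq_mask_enable(&ptdev->gpu->irq,
				    GPU_IRQ_CLEAN_CACHES_COMPLETED);
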
 drivers/gpu/drm/panthor/panthor_device.h | 55 +++++++++++++++++++++++++++-----
 1 file changed, 47 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index f35e52b9546a..eb75c83e2db3 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -73,11 +73,14 @@ struct panthor_irq {
 	/** @irq: IRQ number. */
 	int irq;
 
-	/** @mask: Current mask being applied to xxx_INT_MASK. */
+	/** @mask: Values to write to xxx_INT_MASK if active. */
 	u32 mask;
 
 	/** @suspended: Set to true when the IRQ is suspended. */
 	atomic_t suspended;
+
+	/** @mask_lock: protects modifications to _INT_MASK and @mask */
+	spinlock_t mask_lock;
 };
 
 /**
@@ -410,6 +413,8 @@ static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data)
 	struct panthor_irq *pirq = data;					\
 	struct panthor_device *ptdev = pirq->ptdev;				\
 										\
+	guard(spinlock_irqsave)(&pirq->mask_lock);				\
+										\
 	if (atomic_read(&pirq->suspended))					\
 		return IRQ_NONE;						\
 	if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT))			\
@@ -425,8 +430,10 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da
 	struct panthor_device *ptdev = pirq->ptdev;				\
 	irqreturn_t ret = IRQ_NONE;						\
 										\
+	guard(spinlock_irqsave)(&pirq->mask_lock);				\
+										\
 	while (true) {								\
-		u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask;	\
+		u32 status = (gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask);	\
 										\
 		if (!status)							\
 			break;							\
@@ -443,18 +450,30 @@ static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *da
 										\
 static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq)	\
 {										\
-	pirq->mask = 0;								\
-	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0);			\
+	scoped_guard(spinlock_irqsave, &pirq->mask_lock) {			\
+		gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0);		\
+	}									\
 	synchronize_irq(pirq->irq);						\
 	atomic_set(&pirq->suspended, true);					\
 }										\
 										\
 static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask)	\
 {										\
-	atomic_set(&pirq->suspended, false);					\
+	guard(spinlock_irqsave)(&pirq->mask_lock);				\
+										\
 	pirq->mask = mask;							\
-	gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask);		\
-	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask);		\
+	atomic_set(&pirq->suspended, false);					\
+	gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, pirq->mask);		\
+	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask);		\
+}										\
+										\
+static inline void panthor_ ## __name ## _irq_resume_restore(struct panthor_irq *pirq)	\
+{										\
+	guard(spinlock_irqsave)(&pirq->mask_lock);				\
+										\
+	atomic_set(&pirq->suspended, false);					\
+	gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, pirq->mask);		\
+	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask);		\
 }										\
 										\
 static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev,	\
@@ -463,13 +482,33 @@ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev,	\
 {										\
 	pirq->ptdev = ptdev;							\
 	pirq->irq = irq;							\
-	panthor_ ## __name ## _irq_resume(pirq, mask);				\
+	pirq->mask = mask;							\
+	spin_lock_init(&pirq->mask_lock);					\
+	panthor_ ## __name ## _irq_resume_restore(pirq);			\
 										\
 	return devm_request_threaded_irq(ptdev->base.dev, irq,			\
 					 panthor_ ## __name ## _irq_raw_handler,	\
 					 panthor_ ## __name ## _irq_threaded_handler,	\
 					 IRQF_SHARED, KBUILD_MODNAME "-" # __name,	\
 					 pirq);					\
+}										\
+										\
+static inline void panthor_ ## __name ## _irq_mask_enable(struct panthor_irq *pirq, u32 mask)	\
+{										\
+	guard(spinlock_irqsave)(&pirq->mask_lock);				\
+										\
+	pirq->mask |= mask;							\
+	if (!atomic_read(&pirq->suspended))					\
+		gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask);	\
+}										\
+										\
+static inline void panthor_ ## __name ## _irq_mask_disable(struct panthor_irq *pirq, u32 mask)	\
+{										\
+	guard(spinlock_irqsave)(&pirq->mask_lock);				\
+										\
+	pirq->mask &= ~mask;							\
+	if (!atomic_read(&pirq->suspended))					\
+		gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, pirq->mask);	\
 }
 
 extern struct workqueue_struct *panthor_cleanup_wq;
-- 
2.52.0
