When a channel is configured to suppress host-side interrupts (RIE=0),
the host-side driver cannot rely on IRQ-driven progress. Add an optional
polling path for such channels: a delayed work item re-reads the channel
status and then completes the transfer, re-arms the poll, or reports an
error accordingly. The polling path is enabled only for channels where
dw_edma_chan_ignore_irq() returns true.

Signed-off-by: Koichiro Den <[email protected]>
---
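Note for reviewers: dw_edma_chan_ignore_irq() is used here but defined
earlier in the series, not in this patch. As a rough, illustrative sketch
of the assumed semantics only (the enumerator name below is hypothetical,
not the real value):

static bool dw_edma_chan_ignore_irq(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	/*
	 * Hypothetical enumerator name; the actual check depends on the
	 * enum dw_edma_ch_irq_mode values introduced earlier in the series.
	 */
	return chan->irq_mode == DW_EDMA_CH_IRQ_NONE;
}
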
 drivers/dma/dw-edma/dw-edma-core.c | 98 ++++++++++++++++++++++++------
 drivers/dma/dw-edma/dw-edma-core.h |  4 ++
 2 files changed, 85 insertions(+), 17 deletions(-)

diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 059b3996d383..696b9f3ea378 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -308,23 +308,6 @@ static int dw_edma_device_terminate_all(struct dma_chan *dchan)
        return err;
 }
 
-static void dw_edma_device_issue_pending(struct dma_chan *dchan)
-{
-       struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
-       unsigned long flags;
-
-       if (!chan->configured)
-               return;
-
-       spin_lock_irqsave(&chan->vc.lock, flags);
-       if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
-           chan->status == EDMA_ST_IDLE) {
-               chan->status = EDMA_ST_BUSY;
-               dw_edma_start_transfer(chan);
-       }
-       spin_unlock_irqrestore(&chan->vc.lock, flags);
-}
-
 static enum dma_status
 dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                         struct dma_tx_state *txstate)
@@ -710,6 +693,69 @@ static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
        return ret;
 }
 
+static void dw_edma_done_arm(struct dw_edma_chan *chan)
+{
+       if (!dw_edma_chan_ignore_irq(&chan->vc.chan))
+               /* no need to arm since it's not to be ignored */
+               return;
+
+       queue_delayed_work(system_wq, &chan->poll_work, 1);
+}
+
+static void dw_edma_chan_poll_done(struct dma_chan *dchan)
+{
+       struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+       enum dma_status st;
+
+       if (!dw_edma_chan_ignore_irq(dchan))
+               /* no need to poll since it's not to be ignored */
+               return;
+
+       guard(spinlock_irqsave)(&chan->poll_lock);
+
+       if (chan->status != EDMA_ST_BUSY)
+               return;
+
+       st = dw_edma_core_ch_status(chan);
+
+       switch (st) {
+       case DMA_COMPLETE:
+               dw_edma_done_interrupt(chan);
+               if (chan->status == EDMA_ST_BUSY)
+                       dw_edma_done_arm(chan);
+               break;
+       case DMA_IN_PROGRESS:
+               dw_edma_done_arm(chan);
+               break;
+       case DMA_ERROR:
+               dw_edma_abort_interrupt(chan);
+               break;
+       default:
+               break;
+       }
+}
+
+static void dw_edma_device_issue_pending(struct dma_chan *dchan)
+{
+       struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+       unsigned long flags;
+
+       if (!chan->configured)
+               return;
+
+       dw_edma_chan_poll_done(dchan);
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+       if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
+           chan->status == EDMA_ST_IDLE) {
+               chan->status = EDMA_ST_BUSY;
+               dw_edma_start_transfer(chan);
+       } else {
+               dw_edma_done_arm(chan);
+       }
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
 static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
 {
        struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
@@ -1063,6 +1109,19 @@ int dw_edma_remove(struct dw_edma_chip *chip)
 }
 EXPORT_SYMBOL_GPL(dw_edma_remove);
 
+static void dw_edma_poll_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct dw_edma_chan *chan =
+               container_of(dwork, struct dw_edma_chan, poll_work);
+       struct dma_chan *dchan = &chan->vc.chan;
+
+       if (!chan->configured)
+               return;
+
+       dw_edma_chan_poll_done(dchan);
+}
+
 int dw_edma_chan_irq_config(struct dma_chan *dchan,
                            enum dw_edma_ch_irq_mode mode)
 {
@@ -1090,6 +1149,11 @@ int dw_edma_chan_irq_config(struct dma_chan *dchan,
                 str_write_read(chan->dir == EDMA_DIR_WRITE),
                 chan->id, mode);
 
+       if (dw_edma_chan_ignore_irq(&chan->vc.chan)) {
+               spin_lock_init(&chan->poll_lock);
+               INIT_DELAYED_WORK(&chan->poll_work, dw_edma_poll_work);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(dw_edma_chan_irq_config);
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 8458d676551a..11fe4532f0bf 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -11,6 +11,7 @@
 
 #include <linux/msi.h>
 #include <linux/dma/edma.h>
+#include <linux/workqueue.h>
 
 #include "../virt-dma.h"
 
@@ -83,6 +84,9 @@ struct dw_edma_chan {
 
        enum dw_edma_ch_irq_mode        irq_mode;
 
+       struct delayed_work             poll_work;
+       spinlock_t                      poll_lock;
+
        enum dw_edma_request            request;
        enum dw_edma_status             status;
        u8                              configured;
-- 
2.51.0

