[ 025/102] dma: pl330: Fix cyclic transfers

2013-08-08 Thread Greg Kroah-Hartman
3.10-stable review patch.  If anyone has any objections, please let me know.

--

From: Lars-Peter Clausen <lars@metafoo.de>

commit fc51446021f42aca8906e701fc2292965aafcb15 upstream.

Allocate a descriptor for each period of a cyclic transfer, not just the first.
Also since the callback needs to be called for each finished period make sure to
initialize the callback and callback_param fields of each descriptor in a cyclic
transfer.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/dma/pl330.c |   93 +---
 1 file changed, 67 insertions(+), 26 deletions(-)

--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2527,6 +2527,10 @@ static dma_cookie_t pl330_tx_submit(stru
/* Assign cookies to all nodes */
while (!list_empty(&last->node)) {
desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+   if (pch->cyclic) {
+   desc->txd.callback = last->txd.callback;
+   desc->txd.callback_param = last->txd.callback_param;
+   }
 
dma_cookie_assign(&desc->txd);
 
@@ -2710,45 +2714,82 @@ static struct dma_async_tx_descriptor *p
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
 {
-   struct dma_pl330_desc *desc;
+   struct dma_pl330_desc *desc = NULL, *first = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
+   struct dma_pl330_dmac *pdmac = pch->dmac;
+   unsigned int i;
dma_addr_t dst;
dma_addr_t src;
 
-   desc = pl330_get_desc(pch);
-   if (!desc) {
-   dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-   __func__, __LINE__);
+   if (len % period_len != 0)
return NULL;
-   }
 
-   switch (direction) {
-   case DMA_MEM_TO_DEV:
-   desc->rqcfg.src_inc = 1;
-   desc->rqcfg.dst_inc = 0;
-   desc->req.rqtype = MEMTODEV;
-   src = dma_addr;
-   dst = pch->fifo_addr;
-   break;
-   case DMA_DEV_TO_MEM:
-   desc->rqcfg.src_inc = 0;
-   desc->rqcfg.dst_inc = 1;
-   desc->req.rqtype = DEVTOMEM;
-   src = pch->fifo_addr;
-   dst = dma_addr;
-   break;
-   default:
+   if (!is_slave_direction(direction)) {
dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
__func__, __LINE__);
return NULL;
}
 
-   desc->rqcfg.brst_size = pch->burst_sz;
-   desc->rqcfg.brst_len = 1;
+   for (i = 0; i < len / period_len; i++) {
+   desc = pl330_get_desc(pch);
+   if (!desc) {
+   dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+   __func__, __LINE__);
 
-   pch->cyclic = true;
+   if (!first)
+   return NULL;
+
+   spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+   while (!list_empty(&first->node)) {
+   desc = list_entry(first->node.next,
+   struct dma_pl330_desc, node);
+   list_move_tail(&desc->node, &pdmac->desc_pool);
+   }
+
+   list_move_tail(&first->node, &pdmac->desc_pool);
+
+   spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+   return NULL;
+   }
+
+   switch (direction) {
+   case DMA_MEM_TO_DEV:
+   desc->rqcfg.src_inc = 1;
+   desc->rqcfg.dst_inc = 0;
+   desc->req.rqtype = MEMTODEV;
+   src = dma_addr;
+   dst = pch->fifo_addr;
+   break;
+   case DMA_DEV_TO_MEM:
+   desc->rqcfg.src_inc = 0;
+   desc->rqcfg.dst_inc = 1;
+   desc->req.rqtype = DEVTOMEM;
+   src = pch->fifo_addr;
+   dst = dma_addr;
+   break;
+   default:
+   break;
+   }
 
-   fill_px(&desc->px, dst, src, period_len);
+   desc->rqcfg.brst_size = pch->burst_sz;
+   desc->rqcfg.brst_len = 1;
+   fill_px(&desc->px, dst, src, period_len);
+
+   if (!first)
+   first = desc;
+   else
+   list_add_tail(&desc->node, &first->node);
+
+   dma_addr += period_len;
+   }
+
+   if (!desc)
+   return NULL;
+
+   pch->cyclic = true;
+   desc->txd.flags = flags;
 
return &desc->txd;
 }


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a 

[ 025/102] dma: pl330: Fix cyclic transfers

2013-08-08 Thread Greg Kroah-Hartman
3.10-stable review patch.  If anyone has any objections, please let me know.

--

From: Lars-Peter Clausen l...@metafoo.de

commit fc51446021f42aca8906e701fc2292965aafcb15 upstream.

Allocate a descriptor for each period of a cyclic transfer, not just the first.
Also since the callback needs to be called for each finished period make sure to
initialize the callback and callback_param fields of each descriptor in a cyclic
transfer.

Signed-off-by: Lars-Peter Clausen l...@metafoo.de
Signed-off-by: Vinod Koul vinod.k...@intel.com
Signed-off-by: Greg Kroah-Hartman gre...@linuxfoundation.org

---
 drivers/dma/pl330.c |   93 +---
 1 file changed, 67 insertions(+), 26 deletions(-)

--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2527,6 +2527,10 @@ static dma_cookie_t pl330_tx_submit(stru
/* Assign cookies to all nodes */
while (!list_empty(&last->node)) {
desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+   if (pch->cyclic) {
+   desc->txd.callback = last->txd.callback;
+   desc->txd.callback_param = last->txd.callback_param;
+   }
 
dma_cookie_assign(&desc->txd);
 
@@ -2710,45 +2714,82 @@ static struct dma_async_tx_descriptor *p
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
 {
-   struct dma_pl330_desc *desc;
+   struct dma_pl330_desc *desc = NULL, *first = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
+   struct dma_pl330_dmac *pdmac = pch->dmac;
+   unsigned int i;
dma_addr_t dst;
dma_addr_t src;
 
-   desc = pl330_get_desc(pch);
-   if (!desc) {
-   dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-   __func__, __LINE__);
+   if (len % period_len != 0)
return NULL;
-   }
 
-   switch (direction) {
-   case DMA_MEM_TO_DEV:
-   desc->rqcfg.src_inc = 1;
-   desc->rqcfg.dst_inc = 0;
-   desc->req.rqtype = MEMTODEV;
-   src = dma_addr;
-   dst = pch->fifo_addr;
-   break;
-   case DMA_DEV_TO_MEM:
-   desc->rqcfg.src_inc = 0;
-   desc->rqcfg.dst_inc = 1;
-   desc->req.rqtype = DEVTOMEM;
-   src = pch->fifo_addr;
-   dst = dma_addr;
-   break;
-   default:
+   if (!is_slave_direction(direction)) {
dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
__func__, __LINE__);
return NULL;
}
 
-   desc->rqcfg.brst_size = pch->burst_sz;
-   desc->rqcfg.brst_len = 1;
+   for (i = 0; i < len / period_len; i++) {
+   desc = pl330_get_desc(pch);
+   if (!desc) {
+   dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+   __func__, __LINE__);
 
-   pch->cyclic = true;
+   if (!first)
+   return NULL;
+
+   spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+   while (!list_empty(&first->node)) {
+   desc = list_entry(first->node.next,
+   struct dma_pl330_desc, node);
+   list_move_tail(&desc->node, &pdmac->desc_pool);
+   }
+
+   list_move_tail(&first->node, &pdmac->desc_pool);
+
+   spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+   return NULL;
+   }
+
+   switch (direction) {
+   case DMA_MEM_TO_DEV:
+   desc->rqcfg.src_inc = 1;
+   desc->rqcfg.dst_inc = 0;
+   desc->req.rqtype = MEMTODEV;
+   src = dma_addr;
+   dst = pch->fifo_addr;
+   break;
+   case DMA_DEV_TO_MEM:
+   desc->rqcfg.src_inc = 0;
+   desc->rqcfg.dst_inc = 1;
+   desc->req.rqtype = DEVTOMEM;
+   src = pch->fifo_addr;
+   dst = dma_addr;
+   break;
+   default:
+   break;
+   }
 
-   fill_px(&desc->px, dst, src, period_len);
+   desc->rqcfg.brst_size = pch->burst_sz;
+   desc->rqcfg.brst_len = 1;
+   fill_px(&desc->px, dst, src, period_len);
+
+   if (!first)
+   first = desc;
+   else
+   list_add_tail(&desc->node, &first->node);
+
+   dma_addr += period_len;
+   }
+
+   if (!desc)
+   return NULL;
+
+   pch->cyclic = true;
+   desc->txd.flags = flags;
 
return &desc->txd;
 }