From: Matthew Murrian <matthew.murr...@goctsi.com>

Several code sections incorrectly use struct xilinx_axidma_tx_segment
instead of struct xilinx_aximcdma_tx_segment when operating as
Multichannel DMA. Because the two structure layouts are similar, the code
happened to work despite the error.

Fixes: 6ccd692bfb7f ("dmaengine: xilinx_dma: Add Xilinx AXI MCDMA Engine driver support")
Signed-off-by: Matthew Murrian <matthew.murr...@goctsi.com>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pan...@xilinx.com>
---
 drivers/dma/xilinx/xilinx_dma.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 9c747b08bb0f..ade4e6e1a5bd 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -948,8 +948,10 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
 {
        struct xilinx_cdma_tx_segment *cdma_seg;
        struct xilinx_axidma_tx_segment *axidma_seg;
+       struct xilinx_aximcdma_tx_segment *aximcdma_seg;
        struct xilinx_cdma_desc_hw *cdma_hw;
        struct xilinx_axidma_desc_hw *axidma_hw;
+       struct xilinx_aximcdma_desc_hw *aximcdma_hw;
        struct list_head *entry;
        u32 residue = 0;
 
@@ -961,13 +963,23 @@ static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
                        cdma_hw = &cdma_seg->hw;
                        residue += (cdma_hw->control - cdma_hw->status) &
                                   chan->xdev->max_buffer_len;
-               } else {
+               } else if (chan->xdev->dma_config->dmatype ==
+                          XDMA_TYPE_AXIDMA) {
                        axidma_seg = list_entry(entry,
                                                struct xilinx_axidma_tx_segment,
                                                node);
                        axidma_hw = &axidma_seg->hw;
                        residue += (axidma_hw->control - axidma_hw->status) &
                                   chan->xdev->max_buffer_len;
+               } else {
+                       aximcdma_seg =
+                               list_entry(entry,
+                                          struct xilinx_aximcdma_tx_segment,
+                                          node);
+                       aximcdma_hw = &aximcdma_seg->hw;
+                       residue +=
+                               (aximcdma_hw->control - aximcdma_hw->status) &
+                               chan->xdev->max_buffer_len;
                }
        }
 
@@ -1135,7 +1147,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
                        upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
                                ((i + 1) % XILINX_DMA_NUM_DESCS));
                        chan->seg_mv[i].phys = chan->seg_p +
-                               sizeof(*chan->seg_v) * i;
+                               sizeof(*chan->seg_mv) * i;
                        list_add_tail(&chan->seg_mv[i].node,
                                      &chan->free_seg_list);
                }
@@ -1560,7 +1572,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
 {
        struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
-       struct xilinx_axidma_tx_segment *tail_segment;
+       struct xilinx_aximcdma_tx_segment *tail_segment;
        u32 reg;
 
        /*
@@ -1582,7 +1594,7 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
        tail_desc = list_last_entry(&chan->pending_list,
                                    struct xilinx_dma_tx_descriptor, node);
        tail_segment = list_last_entry(&tail_desc->segments,
-                                      struct xilinx_axidma_tx_segment, node);
+                                      struct xilinx_aximcdma_tx_segment, node);
 
        reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
 
@@ -1864,6 +1876,7 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
        struct xilinx_vdma_tx_segment *tail_segment;
        struct xilinx_dma_tx_descriptor *tail_desc;
        struct xilinx_axidma_tx_segment *axidma_tail_segment;
+       struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
        struct xilinx_cdma_tx_segment *cdma_tail_segment;
 
        if (list_empty(&chan->pending_list))
@@ -1885,11 +1898,17 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
                                                struct xilinx_cdma_tx_segment,
                                                node);
                cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
-       } else {
+       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                axidma_tail_segment = list_last_entry(&tail_desc->segments,
                                               struct xilinx_axidma_tx_segment,
                                               node);
                axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+       } else {
+               aximcdma_tail_segment =
+                       list_last_entry(&tail_desc->segments,
+                                       struct xilinx_aximcdma_tx_segment,
+                                       node);
+               aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
        }
 
        /*
-- 
2.7.4

Reply via email to