Re: [PATCH v1 2/2] dmaengine: Add DW AXI DMAC driver

2017-03-28 Thread Eugeniy Paltsev
Hi Vinod!

Thanks for the response.
My comments below.

On Tue, 2017-03-14 at 08:30 +0530, Vinod Koul wrote:
> On Tue, Feb 21, 2017 at 11:38:04PM +0300, Eugeniy Paltsev wrote:
> 
> > +static void vchan_desc_put(struct virt_dma_desc *vdesc)
> > +{
> > +   axi_desc_put(vd_to_axi_desc(vdesc));
> > +}
> well this has no logic and becomes a dummy fn, so why do we need it.
> 
Both functions (vchan_desc_put and axi_desc_put) are used.
The first one is set as a callback for vchan; the second one is used when
we need to free descriptors after errors in the preparation function
(before we call vchan_tx_prep).

> > 
> > +
> > +static enum dma_status
> > +dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
> > +     struct dma_tx_state *txstate)
> > +{
> > +   struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
> > +   enum dma_status ret;
> > +
> > +   ret = dma_cookie_status(dchan, cookie, txstate);
> > +
> > +   if (chan->is_paused && ret == DMA_IN_PROGRESS)
> > +   return DMA_PAUSED;
> no residue calculation?

I don't think we need residue calculation for mem-to-mem transfers. I'm
planning to add residue calculation together with the DMA_SLAVE
functionality.

> > 
> > +static void dma_chan_free_chan_resources(struct dma_chan *dchan)
> > +{
> > +   struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
> > +
> > +   /* ASSERT: channel is idle */
> > +   if (axi_chan_is_hw_enable(chan))
> > +   dev_err(dchan2dev(dchan), "%s is non-idle!\n",
> > +   axi_chan_name(chan));
> > +
> > +   axi_chan_disable(chan);
> > +   axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
> > +
> > +   vchan_free_chan_resources(&chan->vc);
> > +
> > +   dev_vdbg(dchan2dev(dchan), "%s: %s: descriptor still
> > allocated: %u\n",
> > +   __func__, axi_chan_name(chan), chan-
> > >descs_allocated);
> > +
> > +   pm_runtime_put_sync_suspend(chan->chip->dev);
> any reason why sync variant is used?
> 
I re-read the documentation and yes - I can use async variant here (and
in the other parts of this driver).

> > +   block_ts = xfer_len >> src_width;
> > +   if (block_ts > max_block_ts) {
> > +   block_ts = max_block_ts;
> > +   xfer_len = max_block_ts << src_width;
> > +   }
> > +
> > +   desc = axi_desc_get(chan);
> > +   if (unlikely(!desc))
> > +   goto err_desc_get;
> > +
> > +   write_desc_sar(desc, src_adr);
> > +   write_desc_dar(desc, dst_adr);
> > +   desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
> > +   desc->lli.ctl_hi =
> > cpu_to_le32(CH_CTL_H_LLI_VALID);
> > +
> > +   reg = (DWAXIDMAC_BURST_TRANS_LEN_4 <<
> > CH_CTL_L_DST_MSIZE_POS |
> > +      DWAXIDMAC_BURST_TRANS_LEN_4 <<
> > CH_CTL_L_SRC_MSIZE_POS |
> > +      dst_width << CH_CTL_L_DST_WIDTH_POS |
> > +      src_width << CH_CTL_L_SRC_WIDTH_POS |
> > +      DWAXIDMAC_CH_CTL_L_INC <<
> > CH_CTL_L_DST_INC_POS |
> > +      DWAXIDMAC_CH_CTL_L_INC <<
> > CH_CTL_L_SRC_INC_POS);
> > +   desc->lli.ctl_lo = cpu_to_le32(reg);
> > +
> > +   set_desc_src_master(desc);
> > +   set_desc_dest_master(desc);
> > +
> > +   /* Manage transfer list (xfer_list) */
> > +   if (!first) {
> > +   first = desc;
> > +   } else {
> > +   list_add_tail(&desc->xfer_list, &first-
> > >xfer_list);
> and since you use vchan why do you need this list

Each virtual descriptor encapsulates several hardware descriptors,
which belong to the same transfer.
This list (xfer_list) is used only for allocating/freeing these
descriptors and it doesn't affect the virtual DMA logic.

-- 
 Eugeniy Paltsev

Re: [PATCH v1 2/2] dmaengine: Add DW AXI DMAC driver

2017-03-13 Thread Vinod Koul
On Tue, Feb 21, 2017 at 11:38:04PM +0300, Eugeniy Paltsev wrote:

> +static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
> +{
> + struct dw_axi_dma *dw = chan->chip->dw;
> + struct axi_dma_desc *desc;
> + dma_addr_t phys;
> +
> + desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);

GFP_NOWAIT please

> +static void axi_desc_put(struct axi_dma_desc *desc)
> +{
> + struct axi_dma_chan *chan = desc->chan;
> + struct dw_axi_dma *dw = chan->chip->dw;
> + struct axi_dma_desc *child, *_next;
> + unsigned int descs_put = 0;
> +
> + list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
> + list_del(&child->xfer_list);
> + dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
> + descs_put++;
> + }
> +
> + dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
> + descs_put++;
> +
> + chan->descs_allocated -= descs_put;

why not decrement chan->descs_allocated?

> +
> + dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
> + axi_chan_name(chan), descs_put, chan->descs_allocated);
> +}
> +
> +static void vchan_desc_put(struct virt_dma_desc *vdesc)
> +{
> + axi_desc_put(vd_to_axi_desc(vdesc));
> +}

well this has no logic and becomes a dummy fn, so why do we need it.

> +
> +static enum dma_status
> +dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
> +   struct dma_tx_state *txstate)
> +{
> + struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
> + enum dma_status ret;
> +
> + ret = dma_cookie_status(dchan, cookie, txstate);
> +
> + if (chan->is_paused && ret == DMA_IN_PROGRESS)
> + return DMA_PAUSED;

no residue calculation?

> +static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
> +{
> + struct axi_dma_desc *desc;
> + struct virt_dma_desc *vd;
> +
> + vd = vchan_next_desc(&chan->vc);

unnecessary empty line

> +static void dma_chan_free_chan_resources(struct dma_chan *dchan)
> +{
> + struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
> +
> + /* ASSERT: channel is idle */
> + if (axi_chan_is_hw_enable(chan))
> + dev_err(dchan2dev(dchan), "%s is non-idle!\n",
> + axi_chan_name(chan));
> +
> + axi_chan_disable(chan);
> + axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
> +
> + vchan_free_chan_resources(&chan->vc);
> +
> + dev_vdbg(dchan2dev(dchan), "%s: %s: descriptor still allocated: %u\n",
> + __func__, axi_chan_name(chan), chan->descs_allocated);
> +
> + pm_runtime_put_sync_suspend(chan->chip->dev);

any reason why sync variant is used?

> +static struct dma_async_tx_descriptor *
> +dma_chan_prep_dma_sg(struct dma_chan *dchan,
> +  struct scatterlist *dst_sg, unsigned int dst_nents,
> +  struct scatterlist *src_sg, unsigned int src_nents,
> +  unsigned long flags)
> +{
> + struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
> + struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
> + size_t dst_len = 0, src_len = 0, xfer_len = 0;
> + dma_addr_t dst_adr = 0, src_adr = 0;
> + u32 src_width, dst_width;
> + size_t block_ts, max_block_ts;
> + u32 reg;
> + u8 lms = 0;
> +
> + dev_vdbg(chan2dev(chan), "%s: %s: sn: %d dn: %d flags: 0x%lx",
> + __func__, axi_chan_name(chan), src_nents, dst_nents, flags);
> +
> + if (unlikely(dst_nents == 0 || src_nents == 0))
> + return NULL;
> +
> + if (unlikely(dst_sg == NULL || src_sg == NULL))
> + return NULL;
> +
> + max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
> +
> + /*
> +  * Loop until there is either no more source or no more destination
> +  * scatterlist entry.
> +  */
> + while ((dst_len || (dst_sg && dst_nents)) &&
> +(src_len || (src_sg && src_nents))) {
> + if (dst_len == 0) {
> + dst_adr = sg_dma_address(dst_sg);
> + dst_len = sg_dma_len(dst_sg);
> +
> + dst_sg = sg_next(dst_sg);
> + dst_nents--;
> + }
> +
> + /* Process src sg list */
> + if (src_len == 0) {
> + src_adr = sg_dma_address(src_sg);
> + src_len = sg_dma_len(src_sg);
> +
> + src_sg = sg_next(src_sg);
> + src_nents--;
> + }
> +
> + /* Min of src and dest length will be this xfer length */
> + xfer_len = min_t(size_t, src_len, dst_len);
> + if (xfer_len == 0)
> + continue;
> +
> + /* Take care for the alignment */
> + src_width = axi_chan_get_xfer_width(chan, src_adr,
> + dst_adr, xfer_len);
> + /*
> +  * Actually src_width and dst_wi

[PATCH v1 2/2] dmaengine: Add DW AXI DMAC driver

2017-02-21 Thread Eugeniy Paltsev
This patch adds support for the DW AXI DMAC controller.

DW AXI DMAC is a part of upcoming development board from Synopsys.

In this driver implementation only DMA_MEMCPY and DMA_SG transfers
are supported.

Signed-off-by: Eugeniy Paltsev 
---
 drivers/dma/Kconfig|   10 +
 drivers/dma/Makefile   |1 +
 drivers/dma/axi_dma_platform.c | 1041 
 drivers/dma/axi_dma_platform.h |  118 
 drivers/dma/axi_dma_platform_reg.h |  189 +++
 5 files changed, 1359 insertions(+)
 create mode 100644 drivers/dma/axi_dma_platform.c
 create mode 100644 drivers/dma/axi_dma_platform.h
 create mode 100644 drivers/dma/axi_dma_platform_reg.h

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 263495d..d1f2157 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -578,6 +578,16 @@ config ZX_DMA
help
  Support the DMA engine for ZTE ZX296702 platform devices.
 
+config AXI_DW_DMAC
+   tristate "Synopsys DesignWare AXI DMA support"
+   depends on OF || COMPILE_TEST
+   select DMA_ENGINE
+   select DMA_VIRTUAL_CHANNELS
+   help
+ Enable support for Synopsys DesignWare AXI DMA controller.
+ NOTE: This driver wasn't tested on a 64 bit platform because
+ of the lack of a 64 bit platform with the Synopsys DW AXI DMAC.
+
 
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a4fa336..9fb1dfe 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
+obj-$(CONFIG_AXI_DW_DMAC) += axi_dma_platform.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
diff --git a/drivers/dma/axi_dma_platform.c b/drivers/dma/axi_dma_platform.c
new file mode 100644
index 000..5c33990
--- /dev/null
+++ b/drivers/dma/axi_dma_platform.c
@@ -0,0 +1,1041 @@
+/*
+ * Synopsys DesignWare AXI DMA Controller driver.
+ *
+ * Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "axi_dma_platform.h"
+#include "axi_dma_platform_reg.h"
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define DRV_NAME   "axi_dw_dmac"
+
+/*
+ * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
+ * master data bus width up to 512 bits (for both AXI master interfaces), but
+ * it depends on IP block configuration.
+ */
+#define AXI_DMA_BUSWIDTHS\
+   (DMA_SLAVE_BUSWIDTH_1_BYTE  | \
+   DMA_SLAVE_BUSWIDTH_2_BYTES  | \
+   DMA_SLAVE_BUSWIDTH_4_BYTES  | \
+   DMA_SLAVE_BUSWIDTH_8_BYTES  | \
+   DMA_SLAVE_BUSWIDTH_16_BYTES | \
+   DMA_SLAVE_BUSWIDTH_32_BYTES | \
+   DMA_SLAVE_BUSWIDTH_64_BYTES)
+/* TODO: check: do we need to use BIT() macro here? */
+
+static inline void
+axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
+{
+   iowrite32(val, chip->regs + reg);
+}
+
+static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
+{
+   return ioread32(chip->regs + reg);
+}
+
+static inline void
+axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
+{
+   iowrite32(val, chan->chan_regs + reg);
+}
+
+static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
+{
+   return ioread32(chan->chan_regs + reg);
+}
+
+static inline void
+axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
+{
+   iowrite32(val & 0xFFFFFFFF, chan->chan_regs + reg);
+   iowrite32(val >> 32, chan->chan_regs + reg + 4);
+}
+
+static inline void axi_dma_disable(struct axi_dma_chip *chip)
+{
+   u32 val;
+
+   val = axi_dma_ioread32(chip, DMAC_CFG);
+   val &= ~DMAC_EN_MASK;
+   axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_enable(struct axi_dma_chip *chip)
+{
+   u32 val;
+
+   val = axi_dma_ioread32(chip, DMAC_CFG);
+   val |= DMAC_EN_MASK;
+   axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
+{
+   u32 val;
+
+   val = axi_dma_ioread32(chip, DMAC_CFG);
+   val &= ~INT_EN_MASK;
+   axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+