Sergei Shtylyov <[email protected]> writes:

> Add support for Texas Instruments Communication Port Programming Interface 4.1
> (CPPI 4.1) used on OMAP-L137/DA830.
>
> At this moment, only the DMA controller and queue manager are supported.
> Support for the buffer manager is lacking but this chip doesn't have it 
> anyway.

Looks mostly OK, although I won't pretend to have gone through all the
details of this HW.

Some cosmetic stuff to fix up before applying:

Please run the patch through checkpatch and fix the errors:

ERROR: code indent should use tabs where possible
#130: FILE: arch/arm/mach-davinci/cppi41.c:69:
+^I ^Ireturn -EINVAL;$

ERROR: code indent should use tabs where possible
#190: FILE: arch/arm/mach-davinci/cppi41.c:129:
+^I ^Ireturn -EINVAL;$

WARNING: line over 80 characters
#1212: FILE: arch/arm/mach-davinci/include/mach/cppi41.h:358:
+       struct cppi41_queue fdb_queue[4]; /* Free Desc/Buffer queue. Element 0 */

ERROR: Missing Signed-off-by: line(s)

total: 3 errors, 1 warnings, 1516 lines checked
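
For reference, checkpatch can be run directly on the patch file, e.g.
(assuming you saved it as cppi41.patch):

	./scripts/checkpatch.pl cppi41.patch

And please add your Signed-off-by: line when reposting.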

> ---
> As DA830/OMAP-L137 code is going to reside in arch/arm/mach-davinci/, I had to
> place the code into this directory by the popular demand, despite CPPI 4.1 not
> really being arch-specific...
>
> The patch is against the recent DaVinci tree, plus my patch adding the cp_intc
> support...
>
>  arch/arm/mach-davinci/Kconfig               |    5 
>  arch/arm/mach-davinci/Makefile              |    1 
>  arch/arm/mach-davinci/cppi41.c              |  788 ++++++++++++++++++++++++++++
>  arch/arm/mach-davinci/include/mach/cppi41.h |  710 +++++++++++++++++++++++++
>  4 files changed, 1504 insertions(+)
>
> Index: linux-davinci/arch/arm/mach-davinci/Kconfig
> ===================================================================
> --- linux-davinci.orig/arch/arm/mach-davinci/Kconfig
> +++ linux-davinci/arch/arm/mach-davinci/Kconfig
> @@ -19,6 +19,11 @@ config ARCH_DAVINCI_DM355
>          bool "DaVinci 355 based system"
>       select CPU_V5
>  
> +config CPPI41
> +     bool "CPPI 4.1 support"
> +     help
> +       Configure this option to include the CPPI 4.1 support.
> +
>  comment "DaVinci Board Type"
>  
>  config MACH_DAVINCI_EVM
> Index: linux-davinci/arch/arm/mach-davinci/Makefile
> ===================================================================
> --- linux-davinci.orig/arch/arm/mach-davinci/Makefile
> +++ linux-davinci/arch/arm/mach-davinci/Makefile
> @@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_DAVINCI_DM644x)       
>  obj-$(CONFIG_ARCH_DAVINCI_DM646x)       += dm646x.o
>  obj-$(CONFIG_ARCH_DAVINCI_DM355)        += dm355.o
>  obj-$(CONFIG_CP_INTC)                        += cp_intc.o
> +obj-$(CONFIG_CPPI41)                 += cppi41.o
>  
>  # Board specific
>  obj-$(CONFIG_MACH_DAVINCI_EVM)       += board-dm644x-evm.o
> Index: linux-davinci/arch/arm/mach-davinci/cppi41.c
> ===================================================================
> --- /dev/null
> +++ linux-davinci/arch/arm/mach-davinci/cppi41.c
> @@ -0,0 +1,788 @@
> +/*
> + * CPPI 4.1 support
> + *
> + * Copyright (C) 2008-2009 MontaVista Software, Inc. <[email protected]>
> + *
> + * Based on the PAL CPPI 4.1 implementation
> + * Copyright (C) 1998-2006 Texas Instruments Incorporated
> + *
> + * This file contains the main implementation for CPPI 4.1 common peripherals,
> + * including the DMA Controllers and the Queue Managers.
> + *
> + * This program is free software; you can distribute it and/or modify it
> + * under the terms of the GNU General Public License (Version 2) as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
> + * for more details.
> + *
> + * You should have received a copy of the GNU General Public License along
> + * with this program; if not, write to the Free Software Foundation, Inc.,
> + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
> + *
> + */
> +
> +#include <linux/io.h>
> +#include <linux/init.h>
> +#include <linux/module.h>
> +#include <linux/dma-mapping.h>
> +
> +#include <mach/cppi41.h>
> +
> +#define      CPPI41_DEBUG
> +
> +#ifdef       CPPI41_DEBUG
> +#define DBG(format, args...) printk(format, ##args)
> +#else
> +#define DBG(format, args...)
> +#endif

Please drop the custom debug macros and use pr_debug(), pr_err(), etc.
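
Something along these lines should do (untested sketch, reusing the names
from your patch):

	#define pr_fmt(fmt) "cppi41: " fmt	/* before the #includes */

	pr_debug("Linking RAM region 0 base @ %p, value: %x\n",
		 q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG,
		 __raw_readl(q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG));

	pr_err("%s: unable to allocate linking RAM\n", __func__);

pr_debug() compiles away unless DEBUG (or dynamic debug) is enabled, so
the CPPI41_DEBUG define can go away as well.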

> +static struct {
> +     void *virt_addr;
> +     dma_addr_t phys_addr;
> +} linking_ram[CPPI41_NUM_QUEUE_MGR];
> +
> +static u32 *allocated_queues[CPPI41_NUM_QUEUE_MGR];
> +
> +/* First 32 packet descriptors are reserved for unallocated memory regions. */
> +static u32 next_desc_index[CPPI41_NUM_QUEUE_MGR] = { 1 << 5 };
> +static u8  next_mem_rgn[CPPI41_NUM_QUEUE_MGR];
> +
> +static struct {
> +     size_t rgn_size;
> +     void *virt_addr;
> +     dma_addr_t phys_addr;
> +     struct cppi41_queue_obj queue_obj;
> +     u8 mem_rgn;
> +} dma_teardown[CPPI41_NUM_DMA_BLOCK];
> +
> +/******************** CPPI 4.1 Functions (External Interface) *****************/
> +
> +int __init cppi41_queue_mgr_init(u8 q_mgr, dma_addr_t rgn0_base, u16 rgn0_size)
> +{
> +     void __iomem *q_mgr_regs;
> +     void *ptr;
> +
> +     if (q_mgr >= cppi41_num_queue_mgr)
> +             return -EINVAL;
> +
> +     q_mgr_regs = cppi41_queue_mgr[q_mgr].q_mgr_rgn_base;
> +
> +     __raw_writel(rgn0_base, q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG);
> +     DBG("Linking RAM region 0 base @ %p, value: %x\n",
> +         q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG,
> +         __raw_readl(q_mgr_regs + QMGR_LINKING_RAM_RGN0_BASE_REG));
> +
> +     __raw_writel(rgn0_size, q_mgr_regs + QMGR_LINKING_RAM_RGN0_SIZE_REG);
> +     DBG("Linking RAM region 0 size @ %p, value: %x\n",
> +         q_mgr_regs + QMGR_LINKING_RAM_RGN0_SIZE_REG,
> +         __raw_readl(q_mgr_regs + QMGR_LINKING_RAM_RGN0_SIZE_REG));
> +
> +     ptr = dma_alloc_coherent(NULL, 0x10000 - rgn0_size * 4,
> +                              &linking_ram[q_mgr].phys_addr,
> +                              GFP_KERNEL | GFP_DMA);
> +     if (ptr == NULL) {
> +             printk(KERN_ERR "ERROR: %s: Unable to allocate "
> +                    "linking RAM.\n", __func__);
> +             return -ENOMEM;
> +     }
> +     linking_ram[q_mgr].virt_addr = ptr;
> +
> +     __raw_writel(linking_ram[q_mgr].phys_addr,
> +                  q_mgr_regs + QMGR_LINKING_RAM_RGN1_BASE_REG);
> +     DBG("Linking RAM region 1 base @ %p, value: %x\n",
> +         q_mgr_regs + QMGR_LINKING_RAM_RGN1_BASE_REG,
> +         __raw_readl(q_mgr_regs + QMGR_LINKING_RAM_RGN1_BASE_REG));
> +
> +     ptr = kzalloc(BITS_TO_LONGS(cppi41_queue_mgr[q_mgr].num_queue),
> +                   GFP_KERNEL);
> +     if (ptr == NULL) {
> +             printk(KERN_ERR "ERROR: %s: Unable to allocate queue bitmap.\n",
> +                    __func__);
> +             dma_free_coherent(NULL, 0x10000 - rgn0_size * 4,
> +                               linking_ram[q_mgr].virt_addr,
> +                               linking_ram[q_mgr].phys_addr);
> +             return -ENOMEM;
> +     }
> +     allocated_queues[q_mgr] = ptr;
> +
> +     return 0;
> +}
> +
> +int __init cppi41_dma_block_init(u8 dma_num, u8 q_mgr, u8 num_order,
> +                              u8 *sched_tbl, u8 tbl_size)
> +{
> +     const struct cppi41_dma_block *dma_block;
> +     struct cppi41_teardown_desc *curr_td;
> +     dma_addr_t td_addr;
> +     unsigned num_desc, num_reg;
> +     void *ptr;
> +     int error, i, j, k;
> +     u16 q_num;
> +     u32 val;
> +
> +     if (dma_num >= cppi41_num_dma_block ||
> +         q_mgr >= cppi41_num_queue_mgr ||
> +         !tbl_size || sched_tbl == NULL)
> +             return -EINVAL;
> +
> +     error = cppi41_queue_alloc(CPPI41_FREE_DESC_QUEUE |
> +                                CPPI41_UNASSIGNED_QUEUE, q_mgr, &q_num);
> +     if (error) {
> +             printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
> +                    "descriptor queue.\n", __func__);
> +             return error;
> +     }
> +     DBG("Teardown descriptor queue %d in queue manager 0 "
> +         "allocated\n", q_num);
> +
> +     /*
> +      * Tell the hardware about the Teardown descriptor
> +      * queue manager and queue number.
> +      */
> +     dma_block = &cppi41_dma_block[dma_num];
> +     __raw_writel((q_mgr << DMA_TD_DESC_QMGR_SHIFT) |
> +                  (q_num << DMA_TD_DESC_QNUM_SHIFT),
> +                  dma_block->global_ctrl_base +
> +                  DMA_TEARDOWN_FREE_DESC_CTRL_REG);
> +     DBG("Teardown free descriptor control @ %p, value: %x\n",
> +         dma_block->global_ctrl_base + DMA_TEARDOWN_FREE_DESC_CTRL_REG,
> +         __raw_readl(dma_block->global_ctrl_base +
> +                     DMA_TEARDOWN_FREE_DESC_CTRL_REG));
> +
> +     num_desc = 1 << num_order;
> +     dma_teardown[dma_num].rgn_size = num_desc *
> +                                      sizeof(struct cppi41_teardown_desc);
> +
> +     /* Pre-allocate teardown descriptors. */
> +     ptr = dma_alloc_coherent(NULL, dma_teardown[dma_num].rgn_size,
> +                              &dma_teardown[dma_num].phys_addr,
> +                              GFP_KERNEL | GFP_DMA);
> +     if (ptr == NULL) {
> +             printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
> +                    "descriptors.\n", __func__);
> +             error = -ENOMEM;
> +             goto free_queue;
> +     }
> +     dma_teardown[dma_num].virt_addr = ptr;
> +
> +     error = cppi41_mem_rgn_alloc(q_mgr, dma_teardown[dma_num].phys_addr, 5,
> +                                  num_order, &dma_teardown[dma_num].mem_rgn);
> +     if (error) {
> +             printk(KERN_ERR "ERROR: %s: Unable to allocate queue manager "
> +                    "memory region for teardown descriptors.\n", __func__);
> +             goto free_mem;
> +     }
> +
> +     error = cppi41_queue_init(&dma_teardown[dma_num].queue_obj, 0, q_num);
> +     if (error) {
> +             printk(KERN_ERR "ERROR: %s: Unable to initialize teardown "
> +                    "free descriptor queue.\n", __func__);
> +             goto free_rgn;
> +     }
> +
> +     /*
> +      * Push all teardown descriptors to the free teardown queue
> +      * for the CPPI 4.1 system.
> +      */
> +     curr_td = dma_teardown[dma_num].virt_addr;
> +     td_addr = dma_teardown[dma_num].phys_addr;
> +
> +     for (i = 0; i < num_desc; i++) {
> +             cppi41_queue_push(&dma_teardown[dma_num].queue_obj, td_addr,
> +                               sizeof(*curr_td), 0);
> +             td_addr += sizeof(*curr_td);
> +     }
> +
> +     /* Initialize the DMA scheduler. */
> +     num_reg = (tbl_size + 3) / 4;
> +     for (k = i = 0; i < num_reg; i++) {
> +             for (val = j = 0; j < 4; j++, k++) {
> +                     val >>= 8;
> +                     if (k < tbl_size)
> +                             val |= sched_tbl[k] << 24;
> +             }
> +
> +             __raw_writel(val, dma_block->sched_table_base +
> +                          DMA_SCHED_TABLE_WORD_REG(i));
> +             DBG("DMA scheduler table @ %p, value written: %x\n",
> +                 dma_block->sched_table_base + DMA_SCHED_TABLE_WORD_REG(i),
> +                 val);
> +     }
> +
> +     __raw_writel((tbl_size - 1) << DMA_SCHED_LAST_ENTRY_SHIFT |
> +                  DMA_SCHED_ENABLE_MASK,
> +                  dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG);
> +     DBG("DMA scheduler control @ %p, value: %x\n",
> +         dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG,
> +         __raw_readl(dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG));
> +
> +     return 0;
> +
> +free_rgn:
> +     cppi41_mem_rgn_free(q_mgr, dma_teardown[dma_num].mem_rgn);
> +free_mem:
> +     dma_free_coherent(NULL, dma_teardown[dma_num].rgn_size,
> +                       dma_teardown[dma_num].virt_addr,
> +                       dma_teardown[dma_num].phys_addr);
> +free_queue:
> +     cppi41_queue_free(q_mgr, q_num);
> +     return error;
> +}
> +
> +/*
> + * cppi41_mem_rgn_alloc - allocate a memory region within the queue manager
> + */
> +int cppi41_mem_rgn_alloc(u8 q_mgr, dma_addr_t rgn_addr, u8 size_order,
> +                      u8 num_order, u8 *mem_rgn)
> +{
> +     void __iomem *desc_mem_regs;
> +     u32 num_desc = 1 << num_order, index, ctrl;
> +     int rgn;
> +
> +     DBG("%s called with rgn_addr = %08x, size_order = %d, num_order = %d\n",
> +         __func__, rgn_addr, size_order, num_order);
> +
> +     if (q_mgr >= cppi41_num_queue_mgr ||
> +         size_order < 5 || size_order > 13 ||
> +         num_order  < 5 || num_order  > 12 ||
> +         (rgn_addr & ((1 << size_order) - 1)))
> +             return -EINVAL;
> +
> +     rgn = next_mem_rgn[q_mgr];
> +     index = next_desc_index[q_mgr];
> +     if (rgn >= CPPI41_MAX_MEM_RGN || index + num_desc > 0x4000)
> +             return -ENOSPC;
> +
> +     next_mem_rgn[q_mgr] = rgn + 1;
> +     next_desc_index[q_mgr] = index + num_desc;
> +
> +     desc_mem_regs = cppi41_queue_mgr[q_mgr].desc_mem_rgn_base;
> +
> +     /* Write the base register */
> +     __raw_writel(rgn_addr, desc_mem_regs + QMGR_MEM_RGN_BASE_REG(rgn));
> +     DBG("Descriptor region base @ %p, value: %x\n",
> +         desc_mem_regs + QMGR_MEM_RGN_BASE_REG(rgn),
> +         __raw_readl(desc_mem_regs + QMGR_MEM_RGN_BASE_REG(rgn)));
> +
> +     /* Write the control register */
> +     ctrl = ((index << QMGR_MEM_RGN_INDEX_SHIFT) &
> +             QMGR_MEM_RGN_INDEX_MASK) |
> +            (((size_order - 5) << QMGR_MEM_RGN_DESC_SIZE_SHIFT) &
> +             QMGR_MEM_RGN_DESC_SIZE_MASK) |
> +            (((num_order - 5) << QMGR_MEM_RGN_SIZE_SHIFT) &
> +             QMGR_MEM_RGN_SIZE_MASK);
> +     __raw_writel(ctrl, desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(rgn));
> +     DBG("Descriptor region control @ %p, value: %x\n",
> +         desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(rgn),
> +         __raw_readl(desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(rgn)));
> +
> +     *mem_rgn = rgn;
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_mem_rgn_alloc);
> +
> +/*
> + * cppi41_mem_rgn_free - free the memory region within the queue manager
> + */
> +int cppi41_mem_rgn_free(u8 q_mgr, u8 mem_rgn)
> +{
> +     void __iomem *desc_mem_regs;
> +
> +     DBG("%s called.\n", __func__);
> +
> +     if (q_mgr >= cppi41_num_queue_mgr || mem_rgn >= next_mem_rgn[q_mgr])
> +             return -EINVAL;
> +
> +     desc_mem_regs = cppi41_queue_mgr[q_mgr].desc_mem_rgn_base;
> +
> +     if (__raw_readl(desc_mem_regs + QMGR_MEM_RGN_BASE_REG(mem_rgn)) == 0)
> +             return -ENOENT;
> +
> +     __raw_writel(0, desc_mem_regs + QMGR_MEM_RGN_BASE_REG(mem_rgn));
> +     __raw_writel(0, desc_mem_regs + QMGR_MEM_RGN_CTRL_REG(mem_rgn));
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_mem_rgn_free);
> +
> +/*
> + * cppi41_tx_ch_init - initialize a CPPI 4.1 Tx channel object
> + *
> + * Verify the channel info (range checking, etc.) and store the channel
> + * information within the object structure.
> + */
> +int cppi41_tx_ch_init(struct cppi41_dma_ch_obj *tx_ch_obj,
> +                   u8 dma_num, u8 ch_num)
> +{
> +     if (dma_num >= cppi41_num_dma_block ||
> +         ch_num  >= cppi41_dma_block[dma_num].num_tx_ch)
> +             return -EINVAL;
> +
> +     /* Populate the channel object structure */
> +     tx_ch_obj->base_addr  = cppi41_dma_block[dma_num].ch_ctrl_stat_base +
> +                             DMA_CH_TX_GLOBAL_CFG_REG(ch_num);
> +     tx_ch_obj->global_cfg = __raw_readl(tx_ch_obj->base_addr);
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_tx_ch_init);
> +
> +/*
> + * cppi41_rx_ch_init - initialize a CPPI 4.1 Rx channel object
> + *
> + * Verify the channel info (range checking, etc.) and store the channel
> + * information within the object structure.
> + */
> +int cppi41_rx_ch_init(struct cppi41_dma_ch_obj *rx_ch_obj,
> +                   u8 dma_num, u8 ch_num)
> +{
> +     if (dma_num >= cppi41_num_dma_block ||
> +         ch_num  >= cppi41_dma_block[dma_num].num_rx_ch)
> +             return -EINVAL;
> +
> +     /* Populate the channel object structure */
> +     rx_ch_obj->base_addr  = cppi41_dma_block[dma_num].ch_ctrl_stat_base +
> +                             DMA_CH_RX_GLOBAL_CFG_REG(ch_num);
> +     rx_ch_obj->global_cfg = __raw_readl(rx_ch_obj->base_addr);
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_rx_ch_init);
> +
> +/*
> + * We have to cache the last written Rx/Tx channel global configuration register
> + * value due to its bits other than enable/teardown being write-only. Yet there
> + * is a caveat related to caching the enable bit: this bit may be automatically
> + * cleared as a result of teardown, so we can't trust its cached value!
> + * When modifying the write-only register fields, we're making use of the fact
> + * that they read back as zeros, and not clearing them explicitly...
> + */
> +
> +/*
> + * cppi41_dma_ch_default_queue - set CPPI 4.1 channel default completion queue
> + */
> +void cppi41_dma_ch_default_queue(struct cppi41_dma_ch_obj *dma_ch_obj,
> +                              u8 q_mgr, u16 q_num)
> +{
> +     u32 val = dma_ch_obj->global_cfg;
> +
> +     /* Clear the fields to be modified. */
> +     val &= ~(DMA_CH_TX_DEFAULT_QMGR_MASK | DMA_CH_TX_DEFAULT_QNUM_MASK |
> +              DMA_CH_TX_ENABLE_MASK);
> +
> +     /* Set the default completion queue. */
> +     val |= ((q_mgr << DMA_CH_TX_DEFAULT_QMGR_SHIFT) &
> +             DMA_CH_TX_DEFAULT_QMGR_MASK) |
> +            ((q_num << DMA_CH_TX_DEFAULT_QNUM_SHIFT) &
> +             DMA_CH_TX_DEFAULT_QNUM_MASK);
> +
> +     /* Get the current state of the enable bit. */
> +     dma_ch_obj->global_cfg = val |= __raw_readl(dma_ch_obj->base_addr);
> +     __raw_writel(val, dma_ch_obj->base_addr);
> +     DBG("Channel global configuration @ %p, value written: %x, "
> +         "value read: %x\n", dma_ch_obj->base_addr, val,
> +         __raw_readl(dma_ch_obj->base_addr));
> +
> +}
> +EXPORT_SYMBOL(cppi41_dma_ch_default_queue);
> +
> +/*
> + * cppi41_rx_ch_configure - configure CPPI 4.1 Rx channel
> + */
> +void cppi41_rx_ch_configure(struct cppi41_dma_ch_obj *rx_ch_obj,
> +                         struct cppi41_rx_ch_cfg  *cfg)
> +{
> +     void __iomem *base = rx_ch_obj->base_addr;
> +     u32 val = __raw_readl(rx_ch_obj->base_addr);
> +
> +     val |= ((cfg->sop_offset << DMA_CH_RX_SOP_OFFSET_SHIFT) &
> +             DMA_CH_RX_SOP_OFFSET_MASK) |
> +            ((cfg->default_desc_type << DMA_CH_RX_DEFAULT_DESC_TYPE_SHIFT) &
> +             DMA_CH_RX_DEFAULT_DESC_TYPE_MASK) |
> +            ((cfg->retry_starved << DMA_CH_RX_ERROR_HANDLING_SHIFT) &
> +             DMA_CH_RX_ERROR_HANDLING_MASK) |
> +            ((cfg->rx_queue.q_mgr << DMA_CH_RX_DEFAULT_RQ_QMGR_SHIFT) &
> +             DMA_CH_RX_DEFAULT_RQ_QMGR_MASK) |
> +            ((cfg->rx_queue.q_num << DMA_CH_RX_DEFAULT_RQ_QNUM_SHIFT) &
> +             DMA_CH_RX_DEFAULT_RQ_QNUM_MASK);
> +
> +     rx_ch_obj->global_cfg = val;
> +     __raw_writel(val, base);
> +     DBG("Rx channel global configuration @ %p, value written: %x, "
> +         "value read: %x\n", base, val, __raw_readl(base));
> +
> +     base -= DMA_CH_RX_GLOBAL_CFG_REG(0);
> +
> +     /*
> +      * Set up the packet configuration register
> +      * based on the descriptor type...
> +      */
> +     switch (cfg->default_desc_type) {
> +     case DMA_CH_RX_DEFAULT_DESC_EMBED:
> +             val = ((cfg->cfg.embed_pkt.fd_queue.q_mgr <<
> +                     DMA_CH_RX_EMBED_FDQ_QMGR_SHIFT) &
> +                    DMA_CH_RX_EMBED_FDQ_QMGR_MASK) |
> +                   ((cfg->cfg.embed_pkt.fd_queue.q_num <<
> +                     DMA_CH_RX_EMBED_FDQ_QNUM_SHIFT) &
> +                    DMA_CH_RX_EMBED_FDQ_QNUM_MASK) |
> +                   ((cfg->cfg.embed_pkt.num_buf_slot <<
> +                     DMA_CH_RX_EMBED_NUM_SLOT_SHIFT) &
> +                    DMA_CH_RX_EMBED_NUM_SLOT_MASK) |
> +                   ((cfg->cfg.embed_pkt.sop_slot_num <<
> +                     DMA_CH_RX_EMBED_SOP_SLOT_SHIFT) &
> +                    DMA_CH_RX_EMBED_SOP_SLOT_MASK);
> +
> +             __raw_writel(val, base + DMA_CH_RX_EMBED_PKT_CFG_REG_B(0));
> +             DBG("Rx channel embedded packet configuration B @ %p, "
> +                 "value written: %x\n",
> +                 base + DMA_CH_RX_EMBED_PKT_CFG_REG_B(0), val);
> +
> +             val = ((cfg->cfg.embed_pkt.free_buf_pool[0].b_pool <<
> +                     DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(0)) &
> +                    DMA_CH_RX_EMBED_FBP_PNUM_MASK(0)) |
> +                   ((cfg->cfg.embed_pkt.free_buf_pool[0].b_mgr <<
> +                     DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(0)) &
> +                    DMA_CH_RX_EMBED_FBP_BMGR_MASK(0)) |
> +                   ((cfg->cfg.embed_pkt.free_buf_pool[1].b_pool <<
> +                     DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(1)) &
> +                    DMA_CH_RX_EMBED_FBP_PNUM_MASK(1)) |
> +                   ((cfg->cfg.embed_pkt.free_buf_pool[1].b_mgr <<
> +                     DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(1)) &
> +                    DMA_CH_RX_EMBED_FBP_BMGR_MASK(1)) |
> +                   ((cfg->cfg.embed_pkt.free_buf_pool[2].b_pool <<
> +                     DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(2)) &
> +                    DMA_CH_RX_EMBED_FBP_PNUM_MASK(2)) |
> +                   ((cfg->cfg.embed_pkt.free_buf_pool[2].b_mgr <<
> +                     DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(2)) &
> +                    DMA_CH_RX_EMBED_FBP_BMGR_MASK(2)) |
> +                   ((cfg->cfg.embed_pkt.free_buf_pool[3].b_pool <<
> +                     DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(3)) &
> +                    DMA_CH_RX_EMBED_FBP_PNUM_MASK(3)) |
> +                   ((cfg->cfg.embed_pkt.free_buf_pool[3].b_mgr <<
> +                     DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(3)) &
> +                    DMA_CH_RX_EMBED_FBP_BMGR_MASK(3));
> +
> +             __raw_writel(val, base + DMA_CH_RX_EMBED_PKT_CFG_REG_A(0));
> +             DBG("Rx channel embedded packet configuration A @ %p, "
> +                 "value written: %x\n",
> +                 base + DMA_CH_RX_EMBED_PKT_CFG_REG_A(0), val);
> +             break;
> +     case DMA_CH_RX_DEFAULT_DESC_HOST:
> +             val = ((cfg->cfg.host_pkt.fdb_queue[0].q_num <<
> +                     DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(0)) &
> +                    DMA_CH_RX_HOST_FDQ_QNUM_MASK(0)) |
> +                   ((cfg->cfg.host_pkt.fdb_queue[0].q_mgr <<
> +                     DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(0)) &
> +                    DMA_CH_RX_HOST_FDQ_QMGR_MASK(0)) |
> +                   ((cfg->cfg.host_pkt.fdb_queue[1].q_num <<
> +                     DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(1)) &
> +                    DMA_CH_RX_HOST_FDQ_QNUM_MASK(1)) |
> +                   ((cfg->cfg.host_pkt.fdb_queue[1].q_mgr <<
> +                     DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(1)) &
> +                    DMA_CH_RX_HOST_FDQ_QMGR_MASK(1));
> +
> +             __raw_writel(val, base + DMA_CH_RX_HOST_PKT_CFG_REG_A(0));
> +             DBG("Rx channel host packet configuration A @ %p, "
> +                 "value written: %x\n",
> +                 base + DMA_CH_RX_HOST_PKT_CFG_REG_A(0), val);
> +
> +             val = ((cfg->cfg.host_pkt.fdb_queue[2].q_num <<
> +                     DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(2)) &
> +                    DMA_CH_RX_HOST_FDQ_QNUM_MASK(2)) |
> +                   ((cfg->cfg.host_pkt.fdb_queue[2].q_mgr <<
> +                     DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(2)) &
> +                    DMA_CH_RX_HOST_FDQ_QMGR_MASK(2)) |
> +                   ((cfg->cfg.host_pkt.fdb_queue[3].q_num <<
> +                    DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(3)) &
> +                    DMA_CH_RX_HOST_FDQ_QNUM_MASK(3)) |
> +                   ((cfg->cfg.host_pkt.fdb_queue[3].q_mgr <<
> +                     DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(3)) &
> +                    DMA_CH_RX_HOST_FDQ_QMGR_MASK(3));
> +
> +             __raw_writel(val, base + DMA_CH_RX_HOST_PKT_CFG_REG_B(0));
> +             DBG("Rx channel host packet configuration B @ %p, "
> +                 "value written: %x\n",
> +                 base + DMA_CH_RX_HOST_PKT_CFG_REG_B(0), val);
> +             break;
> +     case DMA_CH_RX_DEFAULT_DESC_MONO:
> +             val = ((cfg->cfg.mono_pkt.fd_queue.q_num <<
> +                     DMA_CH_RX_MONO_FDQ_QNUM_SHIFT) &
> +                    DMA_CH_RX_MONO_FDQ_QNUM_MASK) |
> +                   ((cfg->cfg.mono_pkt.fd_queue.q_mgr <<
> +                     DMA_CH_RX_MONO_FDQ_QMGR_SHIFT) &
> +                    DMA_CH_RX_MONO_FDQ_QMGR_MASK) |
> +                   ((cfg->cfg.mono_pkt.sop_offset <<
> +                     DMA_CH_RX_MONO_SOP_OFFSET_SHIFT) &
> +                    DMA_CH_RX_MONO_SOP_OFFSET_MASK);
> +
> +             __raw_writel(val, base + DMA_CH_RX_MONO_PKT_CFG_REG(0));
> +             DBG("Rx channel monolithic packet configuration @ %p, "
> +                 "value written: %x\n",
> +                 base + DMA_CH_RX_MONO_PKT_CFG_REG(0), val);
> +             break;
> +     }
> +}
> +EXPORT_SYMBOL(cppi41_rx_ch_configure);
> +
> +/*
> + * cppi41_dma_ch_teardown - teardown a given Tx/Rx channel
> + */
> +void cppi41_dma_ch_teardown(struct cppi41_dma_ch_obj *dma_ch_obj)
> +{
> +     u32 val = __raw_readl(dma_ch_obj->base_addr);
> +
> +     /* Initiate channel teardown. */
> +     val |= dma_ch_obj->global_cfg & ~DMA_CH_TX_ENABLE_MASK;
> +     dma_ch_obj->global_cfg = val |= DMA_CH_TX_TEARDOWN_MASK;
> +     __raw_writel(val, dma_ch_obj->base_addr);
> +     DBG("Tear down channel @ %p, value written: %x, value read: %x\n",
> +         dma_ch_obj->base_addr, val, __raw_readl(dma_ch_obj->base_addr));
> +}
> +EXPORT_SYMBOL(cppi41_dma_ch_teardown);
> +
> +/*
> + * cppi41_dma_ch_enable - enable Tx/Rx DMA channel in hardware
> + *
> + * Makes the channel ready for data transmission/reception.
> + */
> +void cppi41_dma_ch_enable(struct cppi41_dma_ch_obj *dma_ch_obj)
> +{
> +     u32 val = dma_ch_obj->global_cfg | DMA_CH_TX_ENABLE_MASK;
> +
> +     /* Teardown bit remains set after completion, so clear it now... */
> +     dma_ch_obj->global_cfg = val &= ~DMA_CH_TX_TEARDOWN_MASK;
> +     __raw_writel(val, dma_ch_obj->base_addr);
> +     DBG("Enable channel @ %p, value written: %x, value read: %x\n",
> +         dma_ch_obj->base_addr, val, __raw_readl(dma_ch_obj->base_addr));
> +}
> +EXPORT_SYMBOL(cppi41_dma_ch_enable);
> +
> +/*
> + * cppi41_dma_ch_disable - disable Tx/Rx DMA channel in hardware
> + */
> +void cppi41_dma_ch_disable(struct cppi41_dma_ch_obj *dma_ch_obj)
> +{
> +     dma_ch_obj->global_cfg &= ~DMA_CH_TX_ENABLE_MASK;
> +     __raw_writel(dma_ch_obj->global_cfg, dma_ch_obj->base_addr);
> +     DBG("Disable channel @ %p, value written: %x, value read: %x\n",
> +         dma_ch_obj->base_addr, dma_ch_obj->global_cfg,
> +         __raw_readl(dma_ch_obj->base_addr));
> +}
> +EXPORT_SYMBOL(cppi41_dma_ch_disable);
> +
> +/**
> + * alloc_queue - allocate a queue in the given range
> + * @allocated:       pointer to the bitmap of the allocated queues
> + * @excluded:        pointer to the bitmap of the queues excluded from allocation
> + *           (optional)
> + * @start:   starting queue number
> + * @count:   number of queues available
> + *
> + * Returns queue number on success, -ENOSPC otherwise.
> + */
> +static int alloc_queue(u32 *allocated, const u32 *excluded, unsigned start,
> +                    unsigned count)
> +{
> +     u32 bit, mask = 0;
> +     int index = -1;
> +
> +     /*
> +      * We're starting the loop as if we've just wrapped around 32 bits
> +      * in order to save on preloading the bitmasks.
> +      */
> +     for (bit = 0; count--; start++, bit <<= 1) {
> +             /* Have we just wrapped around 32 bits? */
> +             if (!bit) {
> +                     /* Start over with the next bitmask word */
> +                     bit = 1;
> +                     index++;
> +                     /* Have we just entered the loop? */
> +                     if (!index) {
> +                             /* Calculate the starting values */
> +                             bit <<= start & 0x1f;
> +                             index = start >> 5;
> +                     }
> +                     /*
> +                      * Load the next word of the allocated bitmask OR'ing
> +                      * it with the excluded bitmask if it's been passed.
> +                      */
> +                     mask = allocated[index];
> +                     if (excluded != NULL)
> +                             mask |= excluded[index];
> +             }
> +             /*
> +              * If the bit in the combined bitmask is zero,
> +              * we've just found a free queue.
> +              */
> +             if (!(mask & bit)) {
> +                     allocated[index] |= bit;
> +                     return start;
> +             }
> +     }
> +     return -ENOSPC;
> +}
> +
> +/*
> + * cppi41_queue_alloc - allocate a queue of a given type in the queue manager
> + */
> +int cppi41_queue_alloc(u8 type, u8 q_mgr, u16 *q_num)
> +{
> +     int res = -ENOSPC;
> +
> +     if (q_mgr >= cppi41_num_queue_mgr)
> +             return -EINVAL;
> +
> +     /* Mask out the unsupported queue types */
> +     type &= cppi41_queue_mgr[q_mgr].queue_types;
> +     /* First see if a free descriptor queue was requested... */
> +     if (type & CPPI41_FREE_DESC_QUEUE)
> +             res = alloc_queue(allocated_queues[q_mgr], NULL,
> +                               cppi41_queue_mgr[q_mgr].base_fdq_num,  16);
> +
> +     /* Then see if a free descriptor/buffer queue was requested... */
> +     if (res < 0 && (type & CPPI41_FREE_DESC_BUF_QUEUE))
> +             res = alloc_queue(allocated_queues[q_mgr], NULL,
> +                               cppi41_queue_mgr[q_mgr].base_fdbq_num, 16);
> +
> +     /* Last see if an unassigned queue was requested... */
> +     if (res < 0 && (type & CPPI41_UNASSIGNED_QUEUE))
> +             res = alloc_queue(allocated_queues[q_mgr],
> +                               cppi41_queue_mgr[q_mgr].assigned, 0,
> +                               cppi41_queue_mgr[q_mgr].num_queue);
> +
> +     /* See if any queue was allocated... */
> +     if (res < 0)
> +             return res;
> +
> +     /* Return the queue allocated */
> +     *q_num = res;
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_queue_alloc);
> +
> +/*
> + * cppi41_queue_free - free the given queue in the queue manager
> + */
> +int cppi41_queue_free(u8 q_mgr, u16 q_num)
> +{
> +     int index = q_num >> 5, bit = 1 << (q_num & 0x1f);
> +
> +     if (q_mgr >= cppi41_num_queue_mgr ||
> +         q_num >= cppi41_queue_mgr[q_mgr].num_queue ||
> +         !(allocated_queues[q_mgr][index] & bit))
> +             return -EINVAL;
> +
> +     allocated_queues[q_mgr][index] &= ~bit;
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_queue_free);
> +
> +/*
> + * cppi41_queue_init - initialize a CPPI 4.1 queue object
> + */
> +int cppi41_queue_init(struct cppi41_queue_obj *queue_obj, u8 q_mgr, u16 q_num)
> +{
> +     if (q_mgr >= cppi41_num_queue_mgr ||
> +         q_num >= cppi41_queue_mgr[q_mgr].num_queue)
> +             return -EINVAL;
> +
> +     queue_obj->base_addr = cppi41_queue_mgr[q_mgr].q_mgmt_rgn_base +
> +                            QMGR_QUEUE_STATUS_REG_A(q_num);
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_queue_init);
> +
> +/*
> + * cppi41_queue_push - push a descriptor into the given queue
> + */
> +void cppi41_queue_push(const struct cppi41_queue_obj *queue_obj, u32 desc_addr,
> +                    u32 desc_size, u32 pkt_size)
> +{
> +     u32 val;
> +
> +     /*
> +      * Write to the tail of the queue.
> +      * TODO: Can't think of a reason why a queue to head may be required.
> +      * If it is, the API may have to be extended.
> +      */
> +#if 0
> +     /*
> +      * Also, can't understand why packet size is required to queue up a
> +      * descriptor. The spec says packet size *must* be written prior to
> +      * the packet write operation.
> +      */
> +     if (pkt_size)
> +             val = (pkt_size << QMGR_QUEUE_PKT_SIZE_SHIFT) &
> +                   QMGR_QUEUE_PKT_SIZE_MASK;
> +     __raw_writel(val, queue_obj->base_addr + QMGR_QUEUE_REG_C(0));
> +#endif

Is this #if 0 code needed for future reference, or can it be dropped?


> +
> +     val = (((desc_size - 24) >> (2 - QMGR_QUEUE_DESC_SIZE_SHIFT)) &
> +            QMGR_QUEUE_DESC_SIZE_MASK) |
> +           (desc_addr & QMGR_QUEUE_DESC_PTR_MASK);
> +
> +     DBG("Pushing value %x to queue @ %p\n", val, queue_obj->base_addr);
> +
> +     __raw_writel(val, queue_obj->base_addr + QMGR_QUEUE_REG_D(0));
> +}
> +EXPORT_SYMBOL(cppi41_queue_push);
> +
> +/*
> + * cppi41_queue_pop - pop a descriptor from a given queue
> + */
> +unsigned long cppi41_queue_pop(const struct cppi41_queue_obj *queue_obj)
> +{
> +     u32 val = __raw_readl(queue_obj->base_addr + QMGR_QUEUE_REG_D(0));
> +
> +     DBG("Popping value %x from queue @ %p\n", val, queue_obj->base_addr);
> +
> +     return val & QMGR_QUEUE_DESC_PTR_MASK;
> +}
> +EXPORT_SYMBOL(cppi41_queue_pop);
> +
> +/*
> + * cppi41_get_teardown_info - extract information from a teardown descriptor
> + */
> +int cppi41_get_teardown_info(unsigned long addr, u32 *info)
> +{
> +     struct cppi41_teardown_desc *desc;
> +     int dma_num;
> +
> +     for (dma_num = 0; dma_num < cppi41_num_dma_block; dma_num++)
> +             if (addr >= dma_teardown[dma_num].phys_addr &&
> +                 addr <  dma_teardown[dma_num].phys_addr +
> +                         dma_teardown[dma_num].rgn_size)
> +                     break;
> +
> +     if (dma_num == cppi41_num_dma_block)
> +             return -EINVAL;
> +
> +     desc = addr - dma_teardown[dma_num].phys_addr +
> +            dma_teardown[dma_num].virt_addr;
> +
> +     if ((desc->teardown_info & CPPI41_DESC_TYPE_MASK) !=
> +         (CPPI41_DESC_TYPE_TEARDOWN << CPPI41_DESC_TYPE_SHIFT))
> +             return -EINVAL;
> +
> +     *info = desc->teardown_info;
> +#if 1
> +     /* Hardware is not giving the current DMA number as of now. :-/ */
> +     *info |= (dma_num << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &
> +              CPPI41_TEARDOWN_DMA_NUM_MASK;
> +#else
> +     dma_num = (desc->teardown_info & CPPI41_TEARDOWN_DMA_NUM_MASK) >>
> +              CPPI41_TEARDOWN_DMA_NUM_SHIFT;
> +#endif

Again, please drop the unused code and the '#if 1'.
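
i.e. just keep the unconditional path, something like (untested):

	*info = desc->teardown_info;
	/* the hardware doesn't report the DMA block number, so fill it in here */
	*info |= (dma_num << CPPI41_TEARDOWN_DMA_NUM_SHIFT) &
		 CPPI41_TEARDOWN_DMA_NUM_MASK;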

> +     cppi41_queue_push(&dma_teardown[dma_num].queue_obj, addr,
> +                       sizeof(struct cppi41_teardown_desc), 0);
> +
> +     return 0;
> +}
> +EXPORT_SYMBOL(cppi41_get_teardown_info);
> +
> +MODULE_DESCRIPTION("TI CPPI 4.1 support");
> +MODULE_AUTHOR("MontaVista Software");
> +MODULE_LICENSE("GPL");
> Index: linux-davinci/arch/arm/mach-davinci/include/mach/cppi41.h
> ===================================================================
> --- /dev/null
> +++ linux-davinci/arch/arm/mach-davinci/include/mach/cppi41.h
> @@ -0,0 +1,710 @@
> +/*
> + * CPPI 4.1 definitions
> + *
> + * Copyright (c) 2008-2009, MontaVista Software, Inc. <[email protected]>
> + *
> + * This program is free software; you can distribute it and/or modify it
> + * under the terms of the GNU General Public License (Version 2) as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
> + * for more details.
> + *
> + * You should have received a copy of the GNU General Public License along
> + * with this program; if not, write to the Free Software Foundation, Inc.,
> + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
> + *
> + */
> +
> +#include <linux/types.h>
> +
> +/*
> + * Queue Manager - Control Registers Region
> + */
> +#define QMGR_REVISION_REG            0x00    /* Major and minor versions */
> +                                             /* of the module */
> +#define QMGR_QUEUE_DIVERSION_REG     0x08    /* Queue Diversion register */
> +#define QMGR_FREE_DESC_BUF_STARVED_REG(n) (0x20 + ((n) << 2)) /* Free Desc./ */
> +                                             /* Buffer Starvation Count */
> +#define QMGR_FREE_DESC_STARVED_REG(n)        (0x30 + ((n) << 2)) /* Free Desc. */
> +                                             /* Starvation Count */
> +#define QMGR_LINKING_RAM_RGN0_BASE_REG       0x80    /* Linking RAM Region 0 Base */
> +                                             /* Address */
> +#define QMGR_LINKING_RAM_RGN0_SIZE_REG       0x84    /* Linking RAM Region 0 Size */
> +#define QMGR_LINKING_RAM_RGN1_BASE_REG       0x88    /* Linking RAM Region 1 Base */
> +                                             /* Address */
> +#define QMGR_QUEUE_PENDING_REG(n)    (0x90 + ((n) << 2)) /* Pending status */
> +                                             /* for all queues */
> +
> +/*
> + * Queue Manager - Memory Region Registers
> + */
> +#define QMGR_MEM_RGN_BASE_REG(r)     (0x00 + ((r) << 4))
> +#define QMGR_MEM_RGN_CTRL_REG(r)     (0x04 + ((r) << 4))
> +
> +/* Memory Region R Control Register bits */
> +#define QMGR_MEM_RGN_INDEX_SHIFT     16
> +#define QMGR_MEM_RGN_INDEX_MASK      (0x3fff << QMGR_MEM_RGN_INDEX_SHIFT)
> +#define QMGR_MEM_RGN_DESC_SIZE_SHIFT 8
> +#define QMGR_MEM_RGN_DESC_SIZE_MASK  (0xf << QMGR_MEM_RGN_DESC_SIZE_SHIFT)
> +#define QMGR_MEM_RGN_SIZE_SHIFT      0
> +#define QMGR_MEM_RGN_SIZE_MASK               (7 << QMGR_MEM_RGN_SIZE_SHIFT)
> +
> +/*
> + * Queue Manager - Queues Region
> + */
> +#define QMGR_QUEUE_REG_A(n)          (0x00 + ((n) << 4))
> +#define QMGR_QUEUE_REG_B(n)          (0x04 + ((n) << 4))
> +#define QMGR_QUEUE_REG_C(n)          (0x08 + ((n) << 4))
> +#define QMGR_QUEUE_REG_D(n)          (0x0C + ((n) << 4))
> +
> +/* Queue N Register C bits */
> +#define QMGR_QUEUE_HEAD_TAIL_SHIFT   31
> +#define QMGR_QUEUE_HEAD_TAIL_MASK    (1 << QMGR_QUEUE_HEAD_TAIL_SHIFT)

Could use BIT() here, and in several other places below.
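
e.g. (sketch, BIT() comes from <linux/bitops.h>):

	#define QMGR_QUEUE_HEAD_TAIL_MASK	BIT(QMGR_QUEUE_HEAD_TAIL_SHIFT)
	#define DMA_CH_TX_ENABLE_MASK		BIT(DMA_CH_TX_ENABLE_SHIFT)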

> +#define QMGR_QUEUE_PKT_SIZE_SHIFT    0
> +#define QMGR_QUEUE_PKT_SIZE_MASK     (0x3fff << QMGR_QUEUE_PKT_SIZE_SHIFT)
> +/* Queue N Register D bits */
> +#define QMGR_QUEUE_DESC_PTR_SHIFT    5
> +#define QMGR_QUEUE_DESC_PTR_MASK     (0x7ffffff << QMGR_QUEUE_DESC_PTR_SHIFT)
> +#define QMGR_QUEUE_DESC_SIZE_SHIFT   0
> +#define QMGR_QUEUE_DESC_SIZE_MASK    (0x1f << QMGR_QUEUE_DESC_SIZE_SHIFT)
> +
> +/*
> + * Queue Manager - Queue Status Region
> + */
> +#define QMGR_QUEUE_STATUS_REG_A(n)   (0x00 + ((n) << 4))
> +#define QMGR_QUEUE_STATUS_REG_B(n)   (0x04 + ((n) << 4))
> +#define QMGR_QUEUE_STATUS_REG_C(n)   (0x08 + ((n) << 4))
> +
> +/*
> + * DMA Controller - Global Control Registers Region
> + */
> +#define DMA_REVISION_REG             0x00    /* Major and minor versions */
> +                                             /* of the module */
> +#define DMA_TEARDOWN_FREE_DESC_CTRL_REG 0x04 /* Queue  manager and queue */
> +                                             /* number for Teardown free */
> +                                             /* descriptor queue */
> +#define DMA_EMULATION_CTRL_REG               0x08    /* Emulation control register */
> +
> +/* Teardown Free Descriptor Queue Control Register bits */
> +#define DMA_TD_DESC_QMGR_SHIFT               12
> +#define DMA_TD_DESC_QMGR_MASK                (3 << DMA_TD_DESC_QMGR_SHIFT)
> +#define DMA_TD_DESC_QNUM_SHIFT               0
> +#define DMA_TD_DESC_QNUM_MASK                (0xfff << DMA_TD_DESC_QNUM_SHIFT)
> +
> +/*
> + * DMA Controller - Channel Control / Status Registers Region
> + */
> +#define DMA_CH_TX_GLOBAL_CFG_REG(n)   (0x00 + ((n) << 5))
> +#define DMA_CH_RX_GLOBAL_CFG_REG(n)   (0x08 + ((n) << 5))
> +#define DMA_CH_RX_HOST_PKT_CFG_REG_A(n)  (0x0C + ((n) << 5))
> +#define DMA_CH_RX_HOST_PKT_CFG_REG_B(n)  (0x10 + ((n) << 5))
> +#define DMA_CH_RX_EMBED_PKT_CFG_REG_A(n) (0x14 + ((n) << 5))
> +#define DMA_CH_RX_EMBED_PKT_CFG_REG_B(n) (0x18 + ((n) << 5))
> +#define DMA_CH_RX_MONO_PKT_CFG_REG(n)         (0x1C + ((n) << 5))
> +
> +/* Tx Channel N Global Configuration Register bits */
> +#define DMA_CH_TX_ENABLE_SHIFT               31
> +#define DMA_CH_TX_ENABLE_MASK                (1 << DMA_CH_TX_ENABLE_SHIFT)
> +#define DMA_CH_TX_TEARDOWN_SHIFT     30
> +#define DMA_CH_TX_TEARDOWN_MASK              (1 << DMA_CH_TX_TEARDOWN_SHIFT)
> +#define DMA_CH_TX_DEFAULT_QMGR_SHIFT 12
> +#define DMA_CH_TX_DEFAULT_QMGR_MASK  (3 << DMA_CH_TX_DEFAULT_QMGR_SHIFT)
> +#define DMA_CH_TX_DEFAULT_QNUM_SHIFT 0
> +#define DMA_CH_TX_DEFAULT_QNUM_MASK  (0xfff << DMA_CH_TX_DEFAULT_QNUM_SHIFT)
> +
> +/* Rx Channel N Global Configuration Register bits */
> +#define DMA_CH_RX_ENABLE_SHIFT               31
> +#define DMA_CH_RX_ENABLE_MASK                (1 << DMA_CH_RX_ENABLE_SHIFT)
> +#define DMA_CH_RX_TEARDOWN_SHIFT     30
> +#define DMA_CH_RX_TEARDOWN_MASK              (1 << DMA_CH_RX_TEARDOWN_SHIFT)
> +#define DMA_CH_RX_ERROR_HANDLING_SHIFT       24
> +#define DMA_CH_RX_ERROR_HANDLING_MASK        (1 << DMA_CH_RX_ERROR_HANDLING_SHIFT)
> +#define DMA_CH_RX_SOP_OFFSET_SHIFT   16
> +#define DMA_CH_RX_SOP_OFFSET_MASK    (0xff << DMA_CH_RX_SOP_OFFSET_SHIFT)
> +#define DMA_CH_RX_DEFAULT_DESC_TYPE_SHIFT 14
> +#define DMA_CH_RX_DEFAULT_DESC_TYPE_MASK  (3 << \
> +                                        DMA_CH_RX_DEFAULT_DESC_TYPE_SHIFT)
> +#define DMA_CH_RX_DEFAULT_DESC_EMBED 0
> +#define DMA_CH_RX_DEFAULT_DESC_HOST  1
> +#define DMA_CH_RX_DEFAULT_DESC_MONO  2
> +#define DMA_CH_RX_DEFAULT_RQ_QMGR_SHIFT 12
> +#define DMA_CH_RX_DEFAULT_RQ_QMGR_MASK       (3 << DMA_CH_RX_DEFAULT_RQ_QMGR_SHIFT)
> +#define DMA_CH_RX_DEFAULT_RQ_QNUM_SHIFT 0
> +#define DMA_CH_RX_DEFAULT_RQ_QNUM_MASK       (0xfff << \
> +                                      DMA_CH_RX_DEFAULT_RQ_QNUM_SHIFT)
> +
> +/* Rx Channel N Host Packet Configuration Register A/B bits */
> +#define DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(n) (12 + 16 * ((n) & 1))
> +#define DMA_CH_RX_HOST_FDQ_QMGR_MASK(n)  (3 << DMA_CH_RX_HOST_FDQ_QMGR_SHIFT(n))
> +#define DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(n) (0 + 16 * ((n) & 1))
> +#define DMA_CH_RX_HOST_FDQ_QNUM_MASK(n)  (0xfff << \
> +                                       DMA_CH_RX_HOST_FDQ_QNUM_SHIFT(n))
> +
> +/* Rx Channel N Embedded Packet Configuration Register A bits */
> +#define DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(n) (6 + 8 * (n))
> +#define DMA_CH_RX_EMBED_FBP_BMGR_MASK(n)  (3 << \
> +                                        DMA_CH_RX_EMBED_FBP_BMGR_SHIFT(n))
> +#define DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(n) (0 + 8 * (n))
> +#define DMA_CH_RX_EMBED_FBP_PNUM_MASK(n)  (0x1f << \
> +                                        DMA_CH_RX_EMBED_FBP_PNUM_SHIFT(n))
> +
> +/* Rx Channel N Embedded Packet Configuration Register B bits */
> +#define DMA_CH_RX_EMBED_NUM_SLOT_SHIFT       24
> +#define DMA_CH_RX_EMBED_NUM_SLOT_MASK        (7 << DMA_CH_RX_EMBED_NUM_SLOT_SHIFT)
> +#define DMA_CH_RX_EMBED_SOP_SLOT_SHIFT       16
> +#define DMA_CH_RX_EMBED_SOP_SLOT_MASK        (7 << DMA_CH_RX_EMBED_SOP_SLOT_SHIFT)
> +#define DMA_CH_RX_EMBED_FDQ_QMGR_SHIFT       12
> +#define DMA_CH_RX_EMBED_FDQ_QMGR_MASK        (3 << DMA_CH_RX_EMBED_FDQ_QMGR_SHIFT)
> +#define DMA_CH_RX_EMBED_FDQ_QNUM_SHIFT       0
> +#define DMA_CH_RX_EMBED_FDQ_QNUM_MASK        (0xfff << \
> +                                      DMA_CH_RX_EMBED_FDQ_QNUM_SHIFT)
> +
> +/* Rx Channel N Monolithic Packet Configuration Register bits */
> +#define DMA_CH_RX_MONO_SOP_OFFSET_SHIFT 16
> +#define DMA_CH_RX_MONO_SOP_OFFSET_MASK       (0xff << \
> +                                      DMA_CH_RX_MONO_SOP_OFFSET_SHIFT)
> +#define DMA_CH_RX_MONO_FDQ_QMGR_SHIFT        12
> +#define DMA_CH_RX_MONO_FDQ_QMGR_MASK (3 << DMA_CH_RX_MONO_FDQ_QMGR_SHIFT)
> +#define DMA_CH_RX_MONO_FDQ_QNUM_SHIFT        0
> +#define DMA_CH_RX_MONO_FDQ_QNUM_MASK (0xfff << DMA_CH_RX_MONO_FDQ_QNUM_SHIFT)
> +
> +/*
> + * DMA Scheduler - Control Region
> + */
> +#define DMA_SCHED_CTRL_REG           0x00
> +
> +/* DMA Scheduler Control Register bits */
> +#define DMA_SCHED_ENABLE_SHIFT               31
> +#define DMA_SCHED_ENABLE_MASK                (1 << DMA_SCHED_ENABLE_SHIFT)
> +#define DMA_SCHED_LAST_ENTRY_SHIFT   0
> +#define DMA_SCHED_LAST_ENTRY_MASK    (0xff << DMA_SCHED_LAST_ENTRY_SHIFT)
> +
> +/*
> + * DMA Scheduler - Table Region
> + */
> +#define DMA_SCHED_TABLE_WORD_REG(n)  ((n) << 2)
> +
> +/*
> + * CPPI 4.1 Host Packet Descriptor
> + */
> +struct cppi41_host_pkt_desc {
> +     u32 desc_info;          /* Descriptor type, protocol specific word */
> +                             /* count, packet length */
> +     u32 tag_info;           /* Source tag (31:16), destination tag (15:0) */
> +     u32 pkt_info;           /* Packet error state, type, protocol flags, */
> +                             /* return info, descriptor location */
> +     u32 buf_len;            /* Number of valid data bytes in the buffer */
> +     u32 buf_ptr;            /* Pointer to the buffer associated with */
> +                             /* this descriptor */
> +     u32 next_desc_ptr;      /* Pointer to the next buffer descriptor */
> +     u32 orig_buf_len;       /* Original buffer length */
> +     u32 orig_buf_ptr;       /* Original buffer pointer */
> +     u32 stk_comms_info[2];  /* Network stack private communications info */
> +};
> +
> +/*
> + * CPPI 4.1 Host Buffer Descriptor
> + */
> +struct cppi41_host_buf_desc {
> +     u32 reserved[2];
> +     u32 buf_recl_info;      /* Return info, descriptor location */
> +     u32 buf_len;            /* Number of valid data bytes in the buffer */
> +     u32 buf_ptr;            /* Pointer to the buffer associated with */
> +                             /* this descriptor */
> +     u32 next_desc_ptr;      /* Pointer to the next buffer descriptor */
> +     u32 orig_buf_len;       /* Original buffer length */
> +     u32 orig_buf_ptr;       /* Original buffer pointer */
> +};

Some of these comments are not really any more descriptive than the
field names.  The last three in particular could be dropped.

[...]


Kevin
