This is a proposal for target/linux/brcm47xx/patches-3.6/770-bgmac-unaligned-
addressing.patch, which adds support for DMA unaligned addressing in bgmac 
driver (needed for e.g. linksys e3200 devices).

From analysing the 'Generic Broadcom Home Networking Division (HND) DMA 
module' (hnddma.{c,h}), the DMA index register (BGMAC_DMA_TX_INDEX or 
BGMAC_DMA_RX_INDEX) needs an extra base or offset from address register. This 
offset needs to be accounted for when reading/analysing status 
(BGMAC_DMA_TX_STATUS or BGMAC_DMA_RX_STATUS) to retrieve the current (empty) 
slot id (BGMAC_DMA_TX_STATDPTR or BGMAC_DMA_RX_STATDPTR) in the ring.
Also for retrieving the active slot id (BGMAC_DMA_TX_ERRDPTR or 
BGMAC_DMA_RX_ERRDPTR) from the BGMAC_DMA_TX_ERROR or BGMAC_DMA_RX_ERROR register 
respectively, this offset would be needed, but that is currently not used in the driver.

The patch adds two extra fields to the bgmac_dma_ring structure, namely a 
boolean 'is_unaligned' which is set in bgmac_dma_alloc (and later used in 
bgmac_dma_init), and an unsigned int of 32 bits, holding the base for the 
index register for that ring.

For unaligned addressing (in bgmac_dma_init), the DMA table is first 
initialised, before enabling any dma tx/rx control, which is different from 
aligned addressing where it is the other way around.

I opted to introduce a CONFIG_BGMAC_UNALIGNED_ADDRESSING macro to introduce 
the changes to support unaligned addressing for the bgmac driver (it does add 
some extra bytes in bgmac_dma_ring struct). This could be an option in Kconfig 
(not yet in patch), but for now it is defined unconditionally in bgmac.h. The 
implementation should work for both aligned and unaligned addressing when 
CONFIG_BGMAC_UNALIGNED_ADDRESSING is defined.

If there is no need for an extra Kconfig option, let me know, I'll adjust the 
patch. In the other case, I'll create an extra entry in Kconfig and add it to 
the patch aswell. I created the patch from 3.6 kernel sources, let me know if 
there are differences for the 3.8 kernel.

I hope this can be tested on different hardware supporting dma 
aligned/unaligned addressing, it should work on both. Any comments/suggestions 
are welcome.

Note: Besides the changes for unaligned addressing, I corrected some types for 
variables and formats. I also introduced an extra int j variable in the second 
loop within dma initialisation of rx ring(s). From my understanding, reusing 
the int i variable in the inner loop breaks the outer loop if there would be 
more than one rx ring.

Signed-off-by: Tijs Van Buggenhout (t...@able.be)
---

--- a/drivers/net/ethernet/broadcom/bgmac.h     2013-02-20 12:41:03.138480108 
+0100
+++ b/drivers/net/ethernet/broadcom/bgmac.h     2013-02-25 13:22:06.187475387 
+0100
@@ -15,6 +15,10 @@
 #include <linux/bcma/bcma.h>
 #include <linux/netdevice.h>
 
+#ifndef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+#define CONFIG_BGMAC_UNALIGNED_ADDRESSING
+#endif
+
 #define BGMAC_DEV_CTL                          0x000
 #define  BGMAC_DC_TSM                          0x00000002
 #define  BGMAC_DC_CFCO                         0x00000004
@@ -384,6 +388,10 @@
        u16 mmio_base;
        struct bgmac_dma_desc *cpu_base;
        dma_addr_t dma_base;
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+       bool is_unaligned;
+       u32 index_base;
+#endif
 
        struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
 };
--- a/drivers/net/ethernet/broadcom/bgmac.c     2013-02-20 12:41:03.122481212 
+0100
+++ b/drivers/net/ethernet/broadcom/bgmac.c     2013-02-25 13:31:34.880174836 
+0100
@@ -156,6 +156,9 @@
        if (++ring->end >= BGMAC_TX_RING_SLOTS)
                ring->end = 0;
        bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+                   ring->index_base +
+#endif
                    ring->end * sizeof(struct bgmac_dma_desc));
 
        /* Always keep one slot free to allow detecting bugged calls. */
@@ -174,14 +177,30 @@
 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring 
*ring)
 {
        struct device *dma_dev = bgmac->core->dma_dev;
-       int empty_slot;
+       u16 empty_slot;
        bool freed = false;
 
+       if (ring->start == ring->end) {
+               bgmac_warn(bgmac, "Ignore DMA TX free on empty ring 0x%X\n", 
ring->mmio_base);
+               return;
+       }
+
        /* The last slot that hardware didn't consume yet */
        empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
        empty_slot &= BGMAC_DMA_TX_STATDPTR;
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+       empty_slot -= ring->index_base;
+       empty_slot &= BGMAC_DMA_TX_STATDPTR;
+#endif
        empty_slot /= sizeof(struct bgmac_dma_desc);
 
+       if (((ring->start == 0) && (empty_slot > ring->end)) ||
+                       (empty_slot >= ring->num_slots)) {
+               bgmac_err(bgmac, "Bogus current TX slot index %u (start index: 
%u, end index: %u)\n",
+                         empty_slot, ring->start, ring->end);
+               return;
+       }
+
        while (ring->start != empty_slot) {
                struct bgmac_slot_info *slot = &ring->slots[ring->start];
 
@@ -195,7 +214,7 @@
                        dev_kfree_skb(slot->skb);
                        slot->skb = NULL;
                } else {
-                       bgmac_err(bgmac, "Hardware reported transmission for 
empty TX ring slot %d! End of ring: %d\n",
+                       bgmac_err(bgmac, "Hardware reported transmission for 
empty TX ring slot %u! End of ring: %u\n",
                                  ring->start, ring->end);
                }
 
@@ -270,11 +289,15 @@
 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring 
*ring,
                             int weight)
 {
-       u32 end_slot;
+       u16 end_slot;
        int handled = 0;
 
        end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
        end_slot &= BGMAC_DMA_RX_STATDPTR;
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+       end_slot -= ring->index_base;
+       end_slot &= BGMAC_DMA_RX_STATDPTR;
+#endif
        end_slot /= sizeof(struct bgmac_dma_desc);
 
        ring->end = end_slot;
@@ -298,7 +321,7 @@
 
                /* Check for poison and drop or pass the packet */
                if (len == 0xdead && flags == 0xbeef) {
-                       bgmac_err(bgmac, "Found poisoned packet at slot %d, 
DMA issue!\n",
+                       bgmac_err(bgmac, "Found poisoned packet at slot %u, 
DMA issue!\n",
                                  ring->start);
                } else {
                        new_skb = netdev_alloc_skb(bgmac->net_dev, len + 2);
@@ -416,9 +439,15 @@
                ring = &bgmac->tx_ring[i];
                ring->num_slots = BGMAC_TX_RING_SLOTS;
                ring->mmio_base = ring_base[i];
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+               if ((ring->is_unaligned = bgmac_dma_unaligned(bgmac, ring, 
BGMAC_DMA_RING_TX)))
+                       bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned 
addressing\n",
+                                  ring->mmio_base);
+#else
                if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
                        bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned 
addressing but this feature is not implemented\n",
                                   ring->mmio_base);
+#endif
 
                /* Alloc ring of descriptors */
                size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -440,9 +469,15 @@
                ring = &bgmac->rx_ring[i];
                ring->num_slots = BGMAC_RX_RING_SLOTS;
                ring->mmio_base = ring_base[i];
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+               if ((ring->is_unaligned = bgmac_dma_unaligned(bgmac, ring, 
BGMAC_DMA_RING_RX)))
+                       bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned 
addressing\n",
+                                  ring->mmio_base);
+#else
                if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
                        bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned 
addressing but this feature is not implemented\n",
                                   ring->mmio_base);
+#endif
 
                /* Alloc ring of descriptors */
                size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -485,32 +520,61 @@
        for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
                ring = &bgmac->tx_ring[i];
 
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+               if (!ring->is_unaligned)
+                       /* When addressing is aligned, enable first */
+                       bgmac_dma_tx_enable(bgmac, ring);
+#else
                /* We don't implement unaligned addressing, so enable first */
                bgmac_dma_tx_enable(bgmac, ring);
+#endif
                bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
                            lower_32_bits(ring->dma_base));
                bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
                            upper_32_bits(ring->dma_base));
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+               if (ring->is_unaligned) {
+                       /* Base (offset) for DMA TX index */
+                       ring->index_base = lower_32_bits( ring->dma_base );
+                       /* Enable ring after initialising DMA table */
+                       bgmac_dma_tx_enable(bgmac, ring);
+               }
+#endif
 
                ring->start = 0;
                ring->end = 0;  /* Points the slot that should *not* be read 
*/
        }
 
        for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+               int j;
                ring = &bgmac->rx_ring[i];
 
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+               if (!ring->is_unaligned)
+                       /* When addressing is aligned, enable first */
+                       bgmac_dma_rx_enable(bgmac, ring);
+#else
                /* We don't implement unaligned addressing, so enable first */
                bgmac_dma_rx_enable(bgmac, ring);
+#endif
                bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
                            lower_32_bits(ring->dma_base));
                bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
                            upper_32_bits(ring->dma_base));
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+               if (ring->is_unaligned) {
+                       /* Base (offset) for DMA RX index */
+                       ring->index_base = lower_32_bits( ring->dma_base );
+                       /* Enable ring after initialising DMA table */
+                       bgmac_dma_rx_enable(bgmac, ring);
+               }
+#endif
 
-               for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots;
-                    i++, dma_desc++) {
+               for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
+                    j++, dma_desc++) {
                        ctl0 = ctl1 = 0;
 
-                       if (i == ring->num_slots - 1)
+                       if (j == ring->num_slots - 1)
                                ctl0 |= BGMAC_DESC_CTL0_EOT;
                        ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
                        /* Is there any BGMAC device that requires extension? 
*/
@@ -518,13 +582,16 @@
                         * B43_DMA64_DCTL1_ADDREXT_MASK;
                         */
 
-                       dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring-
>slots[i].dma_addr));
-                       dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring-
>slots[i].dma_addr));
+                       dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring-
>slots[j].dma_addr));
+                       dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring-
>slots[j].dma_addr));
                        dma_desc->ctl0 = cpu_to_le32(ctl0);
                        dma_desc->ctl1 = cpu_to_le32(ctl1);
                }
 
                bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+#ifdef CONFIG_BGMAC_UNALIGNED_ADDRESSING
+                           ring->index_base +
+#endif
                            ring->num_slots * sizeof(struct bgmac_dma_desc));
 
                ring->start = 0;
_______________________________________________
openwrt-devel mailing list
openwrt-devel@lists.openwrt.org
https://lists.openwrt.org/mailman/listinfo/openwrt-devel

Reply via email to