When the chip is limited to 30-bit DMA, allocate RX buffers in ZONE_DMA. When
the network stack passes us TX buffers that cannot be mapped because of the
limitation (with an address above 1 GB), allocate a bounce buffer in ZONE_DMA
and copy the packet there.

Signed-off-by: Will Dyson <[EMAIL PROTECTED]>
---
.../net/wireless/mac80211/bcm43xx/bcm43xx_dma.c    |   52 +++++++++++++++++---
1 files changed, 45 insertions(+), 7 deletions(-)

diff --git a/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
index d09b849..8f6d434 100644
--- a/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
@@ -397,6 +397,9 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
    struct device *dev = ring->dev->dev->dev;

+    /* Dont need to test for 30bit dma here.
+     * dma_alloc_coherent respects the dma_mask
+     */
    ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                        &(ring->dmabase), GFP_KERNEL);
    if (!ring->descbase) {
@@ -549,11 +552,15 @@ static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
    int i, err = -ENOMEM;
    struct bcm43xx_dmadesc_generic *desc;
    struct bcm43xx_dmadesc_meta *meta;
+    gfp_t flags = GFP_KERNEL;
+
+    if (bcm43xx_dma30(ring->dev))
+        flags = GFP_DMA;

    for (i = 0; i < ring->nr_slots; i++) {
        desc = ring->ops->idx2desc(ring, i, &meta);

-        err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
+        err = setup_rx_descbuffer(ring, desc, meta, flags);
        if (err) {
            printk(KERN_ERR PFX "Failed to allocate initial descbuffers\n");
            goto err_unwind;
@@ -730,6 +737,7 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
    struct bcm43xx_dmaring *ring;
    int err;
    int nr_slots;
+    gfp_t dma_flags = GFP_KERNEL;

    ring = kzalloc(sizeof(*ring), GFP_KERNEL);
    if (!ring)
@@ -744,9 +752,12 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_wldev *dev,
    if (!ring->meta)
        goto err_kfree_ring;
    if (for_tx) {
+        if (bcm43xx_dma30(dev))
+            dma_flags = GFP_DMA;
+        /* only this allocation is for DMA */
        ring->txhdr_cache = kcalloc(nr_slots,
                        sizeof(struct bcm43xx_txhdr_fw4),
-                        GFP_KERNEL);
+                        dma_flags);
        if (!ring->txhdr_cache)
            goto err_kfree_meta;
    }
@@ -1020,9 +1031,11 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
    const struct bcm43xx_dma_ops *ops = ring->ops;
    u8 *header;
    int slot;
+    int err;
    struct bcm43xx_dmadesc_generic *desc;
    struct bcm43xx_dmadesc_meta *meta;
    struct bcm43xx_dmadesc_meta *meta_hdr;
+    struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
    assert(skb_shinfo(skb)->nr_frags == 0);
@@ -1052,9 +1065,27 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
    memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
    meta->skb = skb;
    meta->is_last_fragment = 1;
+
    meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-    if(dma_mapping_error(meta->dmaaddr))
-        goto out_unmap_hdr;
+
+    /* create a bounce buffer in zone_dma on mapping failure. */
+    if (dma_mapping_error(meta->dmaaddr)) {
+        bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
+        if(!bounce_skb) {
+            err = -ENOMEM;
+            goto out_unmap_hdr;
+        }
+
+        memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
+        dev_kfree_skb_any(skb);
+        skb = bounce_skb;
+        meta->skb = skb;
+        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+        if (dma_mapping_error(meta->dmaaddr)) {
+            err = -EIO;
+            goto out_free_bounce;
+        }
+    }

    ops->fill_descriptor(ring, desc, meta->dmaaddr,
                 skb->len, 0, 1, 1);
@@ -1064,10 +1095,12 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
    ops->poke_tx(ring, next_slot(ring, slot));
    return 0;

+out_free_bounce:
+    dev_kfree_skb_any(skb);
out_unmap_hdr:
    unmap_descbuffer(ring, meta_hdr->dmaaddr,
            sizeof(struct bcm43xx_txhdr_fw4), 1);
-    return -EIO;
+    return err;
}

int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
@@ -1088,7 +1121,7 @@ int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,

    err = dma_tx_fragment(ring, skb, ctl);
    if (unlikely(err)) {
-        printkl(KERN_ERR PFX "DMA tx mapping failure\n");
+        printkl(KERN_ERR PFX "DMA tx failure\n");
        return NETDEV_TX_BUSY;
    }

@@ -1186,6 +1219,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
    u16 len;
    int err;
    dma_addr_t dmaaddr;
+    gfp_t dma_flags;

    desc = ops->idx2desc(ring, *slot, &meta);

@@ -1253,8 +1287,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
        goto drop;
    }

+    dma_flags = GFP_ATOMIC;
+    if (bcm43xx_dma30(ring->dev))
+        dma_flags |= GFP_DMA;
+
    dmaaddr = meta->dmaaddr;
-    err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
+    err = setup_rx_descbuffer(ring, desc, meta, dma_flags);
    if (unlikely(err)) {
        dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
        sync_descbuffer_for_device(ring, dmaaddr,
--
1.5.0.3



--
Will Dyson
http://www.lucidts.com/
Linux/Mac/Win consulting
_______________________________________________
Bcm43xx-dev mailing list
[email protected]
https://lists.berlios.de/mailman/listinfo/bcm43xx-dev

Reply via email to