Check result of dma_map_single(), print warnings and propagate errors up the
call stack.

Signed-off-by: Will Dyson <[EMAIL PROTECTED]>
---
 .../net/wireless/mac80211/bcm43xx/bcm43xx_dma.c    |   39 +++++++++++++++-----
 1 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
index d53679d..c0f83b7 100644
--- a/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/mac80211/bcm43xx/bcm43xx_dma.c
@@ -525,9 +525,11 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data,
                                 ring->rx_buffersize, 0);
+       if (dma_mapping_error(dmaaddr))
+               return -EIO;
+
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
-
        ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                   ring->rx_buffersize, 0, 0, 0);
 
@@ -552,8 +554,10 @@ static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
                desc = ring->ops->idx2desc(ring, i, &meta);
 
                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
-               if (err)
+               if (err) {
+                       printk(KERN_ERR PFX "Failed to allocate initial descbuffers\n");
                        goto err_unwind;
+               }
        }
        mb();
        ring->used_slots = ring->nr_slots;
@@ -1017,7 +1021,7 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_wldev *dev,
        return ring;
 }
 
-static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
+static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            struct ieee80211_tx_control *ctl)
 {
@@ -1026,23 +1030,26 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
        int slot;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
+       struct bcm43xx_dmadesc_meta *meta_hdr;
 
 #define SLOTS_PER_PACKET  2
        assert(skb_shinfo(skb)->nr_frags == 0);
 
        /* Get a slot for the header. */
        slot = request_slot(ring);
-       desc = ops->idx2desc(ring, slot, &meta);
-       memset(meta, 0, sizeof(*meta));
+       desc = ops->idx2desc(ring, slot, &meta_hdr);
+       memset(meta_hdr, 0, sizeof(*meta_hdr));
 
        header = &(ring->txhdr_cache[slot * sizeof(struct bcm43xx_txhdr_fw4)]);
        bcm43xx_generate_txhdr(ring->dev, header,
                               skb->data, skb->len, ctl,
                               generate_cookie(ring, slot));
 
-       meta->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
+       meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                       sizeof(struct bcm43xx_txhdr_fw4), 1);
-       ops->fill_descriptor(ring, desc, meta->dmaaddr,
+       if (dma_mapping_error(meta_hdr->dmaaddr))
+               return -EIO;
+       ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
                             sizeof(struct bcm43xx_txhdr_fw4), 1, 0, 0);
 
        /* Get a slot for the payload. */
@@ -1052,8 +1059,10 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
 
        memcpy(&meta->txstat.control, ctl, sizeof(*ctl));
        meta->skb = skb;
-       meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        meta->is_last_fragment = 1;
+       meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+       if (dma_mapping_error(meta->dmaaddr))
+               goto out_unmap_hdr;
 
        ops->fill_descriptor(ring, desc, meta->dmaaddr,
                             skb->len, 0, 1, 1);
@@ -1061,6 +1070,12 @@ static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
        /* Now transfer the whole frame. */
        wmb();
        ops->poke_tx(ring, next_slot(ring, slot));
+       return 0;
+
+out_unmap_hdr:
+       unmap_descbuffer(ring, meta_hdr->dmaaddr,
+                       sizeof(struct bcm43xx_txhdr_fw4), 1);
+       return -EIO;
 }
 
 int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
@@ -1068,6 +1083,7 @@ int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
                   struct ieee80211_tx_control *ctl)
 {
        struct bcm43xx_dmaring *ring = dev->dma.tx_ring1;
+       int err = 0;
 
        assert(ring->tx);
        if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
@@ -1078,7 +1094,12 @@ int bcm43xx_dma_tx(struct bcm43xx_wldev *dev,
                return NETDEV_TX_BUSY;
        }
 
-       dma_tx_fragment(ring, skb, ctl);
+       err = dma_tx_fragment(ring, skb, ctl);
+       if (unlikely(err)) {
+               printkl(KERN_ERR PFX "DMA tx mapping failure\n");
+               return NETDEV_TX_BUSY;
+       }
+
        ring->nr_tx_packets++;
        if (free_slots(ring) < SLOTS_PER_PACKET) {
                /* FIXME: we currently only have one queue */
-- 
1.5.1

_______________________________________________
Bcm43xx-dev mailing list
[email protected]
https://lists.berlios.de/mailman/listinfo/bcm43xx-dev

Reply via email to