--- linux-3.10.4/drivers/net/ethernet/broadcom/bgmac.c.orig	2013-08-11 11:51:18.372493968 -0700
+++ linux-3.10.4/drivers/net/ethernet/broadcom/bgmac.c	2013-08-11 12:19:15.629460392 -0700
@@ -302,51 +302,89 @@ static int bgmac_dma_rx_read(struct bgma
 		struct device *dma_dev = bgmac->core->dma_dev;
 		struct bgmac_slot_info *slot = &ring->slots[ring->start];
 		struct sk_buff *skb = slot->skb;
-		struct sk_buff *new_skb;
 		struct bgmac_rx_header *rx;
 		u16 len, flags;
+		bool resync_skb = true;
 
-		/* Unmap buffer to make it accessible to the CPU */
+		/* Synchronize the header to make it accessible to the CPU */
 		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
-					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+					sizeof(struct bgmac_rx_header), DMA_FROM_DEVICE);
 
 		/* Get info from the header */
 		rx = (struct bgmac_rx_header *)skb->data;
 		len = le16_to_cpu(rx->len);
 		flags = le16_to_cpu(rx->flags);
 
-		/* Check for poison and drop or pass the packet */
-		if (len == 0xdead && flags == 0xbeef) {
-			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
-				  ring->start);
-		} else {
+		do {
+			/* Check for poisoned packet */
+			if (len == 0xdead && flags == 0xbeef) {
+				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+					  ring->start);
+				break;
+			}
+
 			/* Omit CRC. */
 			len -= ETH_FCS_LEN;
 
-			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
-			if (new_skb) {
+			/* Use skb_copy for small packets only */
+			if (len > 1) {
+				dma_addr_t old_dma_addr = slot->dma_addr;
+				int err;
+
+				/* Prepare new skb for further packets */
+				err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
+				if (err) {
+					bgmac_err(bgmac, "Couldn't allocate new skb for slot %d!\n",
+						  ring->start);
+					bgmac->net_dev->stats.rx_dropped++;
+					break;
+				}
+				bgmac_dma_rx_setup_desc(bgmac, ring,
+							ring->start);
+
+				/* Unmap old skb, we'll pass it to the netif */
+				dma_unmap_single(dma_dev, old_dma_addr,
+						 BGMAC_RX_BUF_SIZE,
+						 DMA_FROM_DEVICE);
+				resync_skb = false;
+
+				skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
+				skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
+			} else {
+				struct sk_buff *new_skb;
+
+				/* Synchronize the data to make it accessible to the CPU */
+				dma_sync_single_for_cpu(dma_dev, (char *)slot->dma_addr + BGMAC_RX_FRAME_OFFSET,
+							len, DMA_FROM_DEVICE);
+
+				/* Poison the old skb */
+				rx->len = cpu_to_le16(0xdead);
+				rx->flags = cpu_to_le16(0xbeef);
+
+				new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
+				if (!new_skb) {
+					bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+					bgmac->net_dev->stats.rx_dropped++;
+					break;
+				}
+
 				skb_put(new_skb, len);
 				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
 								 new_skb->data,
 								 len);
-				skb_checksum_none_assert(skb);
-				new_skb->protocol =
-					eth_type_trans(new_skb, bgmac->net_dev);
-				netif_receive_skb(new_skb);
-				handled++;
-			} else {
-				bgmac->net_dev->stats.rx_dropped++;
-				bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+				skb = new_skb;
 			}
 
-			/* Poison the old skb */
-			rx->len = cpu_to_le16(0xdead);
-			rx->flags = cpu_to_le16(0xbeef);
-		}
-
-		/* Make it back accessible to the hardware */
-		dma_sync_single_for_device(dma_dev, slot->dma_addr,
-					   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+			skb_checksum_none_assert(skb);
+			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+			netif_receive_skb(skb);
+			handled++;
+		} while (0);
+
+		/* Make the header back accessible to the hardware */
+		if (resync_skb)
+			dma_sync_single_for_device(dma_dev, slot->dma_addr,
+						   sizeof(struct bgmac_rx_header), DMA_FROM_DEVICE);
 
 		if (++ring->start >= BGMAC_RX_RING_SLOTS)
 			ring->start = 0;
