- This patch adds copybreak and skb recycling to the ixgb driver:
  packets shorter than 256 bytes are copied into a right-sized skb
  and the original receive buffer is kept for reuse, which should
  improve performance for small packets when the stack is doing
  large amounts of reassembly.
- The receive refill path now recycles those retained skbs instead
  of always allocating, and counts allocation failures in the new
  alloc_rx_buff_failed field.
- The RDT tail write is batched into a single barrier-protected
  update per refill pass instead of one write per descriptor group.
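
The copybreak idea, as a minimal userspace-flavoured sketch (the
threshold constant and helper name below are illustrative, not the
driver's code):

    #include <stdlib.h>
    #include <string.h>

    #define CB_THRESHOLD 256   /* mirrors IXGB_CB_LENGTH */

    /* Deliver a frame of 'len' bytes that arrived in 'rx_buf'.
     * Small frames are copied so 'rx_buf' can be recycled; the
     * caller checks *recycled to decide whether to re-arm it. */
    static void *deliver_frame(void *rx_buf, size_t len, int *recycled)
    {
            if (len < CB_THRESHOLD) {
                    void *copy = malloc(len);
                    if (copy) {
                            memcpy(copy, rx_buf, len);
                            *recycled = 1;  /* big buffer stays with the driver */
                            return copy;
                    }
                    /* allocation failed: fall through and hand off rx_buf */
            }
            *recycled = 0;                  /* stack consumes the big buffer */
            return rx_buf;
    }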

Signed-off-by: Jeff Kirsher <[EMAIL PROTECTED]>
Signed-off-by: Jesse Brandeburg <[EMAIL PROTECTED]>
Signed-off-by: John Ronciak <[EMAIL PROTECTED]>
---
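Notes (below the cut, so they stay out of the commit message): the
refill loop prefers a buffer left behind by copybreak, re-arming it
with skb_trim(skb, 0) and jumping straight to the DMA mapping, and
the RDT doorbell is written once per pass, behind a wmb(), pointing
at the last armed descriptor rather than the next free slot. A rough
userspace-flavoured sketch of that shape, with hypothetical helpers
standing in for dev_alloc_skb(), pci_map_single() and
IXGB_WRITE_REG(..., RDT, ...):

    #include <stddef.h>

    struct slot { void *buf; };
    struct ring {
            struct slot *slots;
            unsigned int count;
            unsigned int next_to_use;
    };

    extern void *alloc_buf(size_t len);
    extern void map_and_arm(struct slot *s, unsigned int idx);
    extern void barrier_then_write_tail(unsigned int tail);

    static void refill(struct ring *r, size_t buf_len, unsigned int n)
    {
            unsigned int i = r->next_to_use;

            while (n--) {
                    struct slot *s = &r->slots[i];

                    if (!s->buf) {          /* nothing left to recycle */
                            s->buf = alloc_buf(buf_len);
                            if (!s->buf)
                                    break;  /* better luck next round */
                    }
                    map_and_arm(s, i);      /* (re)map and fill descriptor i */
                    if (++i == r->count)
                            i = 0;
            }

            if (r->next_to_use != i) {
                    r->next_to_use = i;
                    /* tail names the last armed slot, not the next free one */
                    barrier_then_write_tail(i ? i - 1 : r->count - 1);
            }
    }
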

 drivers/net/ixgb/ixgb.h      |    2 ++
 drivers/net/ixgb/ixgb_main.c |   56 ++++++++++++++++++++++++++++++++----------
 2 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index b9c37fd..1a5bde2 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -65,6 +65,7 @@
 #ifdef NETIF_F_TSO
 #include <net/checksum.h>
 #endif
+#include <linux/workqueue.h>
 
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
@@ -200,6 +201,7 @@ struct ixgb_adapter {
        struct ixgb_hw hw;
        u16 msg_enable;
        struct ixgb_hw_stats stats;
+       uint32_t alloc_rx_buff_failed;
 #ifdef CONFIG_PCI_MSI
        boolean_t have_msi;
 #endif
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7517d70..ee63120 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1953,6 +1953,26 @@ ixgb_clean_rx_irq(struct ixgb_adapter *a
                        goto rxdesc_done;
                }
 
+               /* copybreak: copying small packets into a freshly
+                * allocated skb should improve performance for small
+                * packets with large amounts of reassembly being done
+                * in the stack */
+#define IXGB_CB_LENGTH 256
+               if (length < IXGB_CB_LENGTH) {
+                       struct sk_buff *new_skb =
+                               dev_alloc_skb(length + NET_IP_ALIGN);
+                       if (new_skb) {
+                               skb_reserve(new_skb, NET_IP_ALIGN);
+                               new_skb->dev = netdev;
+                               memcpy(new_skb->data - NET_IP_ALIGN,
+                                      skb->data - NET_IP_ALIGN,
+                                      length + NET_IP_ALIGN);
+                               /* keep the original skb for recycling */
+                               buffer_info->skb = skb;
+                               skb = new_skb;
+                       }
+               }
+               /* end copybreak code */
+
                /* Good Receive */
                skb_put(skb, length);
 
@@ -2021,12 +2041,18 @@ ixgb_alloc_rx_buffers(struct ixgb_adapte
 
        /* leave three descriptors unused */
        while(--cleancount > 2) {
-               rx_desc = IXGB_RX_DESC(*rx_ring, i);
-
-               skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+               /* recycle! it's good for you */
+               if (!(skb = buffer_info->skb))
+                       skb = dev_alloc_skb(adapter->rx_buffer_len
+                                           + NET_IP_ALIGN);
+               else {
+                       skb_trim(skb, 0);
+                       goto map_skb;
+               }
 
                if(unlikely(!skb)) {
                        /* Better luck next round */
+                       adapter->alloc_rx_buff_failed++;
                        break;
                }
 
@@ -2040,32 +2066,36 @@ ixgb_alloc_rx_buffers(struct ixgb_adapte
 
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
+map_skb:
                buffer_info->dma = pci_map_single(pdev,
                                                  skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);
 
+               rx_desc = IXGB_RX_DESC(*rx_ring, i);
                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
                /* guarantee DD bit not set now before h/w gets descriptor
                 * this is the rest of the workaround for h/w double 
                 * writeback. */
                rx_desc->status = 0;
 
-               if((i & ~(num_group_tail_writes- 1)) == i) {
-                       /* Force memory writes to complete before letting h/w
-                        * know there are new descriptors to fetch.  (Only
-                        * applicable for weak-ordered memory model archs,
-                        * such as IA-64). */
-                       wmb();
-
-                       IXGB_WRITE_REG(&adapter->hw, RDT, i);
-               }
 
                if(++i == rx_ring->count) i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
-       rx_ring->next_to_use = i;
+       if (likely(rx_ring->next_to_use != i)) {
+               rx_ring->next_to_use = i;
+               if (unlikely(i-- == 0))
+                       i = (rx_ring->count - 1);
+
+               /* Force memory writes to complete before letting h/w
+                * know there are new descriptors to fetch.  (Only
+                * applicable for weak-ordered memory model archs, such
+                * as IA-64). */
+               wmb();
+               IXGB_WRITE_REG(&adapter->hw, RDT, i);
+       }
 }
 
 /**
