From 6dcb3c40bb26ddd8dee0df42574b1b9f4466f487 Mon Sep 17 00:00:00 2001
From: "yanjun.zhu" <yanjun....@windriver.com>
Date: Thu, 25 Jun 2015 11:04:52 +0800
Subject: [PATCH 1/1] igb: remove CONFIG_IGB_DISABLE_PACKET_SPLIT kernel option

In the igb driver, the kernel option CONFIG_IGB_DISABLE_PACKET_SPLIT is
never enabled, so the packet-split receive path is always used. That path
modifies AH packets, so they cannot pass ICV verification at the receiver.
This option will also not be enabled in later Linux kernels, so remove it
and make the non-split (legacy) receive path unconditional.
Signed-off-by: yanjun.zhu <yanjun....@windriver.com>
---
drivers/net/igb/igb.h | 13 -
drivers/net/igb/igb_ethtool.c | 18 --
drivers/net/igb/igb_main.c | 616 ------------------------------------------
drivers/net/igb/kcompat.h | 13 -
4 files changed, 660 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index fe608c4..36d5d71 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -238,12 +238,10 @@ struct igb_lro_list {
#endif /* IGB_NO_LRO */
struct igb_cb {
#ifndef IGB_NO_LRO
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
union { /* Union defining head/tail partner */
struct sk_buff *head;
struct sk_buff *tail;
};
-#endif
__be32 tsecr; /* timestamp echo response */
u32 tsval; /* timestamp value in host order */
u32 next_seq; /* next expected sequence number */
@@ -305,12 +303,7 @@ struct igb_tx_buffer {
struct igb_rx_buffer {
dma_addr_t dma;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
struct sk_buff *skb;
-#else
- struct page *page;
- u32 page_offset;
-#endif
};
struct igb_tx_queue_stats {
@@ -378,11 +371,7 @@ struct igb_ring {
/* RX */
struct {
struct igb_rx_queue_stats rx_stats;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
u16 rx_buffer_len;
-#else
- struct sk_buff *skb;
-#endif
};
};
#ifdef CONFIG_IGB_VMDQ_NETDEV
@@ -804,10 +793,8 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
struct sk_buff *skb)
{
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
skb_pull(skb, IGB_TS_HDR_LEN);
-#endif
return;
}
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index b1c322b..df2c234 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1318,9 +1316,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
rx_ring->count = IGB_DEFAULT_RXD;
rx_ring->dev = pci_dev_to_dev(adapter->pdev);
rx_ring->netdev = adapter->netdev;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
-#endif
rx_ring->reg_idx = adapter->vfs_allocated_count;
if (igb_setup_rx_resources(rx_ring)) {
@@ -1537,21 +1535,13 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
frame_size >>= 1;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
data = rx_buffer->skb->data;
-#else
- data = kmap(rx_buffer->page);
-#endif
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- kunmap(rx_buffer->page);
-
-#endif
return match;
}
@@ -1576,11 +1566,7 @@ static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
/* sync Rx buffer for CPU read */
dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma,
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
IGB_RX_HDR_LEN,
-#else
- IGB_RX_BUFSZ,
-#endif
DMA_FROM_DEVICE);
/* verify contents of skb */
@@ -1590,11 +1576,7 @@ static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma,
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
IGB_RX_HDR_LEN,
-#else
- IGB_RX_BUFSZ,
-#endif
DMA_FROM_DEVICE);
/* unmap buffer on tx side */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 1eb952c..ff933fc 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3833,7 +3833,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
/*
* RLPML prevents us from receiving a frame larger than max_frame so
* it is safe to just set the rx_buffer_len to max_frame without the
@@ -3842,7 +3841,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
MAXIMUM_ETHERNET_VLAN_SIZE);
-#endif
/* disable the queue */
E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
@@ -3861,18 +3859,9 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* reset next-to-use/clean to place SW in sync with hardware */
ring->next_to_clean = 0;
ring->next_to_use = 0;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- ring->next_to_alloc = 0;
-
-#endif
/* set descriptor configuration */
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
#ifdef HAVE_PTP_1588_CLOCK
if (hw->mac.type >= e1000_82580)
@@ -4078,16 +4067,9 @@ void igb_clean_rx_ring(struct igb_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
return;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
-
-#endif
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
if (buffer_info->dma) {
dma_unmap_single(rx_ring->dev,
buffer_info->dma,
@@ -4100,18 +4082,6 @@ void igb_clean_rx_ring(struct igb_ring *rx_ring)
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
-#else
- if (!buffer_info->page)
- continue;
-
- dma_unmap_page(rx_ring->dev,
- buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(buffer_info->page);
-
- buffer_info->page = NULL;
-#endif
}
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -7231,184 +7201,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
}
#endif /* HAVE_VLAN_RX_REGISTER */
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-/**
- * igb_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void igb_reuse_rx_page(struct igb_ring *rx_ring,
- struct igb_rx_buffer *old_buff)
-{
- struct igb_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_buffer_info[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
-
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
- old_buff->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
-}
-
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
- struct page *page,
- unsigned int truesize)
-{
- /* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
-
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
- return false;
-#endif
-
- /* bump ref count on page before it is given to the stack */
- get_page(page);
-
- return true;
-}
-
-/**
- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buff to place the data into
- *
- * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
- **/
-static bool igb_add_rx_frag(struct igb_ring *rx_ring,
- struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct page *page = rx_buffer->page;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = IGB_RX_BUFSZ;
-#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
-#endif
-
- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* we can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(page) == numa_node_id()))
- return true;
-
- /* this page cannot be reused so discard it */
- put_page(page);
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
-
- return igb_can_reuse_rx_page(rx_buffer, page, truesize);
-}
-
-static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct igb_rx_buffer *rx_buffer;
- struct page *page;
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
- page = rx_buffer->page;
- prefetchw(page);
-
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
-
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
-
- /* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_failed++;
- return NULL;
- }
- /*
- * we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- IGB_RX_BUFSZ,
- DMA_FROM_DEVICE);
-
- /* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- igb_reuse_rx_page(rx_ring, rx_buffer);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
- }
-
- /* clear contents of rx_buffer */
- rx_buffer->page = NULL;
-
- return skb;
-}
-
-#endif
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -7456,7 +7249,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
#endif
#ifndef IGB_NO_LRO
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
/**
* igb_merge_active_tail - merge active tail into lro skb
* @tail: pointer to active tail in frag_list
@@ -7528,7 +7320,6 @@ static inline bool igb_close_active_frag_list(struct sk_buff *head)
return true;
}
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
* igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
* @adapter: board private structure
@@ -7600,11 +7391,9 @@ static void igb_lro_flush(struct igb_q_vector *q_vector,
if (IGB_CB(skb)->append_cnt) {
struct igb_lrohdr *lroh = igb_lro_hdr(skb);
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
/* close any active lro contexts */
igb_close_active_frag_list(skb);
-#endif
/* incorporate ip header and re-calculate checksum */
lroh->iph.tot_len = ntohs(skb->len);
lroh->iph.check = 0;
@@ -7652,9 +7441,7 @@ static void igb_lro_header_ok(struct sk_buff *skb)
struct igb_lrohdr *lroh = igb_lro_hdr(skb);
u16 opt_bytes, data_len;
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
IGB_CB(skb)->tail = NULL;
-#endif
IGB_CB(skb)->tsecr = 0;
IGB_CB(skb)->append_cnt = 0;
IGB_CB(skb)->mss = 0;
@@ -7710,35 +7497,6 @@ static void igb_lro_header_ok(struct sk_buff *skb)
IGB_CB(skb)->next_seq = ntohl(lroh->th.seq);
}
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
-{
- struct skb_shared_info *sh_info;
- struct skb_shared_info *new_skb_info;
- unsigned int data_len;
-
- sh_info = skb_shinfo(lro_skb);
- new_skb_info = skb_shinfo(new_skb);
-
- /* copy frags into the last skb */
- memcpy(sh_info->frags + sh_info->nr_frags,
- new_skb_info->frags,
- new_skb_info->nr_frags * sizeof(skb_frag_t));
-
- /* copy size data over */
- sh_info->nr_frags += new_skb_info->nr_frags;
- data_len = IGB_CB(new_skb)->mss;
- lro_skb->len += data_len;
- lro_skb->data_len += data_len;
- lro_skb->truesize += data_len;
-
- /* wipe record of data from new_skb */
- new_skb_info->nr_frags = 0;
- new_skb->len = new_skb->data_len = 0;
- dev_kfree_skb_any(new_skb);
-}
-
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
* igb_lro_receive - if able, queue skb into lro chain
* @q_vector: structure containing interrupt and ring information
@@ -7819,11 +7577,6 @@ static void igb_lro_receive(struct igb_q_vector *q_vector,
if (data_len == 0 ||
data_len > IGB_CB(lro_skb)->mss ||
data_len > IGB_CB(lro_skb)->free ||
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- data_len != new_skb->data_len ||
- skb_shinfo(new_skb)->nr_frags >=
- (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) ||
-#endif
igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
igb_lro_hdr(lro_skb)->th.window != lroh->th.window) {
igb_lro_flush(q_vector, lro_skb);
@@ -7844,13 +7597,8 @@ static void igb_lro_receive(struct igb_q_vector *q_vector,
/* update append_cnt */
IGB_CB(lro_skb)->append_cnt++;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- /* if header is empty pull pages into current skb */
- igb_merge_frags(lro_skb, new_skb);
-#else
/* chain this new skb in frag_list */
igb_add_active_tail(lro_skb, new_skb);
-#endif
if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh ||
skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) {
@@ -7988,7 +7736,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
return true;
}
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
/* igb_clean_rx_irq -- * legacy */
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
@@ -8085,317 +7832,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
#endif /* IGB_NO_LRO */
return (total_packets < budget);
}
-#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-/**
- * igb_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, and GRO offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int igb_get_headlen(unsigned char *data,
- unsigned int max_len)
-{
- union {
- unsigned char *network;
- /* l2 headers */
- struct ethhdr *eth;
- struct vlan_hdr *vlan;
- /* l3 headers */
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- } hdr;
- __be16 protocol;
- u8 nexthdr = 0; /* default to not TCP */
- u8 hlen;
-
- /* this should never happen, but better safe than sorry */
- if (max_len < ETH_HLEN)
- return max_len;
-
- /* initialize network frame pointer */
- hdr.network = data;
-
- /* set first protocol and move network header forward */
- protocol = hdr.eth->h_proto;
- hdr.network += ETH_HLEN;
-
- /* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
- if ((hdr.network - data) > (max_len - VLAN_HLEN))
- return max_len;
-
- protocol = hdr.vlan->h_vlan_encapsulated_proto;
- hdr.network += VLAN_HLEN;
- }
-
- /* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
- if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
- return max_len;
-
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return hdr.network - data;
-
- /* record next protocol if header is present */
- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
- nexthdr = hdr.ipv4->protocol;
-#ifdef NETIF_F_TSO6
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
- return max_len;
-
- /* record next protocol */
- nexthdr = hdr.ipv6->nexthdr;
- hlen = sizeof(struct ipv6hdr);
-#endif /* NETIF_F_TSO6 */
- } else {
- return hdr.network - data;
- }
-
- /* relocate pointer to start of L4 header */
- hdr.network += hlen;
-
- /* finally sort out TCP */
- if (nexthdr == IPPROTO_TCP) {
- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
- return max_len;
-
- /* access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[12] & 0xF0) >> 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return hdr.network - data;
-
- hdr.network += hlen;
- } else if (nexthdr == IPPROTO_UDP) {
- if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
- return max_len;
-
- hdr.network += sizeof(struct udphdr);
- }
-
- /*
- * If everything has gone correctly hdr.network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((hdr.network - data) < max_len)
- return hdr.network - data;
- else
- return max_len;
-}
-
-/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
- *
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /*
- * it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
-#ifdef HAVE_PTP_1588_CLOCK
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- /* retrieve timestamp from buffer */
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
- /* update pointers to remove timestamp header */
- skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
- frag->page_offset += IGB_TS_HDR_LEN;
- skb->data_len -= IGB_TS_HDR_LEN;
- skb->len -= IGB_TS_HDR_LEN;
-
- /* move va to start of packet data */
- va += IGB_TS_HDR_LEN;
- }
-#endif /* HAVE_PTP_1588_CLOCK */
-
- /*
- * we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-}
-
-/**
- * igb_cleanup_headers - Correct corrupted or empty headers
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being fixed
- *
- * Address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
- **/
-static bool igb_cleanup_headers(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
-
- if (unlikely((igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
- struct net_device *netdev = rx_ring->netdev;
- if (!(netdev->features & NETIF_F_RXALL)) {
- dev_kfree_skb_any(skb);
- return true;
- }
- }
-
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- igb_pull_tail(rx_ring, rx_desc, skb);
-
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
-
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
- }
-
- return false;
-}
-
-/* igb_clean_rx_irq -- * packet split */
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
-{
- struct igb_ring *rx_ring = q_vector->rx.ring;
- struct sk_buff *skb = rx_ring->skb;
- unsigned int total_bytes = 0, total_packets = 0;
- u16 cleaned_count = igb_desc_unused(rx_ring);
-
- do {
- union e1000_adv_rx_desc *rx_desc;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
- rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
- break;
-
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
-
- /* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
- cleaned_count++;
-
- /* fetch next buffer in frame if non-eop */
- if (igb_is_non_eop(rx_ring, rx_desc))
- continue;
-
- /* verify the packet layout is correct */
- if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_bytes += skb->len;
-
- /* populate checksum, timestamp, VLAN, and protocol */
- igb_process_skb_fields(rx_ring, rx_desc, skb);
-
-#ifndef IGB_NO_LRO
- if (igb_can_lro(rx_ring, rx_desc, skb))
- igb_lro_receive(q_vector, skb);
- else
-#endif
-#ifdef HAVE_VLAN_RX_REGISTER
- igb_receive_skb(q_vector, skb);
-#else
- napi_gro_receive(&q_vector->napi, skb);
-#endif
-#ifndef NETIF_F_GRO
-
- netdev_ring(rx_ring)->last_rx = jiffies;
-#endif
-
- /* reset skb pointer */
- skb = NULL;
-
- /* update budget accounting */
- total_packets++;
- } while (likely(total_packets < budget));
-
- /* place incomplete frames back on ring for completion */
- rx_ring->skb = skb;
-
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
-
- if (cleaned_count)
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
-
-#ifndef IGB_NO_LRO
- igb_lro_flush_all(q_vector);
-
-#endif /* IGB_NO_LRO */
- return (total_packets < budget);
-}
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -8436,46 +7873,6 @@ static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
return true;
}
-#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
-static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
- struct igb_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
-
- /* alloc new page for storage */
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-
- /*
- * if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
-
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = 0;
-
- return true;
-}
-
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -8495,22 +7892,14 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
do {
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
if (!igb_alloc_mapped_skb(rx_ring, bi))
-#else
- if (!igb_alloc_mapped_page(rx_ring, bi))
-#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
break;
/*
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
-#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-#else
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
-#endif
rx_desc++;
bi++;
@@ -8533,11 +7922,6 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/* record the next descriptor to use */
rx_ring->next_to_use = i;
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = i;
-
-#endif
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
diff --git a/drivers/net/igb/kcompat.h b/drivers/net/igb/kcompat.h
index 7034dbb..c998039 100644
--- a/drivers/net/igb/kcompat.h
+++ b/drivers/net/igb/kcompat.h
@@ -67,13 +67,6 @@
#else
#endif /* NAPI */
-/* packet split disable/enable */
-#ifdef DISABLE_PACKET_SPLIT
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-#define CONFIG_IGB_DISABLE_PACKET_SPLIT
-#endif
-#endif /* DISABLE_PACKET_SPLIT */
-
/* MSI compatibility code for all kernels and drivers */
#ifdef DISABLE_PCI_MSI
#undef CONFIG_PCI_MSI
@@ -1287,12 +1280,6 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
#undef put_cpu
#define put_cpu() do { } while(0)
#define MODULE_INFO(version, _version)
-#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
-#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
-#endif
-#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
-#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
-#endif
#define dma_set_coherent_mask(dev,mask) 1
--
1.8.5.2.233.g932f7e4
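
A note for readers following the igb_configure_rx_ring() hunk above: the
patch keeps the one-buffer SRRCTL programming and drops the header-split
sizing. Below is a condensed C sketch contrasting the two modes. The
constants are the driver's own; the helper function itself is hypothetical,
for illustration only (kernel build context assumed):

static u32 igb_srrctl_for_mode(bool packet_split, u32 rx_buffer_len)
{
	u32 srrctl;

	if (packet_split) {
		/* split mode: fixed header buffer plus page-backed packet buffer */
		srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	} else {
		/* legacy mode: one skb sized to hold the whole frame */
		srrctl = ALIGN(rx_buffer_len, 1024) >>
			 E1000_SRRCTL_BSIZEPKT_SHIFT;
	}

	/* both paths program the advanced one-buffer descriptor type */
	return srrctl | E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
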
On Fri, Jun 26, 2015 at 2:29 PM, zhuyj <zyjzyj2...@gmail.com> wrote:
> From the beginning, CONFIG_IGB_DISABLE_PACKET_SPLIT has been present in
> the igb NIC driver, but it is not a defined Linux kernel config option.
> As such, the igb NIC driver always splits received packets.
>
> So if the packets are protected by a hash algorithm such as MD5 or
> SHA-1, then after the igb NIC driver splits long packets (>178 bytes),
> these protected packets will be discarded by the receiver.
>
> An example is AH packets. If AH packets are long enough (>178 bytes),
> they are split by the igb NIC driver. In the end, the receiver does not
> get the correct ICV result, so these long (>178 bytes) packets are
> discarded. (A userspace sketch of the ICV mismatch follows after this
> quote.)
>
> So it is necessary to define CONFIG_IGB_DISABLE_PACKET_SPLIT in the igb
> driver header files to avoid splitting long packets in the igb driver
> (a minimal sketch of that define also follows below).
>
> At the very least, this should be explained in the README file, but the
> README says nothing about CONFIG_IGB_DISABLE_PACKET_SPLIT.
>
> So we should apply this patch (based on igb 5.0.5) to our igb NIC driver
> when it is built for the Linux kernel.
>
> Alternatively, an explanation should be added to the README file.
>
> Best Regards!
> Zhu Yanjun
>
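
As promised above, here is a hypothetical userspace illustration (not
driver code) of why a modified AH packet is discarded: AH's ICV is an HMAC
computed over the packet, so a receive path that alters even one byte of a
long packet yields a digest mismatch at the receiver. This is a simplified
sketch; the key, packet contents, and file name are made up. Build with:
gcc ah_icv_demo.c -lcrypto

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char key[] = "ah-authentication-key"; /* made-up key */
	unsigned char pkt[300];    /* stand-in for a >178-byte AH packet */
	unsigned char icv_tx[EVP_MAX_MD_SIZE], icv_rx[EVP_MAX_MD_SIZE];
	unsigned int tx_len, rx_len;

	memset(pkt, 0xab, sizeof(pkt));

	/* sender computes the ICV over the intact packet */
	HMAC(EVP_sha1(), key, sizeof(key) - 1, pkt, sizeof(pkt),
	     icv_tx, &tx_len);

	/* emulate a receive path that corrupts one byte while splitting */
	pkt[200] ^= 0x01;
	HMAC(EVP_sha1(), key, sizeof(key) - 1, pkt, sizeof(pkt),
	     icv_rx, &rx_len);

	printf("ICV %s\n", memcmp(icv_tx, icv_rx, tx_len) ?
	       "mismatch -> packet discarded" : "ok");
	return 0;
}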
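
And a hedged sketch of the header-file workaround discussed in the quote,
for trees without this patch: force the legacy (non-split) receive path by
defining the option unconditionally, e.g. near the top of kcompat.h. This
mirrors the pattern the kcompat.h hunk above deletes. With this patch
applied, no define is needed, since the legacy path is the only one left.

#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
#endif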