Hello,

I am attaching an updated e1000e patch that Joonwoo released. Apply it with "patch -p1 < patch" from the top of the e1000e-0.4.1.7 source tree. I have tried the patch on our lab machine and everything works perfectly. I would suggest removing the old e1000 driver if the machine does not have any PCI-X Intel cards; otherwise e1000 will get loaded instead of e1000e at boot. This can also be solved by upgrading to e1000 version 8 or higher, since from that version on e1000 only claims PCI and PCI-X devices. In that case e1000 and e1000e can be used on the same machine if it has PCI, PCI-X, and PCI-E Intel cards.
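
For anyone wiring the hooks into their own code: the patch installs rx_poll, rx_refill, tx_queue, tx_eob, tx_start, tx_clean, poll_on, and poll_off on the net_device (those fields come from Click's patch to struct net_device, so you need a Click-patched kernel). Roughly, one polling cycle looks like the sketch below. This is my own illustration, not part of the patch; the function name is made up, and buffer allocation and error handling are omitted.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of one polling cycle against the patched driver; assumes
 * dev->poll_on(dev) has already switched the device to polling mode. */
static void click_style_poll(struct net_device *dev)
{
	struct sk_buff *skb, *rx, *done;
	int want = 8;			/* ask for at most 8 packets */

	/* on return, want holds the number actually received */
	rx = dev->rx_poll(dev, &want);
	while ((skb = rx) != NULL) {
		rx = skb->next;
		skb->next = NULL;
		/* a real caller would hand skb off; the sketch drops it */
		kfree_skb(skb);
	}

	/* with a NULL chain, rx_refill() only reports how many RX
	 * descriptors are empty; pass a chain of fresh skbs to refill */
	dev->rx_refill(dev, NULL);

	/* collect the skbs the hardware has finished transmitting */
	done = dev->tx_clean(dev);
	while ((skb = done) != NULL) {
		done = skb->next;
		kfree_skb(skb);
	}
}

In the NULL case rx_refill() returns the number of unused descriptors, so the caller can size its allocation before calling again with real buffers.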

Thanks again, Joonwoo!

Roman
diff --git a/src/netdev.c b/src/netdev.c
index c7ada12..fd995ea 100644
--- a/src/netdev.c
+++ b/src/netdev.c
@@ -5113,6 +5113,364 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
 	       (pba_num >> 8), (pba_num & 0xff));
 }
 
+/* Click polling extension */
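+/*
+ * Receive up to *want packets with interrupts off and return them as a
+ * NULL-terminated chain linked through skb->next.  On return, *want
+ * holds the number actually received; the ring is not refilled here.
+ */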
+static struct sk_buff *e1000_rx_poll(struct net_device *netdev, int *want)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	u32 length;
+	unsigned int i;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	struct sk_buff *skb_head = NULL, **skbs;
+	int got = 0;
+
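+	/* skbs tracks the next-pointer slot where the next received
+	 * packet will be linked in */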
+	skbs = &skb_head;
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (got >= *want)
+			break;
+		got++;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		skb->dev = netdev;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		pci_unmap_single(pdev,
+				 buffer_info->dma,
+				 adapter->rx_buffer_len,
+				 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet, also make sure the frame isn't just CRC only */
+		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+			/* All receives must fit into a single buffer */
+			e_dbg("Receive packet consumed multiple buffers\n");
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		/* end copybreak code */
+		skb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		skb_pull(skb, netdev->hard_header_len);
+		*skbs = skb;
+		skbs = &(*skbs)->next;
+		*skbs = NULL;
+		netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	/* the ring is replenished by the caller via e1000_rx_refill() */
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+
+	*want = got;
+
+	return skb_head;
+}
+
+/* Click polling extension */
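+/*
+ * Place a NULL-terminated chain of freshly allocated skbs on the RX
+ * ring and return the number consumed.  When called with skbs == NULL,
+ * just report how many descriptors need refilling.
+ */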
+int e1000_rx_refill(struct net_device *netdev, struct sk_buff **skbs)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	int refill_count = 0;
+
+	if (!skbs)
+		return e1000_desc_unused(rx_ring);
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (*skbs) {
+
+		refill_count++;
+		buffer_info->skb = *skbs;
+		*skbs = (*skbs)->next;
+		buffer_info->dma = pci_map_single(pdev, buffer_info->skb->data,
+						  adapter->rx_buffer_len,
+						  PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(buffer_info->dma)) {
+			dev_err(&pdev->dev, "RX DMA map failed\n");
+			adapter->rx_dma_failed++;
+			break;
+		}
+
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (rx_ring->next_to_use != i) {
+		rx_ring->next_to_use = i;
+		if (i-- == 0)
+			i = (rx_ring->count - 1);
+
+		/*
+		 * Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->tail);
+	}
+
+	return refill_count;
+}
+
+/* Click polling extension */
+static int e1000_tx_pqueue(struct net_device *netdev, struct sk_buff *skb)
+{
+	int res = e1000_xmit_frame(skb, netdev);
+	return res;
+}
+
+/* Click polling extension */
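+/* End-of-batch hook: a no-op, since e1000_xmit_frame() already writes
+ * the TX tail register for each frame it queues. */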
+static int e1000_tx_eob(struct net_device *netdev)
+{
+	return 0;
+}
+
+/* Click polling extension */
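+/* Queue-restart hook: also a no-op, for the same reason as tx_eob. */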
+static int e1000_tx_start(struct net_device *netdev)
+{
+	return 0;
+}
+
+/* Click polling extension */
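+/*
+ * Reclaim completed TX descriptors and hand the finished skbs back to
+ * the caller as a NULL-terminated chain instead of freeing them here.
+ */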
+static struct sk_buff *e1000_tx_clean(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	bool cleaned = false;
+	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+	struct sk_buff *skb_head, *skb_last;
+
+	skb_head = skb_last = NULL;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+		for (cleaned = 0; !cleaned; ) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			if (cleaned) {
+				struct sk_buff *skb = buffer_info->skb;
+#ifdef NETIF_F_TSO
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+					    skb->len;
+				total_tx_packets += segs;
+				total_tx_bytes += bytecount;
+#else
+				total_tx_packets++;
+				total_tx_bytes += skb->len;
+#endif
+			}
+
+			if (buffer_info->dma) {
+				pci_unmap_page(adapter->pdev,
+					buffer_info->dma,
+					buffer_info->length,
+					PCI_DMA_TODEVICE);
+				buffer_info->dma = 0;
+			}
+
+			if (buffer_info->skb) {
+				struct sk_buff *skb = buffer_info->skb;
+				if (!skb_head) {
+					skb_head = skb;
+					skb_last = skb;
+					skb_last->next = NULL;
+				} else {
+					skb_last->next = skb;
+					skb->next = NULL;
+					skb_last = skb;
+				}
+				buffer_info->skb = NULL;
+			}
+			buffer_info->time_stamp = 0;
+			tx_desc->upper.data = 0;
+
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+#ifdef CONFIG_E1000E_NAPI
+			if (total_tx_packets >= tx_ring->count)
+				goto done_cleaning;
+#endif
+		}
+
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+#ifdef CONFIG_E1000E_NAPI
+done_cleaning:
+#endif
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	if (cleaned && netif_carrier_ok(netdev) &&
+	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
+		/*
+		 * Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (netif_queue_stopped(netdev) &&
+		    !(test_bit(__E1000_DOWN, &adapter->state))) {
+			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
+		adapter->detect_tx_hung = 0;
+		if (tx_ring->buffer_info[eop].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
+			       + (adapter->tx_timeout_factor * HZ))
+		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+			e1000_print_tx_hang(adapter);
+			netif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	adapter->net_stats.tx_bytes += total_tx_bytes;
+	adapter->net_stats.tx_packets += total_tx_packets;
+
+	return skb_head;
+}
+
+/* Click polling extension */
+static int e1000_poll_off(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (netdev->polling > 0) {
+		netdev->polling = 0;
+		e1000_irq_enable(adapter);
+	}
+
+	return 0;
+}
+
+/* Click polling extension */
+static int e1000_poll_on(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+
+	if (!netdev->polling) {
+		local_irq_save(flags);
+
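+		/* any nonzero value means polling is on; 2 follows the
+		 * convention of Click's other patched drivers */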
+		netdev->polling = 2;
+
+		e1000_irq_disable(adapter);
+
+		local_irq_restore(flags);
+	}
+
+	return 0;
+}
+
 /**
  * e1000_probe - Device Initialization Routine
  * @pdev: PCI device information struct
@@ -5248,6 +5606,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	netdev->poll_controller		= e1000_netpoll;
 #endif
+
+	/* Click polling extensions */
+	netdev->polling = 0;
+	netdev->rx_poll = e1000_rx_poll;
+	netdev->rx_refill = e1000_rx_refill;
+	netdev->tx_queue = e1000_tx_pqueue;
+	netdev->tx_eob = e1000_tx_eob;
+	netdev->tx_start = e1000_tx_start;
+	netdev->tx_clean = e1000_tx_clean;
+	netdev->poll_off = e1000_poll_off;
+	netdev->poll_on = e1000_poll_on;
+
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
 	hw->mac.ops.get_bus_info(&adapter->hw);