Hi,

I am releasing this Click polling patch for the ixgbe 10GbE driver. It can
be applied to ixgbe-1.1.21.  I usually compile it with the option -DIXGBE_NO_LRO
and have used it with kernel version 2.6.19.2. Compile it with

make CFLAGS_EXTRA="-DIXGBE_NO_LRO" install

and then load with modprobe

modprobe ixgbe RxQueues=1,1

This version of the polling driver supports single-queue operation. I
am working on multi-queue support, and once that is stable enough I
will release it. The patch is attached.

Cheers,

Maziar Manesh
diff -ruNbB ixgbe-1.1.21/src/ixgbe_82598.c ixgbe-1.1.21patched/src/ixgbe_82598.c
--- ixgbe-1.1.21/src/ixgbe_82598.c	2007-09-12 12:13:14.000000000 -0700
+++ ixgbe-1.1.21patched/src/ixgbe_82598.c	2008-08-13 23:20:46.000000000 -0700
@@ -240,6 +240,7 @@
 	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
 	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
 	case IXGBE_DEV_ID_82598EB_CX4:
+	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
 		media_type = ixgbe_media_type_fiber;
 		break;
 	default:
@@ -545,7 +546,7 @@
 	 */
 	ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
 	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
-		/* Enable TX Atlas so packets can be transmitted again */
+		/* Enable TX Atlas so packets can be transmitted again */
 		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
 		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
 		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val);
diff -ruNbB ixgbe-1.1.21/src/ixgbe_api.c ixgbe-1.1.21patched/src/ixgbe_api.c
--- ixgbe-1.1.21/src/ixgbe_api.c	2007-09-12 12:13:14.000000000 -0700
+++ ixgbe-1.1.21patched/src/ixgbe_api.c	2008-08-13 23:21:11.000000000 -0700
@@ -59,6 +59,7 @@
 		case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
 		case IXGBE_DEV_ID_82598AF_DUAL_PORT:
 		case IXGBE_DEV_ID_82598EB_CX4:
+		case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
 			status = ixgbe_init_shared_code_82598(hw);
 			status = ixgbe_init_shared_code_phy(hw);
 			break;
diff -ruNbB ixgbe-1.1.21/src/ixgbe.h ixgbe-1.1.21patched/src/ixgbe.h
--- ixgbe-1.1.21/src/ixgbe.h	2007-09-12 12:13:14.000000000 -0700
+++ ixgbe-1.1.21patched/src/ixgbe.h	2008-08-06 17:25:00.000000000 -0700
@@ -216,6 +216,12 @@
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
 
+/* for Click lockup detection */
+#define IXGBE_RX_STATE_NORMAL 0
+#define IXGBE_RX_STATE_QUIET  1
+#define IXGBE_RX_STATE_LOCKUP 2
+#define IXGBE_RX_STATE_EMPTY_POLL 3
+
 /* board specific private data structure */
 struct ixgbe_adapter {
 	struct timer_list watchdog_timer;
@@ -298,6 +304,17 @@
 	unsigned long state;
 	u32 *config_space;
 	u64 tx_busy;
+
+   int do_poll_watchdog; /* set from the watchdog timer while polling; serviced in ixgbe_rx_refill() */
+
+  /* Receive Lockup detection and recovery */
+  int rx_state;              /* one of IXGBE_RX_STATE_{NORMAL,QUIET,LOCKUP,EMPTY_POLL} */
+  int rx_lockup_recoveries;  /* # of times the recovery seq is invoked */
+  unsigned long rx_normal_jiffies;     /* jiffies timeout for the NORMAL state */
+  unsigned long rx_quiet_jiffies;      /* jiffies timeout for the QUIET state */
+  int prev_rdfh;             /* prev value of Rcv Data Fifo Head register */
+  int prev_rdft;             /* prev value of Rcv Data Fifo Tail register */
+  
 };
 
 enum ixbge_state_t {
diff -ruNbB ixgbe-1.1.21/src/ixgbe_main.c ixgbe-1.1.21patched/src/ixgbe_main.c
--- ixgbe-1.1.21/src/ixgbe_main.c	2007-09-12 12:13:14.000000000 -0700
+++ ixgbe-1.1.21patched/src/ixgbe_main.c	2009-02-12 18:07:58.000000000 -0800
@@ -64,15 +64,16 @@
 #define LRO
 #endif
 
+#define MULTIQUEUE "-click-SQ"
 #ifndef CONFIG_IXGBE_NAPI
 #define DRIVERNAPI
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
 #ifndef IXGBE_NO_LRO
-#define DRV_VERSION "1.1.21" DRIVERNAPI LRO
+#define DRV_VERSION "1.1.21" DRIVERNAPI LRO MULTIQUEUE
 #else
-#define DRV_VERSION "1.1.21" DRIVERNAPI
+#define DRV_VERSION "1.1.21" DRIVERNAPI MULTIQUEUE
 #endif
 char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
@@ -89,7 +90,7 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)},
-
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)},
 	/* required last entry */
 	{0, }
 };
@@ -111,6 +112,17 @@
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+/* For Click polling */
+static int ixgbe_tx_pqueue(struct net_device *dev, struct sk_buff *skb);
+static int ixgbe_tx_start(struct net_device *dev);
+static int ixgbe_rx_refill(struct net_device *dev, struct sk_buff **);
+static int ixgbe_tx_eob(struct net_device *dev);
+static struct sk_buff *ixgbe_tx_clean(struct net_device *dev);
+static struct sk_buff *ixgbe_rx_poll(struct net_device *dev, int *want);
+static int ixgbe_poll_on(struct net_device *dev);
+static int ixgbe_poll_off(struct net_device *dev);
+static void ixgbe_watchdog_1(struct ixgbe_adapter *adapter);
+
 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
 			   u8 msix_vector)
 {
@@ -1767,12 +1779,15 @@
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
+		txdctl |= (8 << 16);	/* descriptor write-back threshold (WTHRESH=8?) -- TODO confirm against 82598 TXDCTL layout */
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
 	}
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
+		rxdctl |= 0x0020;	/* threshold magic for polling mode -- NOTE(review): verify bit meaning in 82598 RXDCTL */
+		rxdctl |= (4 << 16);	/* descriptor write-back threshold (WTHRESH=4?) -- TODO confirm */
 		rxdctl |= IXGBE_RXDCTL_ENABLE;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
 	}
@@ -2662,6 +2678,20 @@
 static void ixgbe_watchdog(unsigned long data)
 {
 	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+
+  if(adapter->netdev->polling){
+    adapter->do_poll_watchdog = 1; /* defer the real work to ixgbe_rx_refill(): don't touch the NIC from irq context while Click is polling */
+  } else {
+    ixgbe_watchdog_1(adapter);
+  }
+
+  /* Re-arm the timer -- but never once the interface is down, or it keeps firing after ixgbe_down() */
+  if (!test_bit(__IXGBE_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+}
+
+static void ixgbe_watchdog_1(struct ixgbe_adapter *adapter)
+{
+  /* link-status/stats half of the original watchdog; timer re-arming is handled by ixgbe_watchdog() above */
 	struct net_device *netdev = adapter->netdev;
 	bool link_up;
 	u32 link_speed = 0;
@@ -2701,9 +2731,9 @@
 	ixgbe_update_stats(adapter);
 
 	/* Reset the timer */
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		mod_timer(&adapter->watchdog_timer,
-		          round_jiffies(jiffies + 2 * HZ));
+	//if (!test_bit(__IXGBE_DOWN, &adapter->state))
+	//	mod_timer(&adapter->watchdog_timer,
+	//	          round_jiffies(jiffies + 2 * HZ));
 }
 
 #define IXGBE_MAX_TXD_PWR	14
@@ -3287,6 +3317,18 @@
 	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
 #endif
 
+/* Click - polling extensions */
+        netdev->polling = 0;
+        netdev->rx_poll = ixgbe_rx_poll;
+        netdev->rx_refill = ixgbe_rx_refill;
+        netdev->tx_queue = ixgbe_tx_pqueue;
+        netdev->tx_eob = ixgbe_tx_eob;
+        netdev->tx_start = ixgbe_tx_start;
+        netdev->tx_clean = ixgbe_tx_clean;
+        netdev->poll_off = ixgbe_poll_off;
+        netdev->poll_on = ixgbe_poll_on;
+
+
 #ifdef NETIF_F_TSO
 	netdev->features |= NETIF_F_TSO;
 
@@ -3641,4 +3683,454 @@
 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
 #endif
+
+/*******************************************/
+/***** click polling extension *************/
+/*******************************************/
+static void click_ixgbe_setup_rctl(struct ixgbe_adapter *adapter) {
+  /* Re-arm RX/TX after a reset in Click polling mode: program max frame size and per-queue descriptor control, then globally enable receives (cf. ixgbe_up()). */
+  struct net_device *dev = adapter->netdev;
+  u32  txdctl, rxdctl, mhadd;
+  struct ixgbe_hw *hw = &adapter->hw;
+  unsigned int i;
+  int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+  
+  mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+  
+  if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
+    mhadd &= ~IXGBE_MHADD_MFS_MASK;
+    mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;  
+    IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+  }
+  
+  for (i = 0; i < adapter->num_tx_queues; i++) {
+    txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
+    txdctl |= (8 << 16);    /* descriptor write-back threshold (WTHRESH=8?) -- TODO confirm against 82598 TXDCTL layout */
+    txdctl |= IXGBE_TXDCTL_ENABLE;
+    IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
+  }
+  
+  for (i = 0; i < adapter->num_rx_queues; i++) {
+    rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
+    rxdctl |= 0x0020;    /* threshold magic for polling mode -- NOTE(review): verify bit meaning in 82598 RXDCTL */
+    rxdctl |= (4 << 16);    /* descriptor write-back threshold (WTHRESH=4?) -- TODO confirm */
+    rxdctl |= IXGBE_RXDCTL_ENABLE;
+    IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
+  }
+  
+  /* enable all receives (DMBYPS: descriptor monitor bypass -- NOTE(review): confirm why it is set here) */
+  rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+  rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
+  IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
+  
+  clear_bit(__IXGBE_DOWN, &adapter->state);
+  IXGBE_WRITE_FLUSH(&adapter->hw);
+  
+}
+/* Click poll_on hook: take the NIC out of interrupt mode.  Disables irqs, resets and re-initializes the rings, refills RX, and marks the netdev as polled.  No-op when already polling. */
+static int ixgbe_poll_on (struct net_device *dev) {
+  
+  struct ixgbe_adapter *adapter = dev->priv;
+  unsigned long flags;
+  unsigned int i;
+  
+  if (!dev->polling) {
+    printk("ixgbe_poll_on\n");
+    local_irq_save(flags);
+    local_irq_disable();    /* redundant: local_irq_save() already disabled local irqs */
+    ixgbe_irq_disable(adapter);
+    
+    /* reset the card - start in a clean state */
+    /* taken from ixgbe_down() */
+    ixgbe_reset(adapter);
+    ixgbe_irq_disable(adapter);
+    ixgbe_clean_all_tx_rings(adapter);
+    ixgbe_clean_all_rx_rings(adapter);
+    
+    /* taken from ixgbe_up() */
+    ixgbe_set_multi(dev);
+    ixgbe_configure_tx(adapter);
+    ixgbe_configure_rx(adapter);
+    //this will set all rx queues
+    for (i = 0; i < adapter->num_rx_queues; i++)
+      ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
+			     (adapter->rx_ring[i].count - 1));
+      
+    click_ixgbe_setup_rctl(adapter);
+    
+    dev->polling = 2;    /* 2 = full Click polling (Click supplies rx buffers) -- NOTE(review): confirm against Click's FromDevice */
+    adapter->rx_state = IXGBE_RX_STATE_EMPTY_POLL;
+    local_irq_restore(flags);
+	
+  }//end if
+  
+  return 0;
+}
+/* Click poll_off hook: leave polling mode and re-enable the NIC's interrupts. */
+static int ixgbe_poll_off(struct net_device *dev)
+{
+    struct ixgbe_adapter *adapter = dev->priv;
+    
+    if(dev->polling > 0){
+      dev->polling = 0;
+      ixgbe_irq_enable(adapter);
+      printk("ixgbe_poll_off\n");
+    }
+    
+    return 0;
+}
+/* Click rx_poll hook: hand Click up to *want received packets as a NULL-terminated skb chain; *want is updated to the number delivered.  Also drives the RX-lockup detection state machine. */
+static struct sk_buff * ixgbe_rx_poll(struct net_device *dev, int *want)
+{
+  struct ixgbe_adapter *adapter = dev->priv;
+  struct pci_dev *pdev = adapter->pdev;
+  union ixgbe_adv_rx_desc *rx_desc;
+  struct ixgbe_ring *rx_ring = adapter->rx_ring;
+  struct sk_buff *skb_head = NULL, **skb;
+
+  int got, next, i;
+  u32 len, staterr;
+  /* 'skb' always points at the chain's tail link so packets append in arrival order */
+  skb = &skb_head;
+  /* walk DD-completed descriptors; stop one slot short of next_to_use */
+  for( got = 0, next = (rx_ring->next_to_clean + 1) % rx_ring->count;
+       got < *want && next != rx_ring->next_to_use;
+       got++, rx_ring->next_to_clean = next,
+	 next = (rx_ring->next_to_clean + 1) % rx_ring->count) {
+    
+    int i = rx_ring->next_to_clean;  /* shadows the outer 'i'; the outer one is used only by the recovery code below */
+    rx_desc =  IXGBE_RX_DESC_ADV(*rx_ring, i);
+    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+    if(!(staterr & IXGBE_RXD_STAT_DD)) {
+      break;  /* hardware has not completed this descriptor yet */
+    }
+    
+#if 1 /* must mirror the pci_map_single() done in ixgbe_rx_refill(); DMA unmap must match the map in kind and size */
+    pci_unmap_single(pdev, rx_ring->rx_buffer_info[i].dma,
+		     adapter->rx_buf_len + NET_IP_ALIGN,
+		     PCI_DMA_FROMDEVICE);
+#else
+    pci_unmap_page(pdev, rx_ring->rx_buffer_info[i].dma,
+		   PAGE_SIZE, PCI_DMA_FROMDEVICE);
+#endif 
+    
+    *skb = rx_ring->rx_buffer_info[i].skb;
+    rx_ring->rx_buffer_info[i].skb = NULL;
+    
+    if(!(staterr & IXGBE_RXD_STAT_EOP) ||
+       (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+      rx_desc->wb.upper.status_error = 0;
+      dev_kfree_skb(*skb);
+      *skb = NULL;
+      got--;  /* cancel the loop's got++ for this dropped frame */
+      continue;
+    }
+    
+    len = le16_to_cpu(rx_desc->wb.upper.length);
+    rx_desc->wb.upper.status_error = 0;
+    
+    skb_put(*skb, len);
+    ixgbe_rx_checksum(adapter, staterr, *skb);
+    skb_pull(*skb, dev->hard_header_len);  /* NOTE(review): link header stripped here; Click is expected to push it back */
+    
+    rx_ring->stats.packets++;
+    rx_ring->stats.bytes += len;
+
+    skb = &((*skb)->next);
+    *skb = NULL;
+  } //end for
+  
+  *want = got;  /* report how many we actually delivered */
+
+  /*
+   *  Receive Lockup detection and recovery for ixgbe
+   */  
+
+  if (got) {
+    adapter->rx_state = IXGBE_RX_STATE_NORMAL;
+    adapter->rx_normal_jiffies = jiffies + HZ;
+  } else {
+    int rdfh;
+    int rdft;
+    switch (adapter->rx_state) {
+    case IXGBE_RX_STATE_NORMAL:
+      if (time_before(jiffies, adapter->rx_normal_jiffies))
+	break;
+      adapter->rx_state = IXGBE_RX_STATE_QUIET;
+      adapter->rx_quiet_jiffies = jiffies + HZ;
+      /* getting rdh and rdt of the ring 0 for now */
+      adapter->prev_rdfh = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(0));
+      adapter->prev_rdft = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(0));
+      break;
+    case IXGBE_RX_STATE_QUIET:
+      rdfh = IXGBE_READ_REG(&adapter->hw, IXGBE_RDH(0));
+      rdft = IXGBE_READ_REG(&adapter->hw, IXGBE_RDT(0));
+      if (adapter->prev_rdfh != rdfh ||
+	  adapter->prev_rdft != rdft ||
+	  adapter->prev_rdfh == adapter->prev_rdft) {
+	adapter->prev_rdfh = rdfh;
+	adapter->prev_rdft = rdft;
+	adapter->rx_quiet_jiffies = jiffies + HZ;
+	break;
+      }
+      if (time_before(jiffies, adapter->rx_quiet_jiffies))
+	break;
+      /* Fall into the lockup case */
+    case IXGBE_RX_STATE_LOCKUP:
+      /* Receive lockup detected: perform a recovery */
+      adapter->rx_lockup_recoveries++;
+      /* taken from ixgbe_down() */
+      ixgbe_reset(adapter);
+      ixgbe_irq_disable(adapter);
+      ixgbe_clean_all_tx_rings(adapter);
+      ixgbe_clean_all_rx_rings(adapter);
+      /* taken from ixgbe_up() */
+      ixgbe_set_multi(dev);
+      ixgbe_configure_tx(adapter);
+      ixgbe_configure_rx(adapter);
+      for (i = 0; i < adapter->num_rx_queues; i++)
+	ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
+			       (adapter->rx_ring[i].count - 1));
+      click_ixgbe_setup_rctl(adapter);      
+      /* reset the lockup detection */
+      adapter->rx_state = IXGBE_RX_STATE_EMPTY_POLL;
+      //adapter->rx_normal_jiffies = jiffies + HZ;
+      break;
+    default:
+      break;
+    }
+  }
+  
+  return skb_head;
+}
+/* Click rx_refill hook: move skbs from Click's supplied list onto free RX descriptors; with skbs == NULL it only reports the free-slot count.  Deferred watchdog work also runs here, outside irq context. */
+static int ixgbe_rx_refill(struct net_device *dev, struct sk_buff **skbs)
+{
+  struct ixgbe_adapter *adapter = dev->priv;
+  struct ixgbe_ring *rx_ring = adapter->rx_ring;
+  struct pci_dev *pdev = adapter->pdev;
+  union ixgbe_adv_rx_desc *rx_desc;
+  struct sk_buff *skb;
+  int next;
+  
+  /*
+   * Update statistics counters, check link.
+   * do_poll_watchdog is set by the timer interrupt ixgbe_watchdog(),
+   * but we don't want to do the work in an interrupt (since it may
+   * happen while polling code is active), so defer it to here.
+   */
+  if(adapter->do_poll_watchdog){
+    adapter->do_poll_watchdog = 0;
+    ixgbe_watchdog_1(adapter);
+  }
+  
+  if (!netif_carrier_ok(dev))
+    return 0;
+  
+  if(skbs == 0)
+    return IXGBE_DESC_UNUSED(rx_ring);  /* probe only: how many buffers could be posted */
+  
+  for( next = (rx_ring->next_to_use + 1) % rx_ring->count;
+       next != rx_ring->next_to_clean;
+       rx_ring->next_to_use = next,
+	 next = (rx_ring->next_to_use + 1) % rx_ring->count ) {
+    int i = rx_ring->next_to_use;
+    if(rx_ring->rx_buffer_info[i].skb != NULL)
+      break;  /* slot still holds an unharvested buffer */
+    
+    if(!(skb = *skbs))
+      break;  /* Click's supply list ran dry */
+    *skbs = skb->next;
+    skb->next = NULL;
+    skb->dev = dev;
+    
+    rx_ring->rx_buffer_info[i].skb = skb;
+    rx_ring->rx_buffer_info[i].dma =
+      pci_map_single(pdev,
+                     skb->data,
+                     adapter->rx_buf_len + NET_IP_ALIGN,
+		     PCI_DMA_FROMDEVICE);
+    
+    rx_desc =  IXGBE_RX_DESC_ADV(*rx_ring, i);
+    rx_desc->read.pkt_addr = cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
+    
+  }
+  writel(rx_ring->next_to_use, adapter->hw.hw_addr + rx_ring->tail);  /* publish the new tail to the NIC */
+  IXGBE_WRITE_FLUSH(&adapter->hw);
+
+  return IXGBE_DESC_UNUSED(rx_ring);
+}
+/* Click tx_eob hook: write the TX tail register so the NIC starts DMA on descriptors queued via ixgbe_tx_pqueue(). */
+static int
+ixgbe_tx_eob(struct net_device *dev)
+{
+  struct ixgbe_adapter *adapter = dev->priv;
+  writel(adapter->tx_ring->next_to_use, adapter->hw.hw_addr + adapter->tx_ring->tail);
+  IXGBE_WRITE_FLUSH(&adapter->hw);
+  return 0;
+}
+/* Click tx_start hook: (re)start transmission; on this NIC that is just a tail-register kick, identical to tx_eob. */
+static int
+ixgbe_tx_start(struct net_device *dev)
+{
+  ixgbe_tx_eob(dev);
+  return 0;
+}
+/* Click tx_queue hook: post one linear skb on the TX ring without writing the tail register; the caller batches packets and kicks DMA via ixgbe_tx_eob().  Returns 0 on success, -1 (queue stopped) on no carrier or no room. */
+static int
+ixgbe_tx_pqueue(struct net_device *netdev, struct sk_buff *skb)
+{
+  /*
+   * This function is a streamlined version of
+   * return ixgbe_xmit_frame(skb, netdev);
+   */
+  struct ixgbe_adapter *adapter = netdev_priv(netdev);
+  struct ixgbe_ring *tx_ring = adapter->tx_ring;
+  unsigned int txd_needed, len = skb->len;
+  unsigned int tx_flags = 0;
+  unsigned long flags = 0;
+
+  union ixgbe_adv_tx_desc *tx_desc = NULL;
+  struct ixgbe_tx_buffer *tx_buffer_info;
+  u32 olinfo_status = 0, cmd_type_len = 0;
+  unsigned int i, offset;
+  u32 txd_cmd, paylen;
+  u8 hdr_len =0;
+
+  if (skb->len <= 0) {
+    dev_kfree_skb(skb);
+    return NETDEV_TX_OK;
+  }
+  if(!netif_carrier_ok(netdev)) {
+    netif_stop_queue(netdev);
+    return -1;  /* NOTE(review): skb is not freed; the caller keeps ownership and may retry */
+  }
+ 
+  txd_needed = TXD_USE_COUNT(skb->len);
+ 
+  spin_lock_irqsave(&tx_ring->tx_lock, flags);
+  if (IXGBE_DESC_UNUSED(tx_ring) < (txd_needed)) {
+    adapter->net_stats.tx_dropped++;
+    netif_stop_queue(netdev);
+    spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+    return -1;  /* ring full: stop the queue; ixgbe_tx_clean() restarts it */
+  }
+  spin_unlock_irqrestore(&tx_ring->tx_lock, flags);  /* NOTE(review): the slot is consumed below after the lock is dropped -- relies on callers serializing tx */
+
+  txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+  cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+ 
+  cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+ 
+  if (skb->protocol == ntohs(ETH_P_IP))
+    tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ 
+  if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags)) {
+    olinfo_status |= IXGBE_TXD_POPTS_TXSM << IXGBE_ADVTXD_POPTS_SHIFT;
+  }
+  hdr_len = 0;
+  paylen = skb->len;
+  olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+  i = tx_ring->next_to_use;
+  tx_buffer_info = &tx_ring->tx_buffer_info[i];
+  tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+  tx_buffer_info->length = len;
+  offset = 0;
+  tx_buffer_info->dma = 
+    pci_map_page(adapter->pdev, virt_to_page(skb->data + offset),
+		 (unsigned long) (skb->data + offset) & ~PAGE_MASK, len,
+		 PCI_DMA_TODEVICE);  /* NOTE(review): maps the linear area only; skb frags are not handled -- confirm Click never queues paged skbs */
+  
+  tx_buffer_info->time_stamp = jiffies;
+  
+  tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+  tx_desc->read.cmd_type_len =
+    cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+  tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+  tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);  /* EOP|RS|IFCS: one descriptor per packet, write back completion status */
+  tx_ring->tx_buffer_info[i].skb = skb;
+ 
+ i++;
+ if (i >= tx_ring->count)
+   i = 0;
+
+ /* Move the HW Tx Tail Pointer */
+ tx_ring->next_to_use = i;
+
+ netdev->trans_start = jiffies;
+
+ return 0;
+} //end tx_pqueue
+/* Click tx_clean hook: reap descriptors the NIC has completed (DD set) and hand the transmitted skbs back to Click as a NULL-terminated chain for reuse. */
+static struct sk_buff *
+ixgbe_tx_clean(struct net_device *netdev)
+{
+  /*
+   * This function is a streamlined version of
+   * return ixgbe_clean_tx_irq(adapter, 1);
+   */
+
+  struct ixgbe_adapter *adapter = netdev->priv;
+  struct pci_dev *pdev = adapter->pdev;
+  unsigned int i;
+  union ixgbe_adv_tx_desc *tx_desc;
+  struct sk_buff *skb_head, *skb_last;
+  struct ixgbe_ring *tx_ring = adapter->tx_ring;
+
+  skb_head = skb_last = 0;
+  
+  i = tx_ring->next_to_clean;
+  tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+  while (tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
+    if(tx_ring->tx_buffer_info[i].dma != 0) {
+      pci_unmap_page(pdev, tx_ring->tx_buffer_info[i].dma,
+                     tx_ring->tx_buffer_info[i].length,
+                     PCI_DMA_TODEVICE);
+      tx_ring->tx_buffer_info[i].dma = 0;
+    }
+    
+    if(tx_ring->tx_buffer_info[i].skb != NULL) {
+      struct sk_buff *skb = tx_ring->tx_buffer_info[i].skb;
+      if (skb_head == 0) {
+        skb_head = skb;
+        skb_last = skb;
+        skb_last->next = NULL;
+      } else {
+        skb_last->next = skb;
+        skb->next = NULL;
+        skb_last = skb;
+      }
+      tx_ring->tx_buffer_info[i].skb = NULL;
+    }
+
+    /* update stats for this tx ring */
+    tx_ring->stats.bytes += tx_ring->tx_buffer_info[i].length;  /* 'length' is still valid; only ->dma was zeroed above */
+    tx_ring->stats.packets++;
+   
+    i = (i + 1) % tx_ring->count;
+    
+    tx_desc->wb.status = 0;
+    tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+    
+  }//end while
+  
+  tx_ring->next_to_clean = i;
+  
+#define IXGBE_TX_QUEUE_WAKE 16
+  if (skb_head && netif_carrier_ok(netdev) &&
+      (IXGBE_DESC_UNUSED(tx_ring) >= IXGBE_TX_QUEUE_WAKE)) {
+    smp_mb();  /* order the ring-index update above against the queue-stopped test below */
+    if (netif_queue_stopped(netdev) &&
+	!test_bit(__IXGBE_DOWN, &adapter->state)) {
+      netif_start_queue(netdev);
+      adapter->restart_queue++;
+    }
+  }
+  
+  return skb_head;
+}//tx_clean
+
+
 /* ixgbe_main.c */
diff -ruNbB ixgbe-1.1.21/src/ixgbe_type.h ixgbe-1.1.21patched/src/ixgbe_type.h
--- ixgbe-1.1.21/src/ixgbe_type.h	2007-09-12 12:13:14.000000000 -0700
+++ ixgbe-1.1.21patched/src/ixgbe_type.h	2008-08-13 18:31:39.000000000 -0700
@@ -38,6 +38,7 @@
 #define IXGBE_DEV_ID_82598AF_DUAL_PORT   0x10C6
 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
 #define IXGBE_DEV_ID_82598EB_CX4         0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
 
 /* General Registers */
 #define IXGBE_CTRL      0x00000
_______________________________________________
click mailing list
[email protected]
https://amsterdam.lcs.mit.edu/mailman/listinfo/click

Reply via email to