Author: jfv
Date: Fri Nov 26 22:46:32 2010
New Revision: 215911
URL: http://svn.freebsd.org/changeset/base/215911

Log:
  Update ixgbe driver to version 2.3.6
        - This adds a VM SRIOV (virtual function) interface, ixv.
          It is transparent to the user: it is linked into ixgbe.ko,
          and when ixgbe is loaded in a virtualized guest with SRIOV
          configured, it is detected automatically (a configuration
          sketch follows this log).
        - Sync shared code to latest
        - Many bug fixes and improvements, thanks to everyone
          who has been using the driver and reporting issues.
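
Since ixv.c, ixgbe_mbx.c and ixgbe_vf.c are built under the existing
"optional ixgbe inet" entries in sys/conf/files (first hunk below), no
new kernel option or module is introduced; a guest kernel picks up the
VF path with the same configuration already used for the PF driver.  A
minimal sketch of the two usual ways to enable it, assuming the stock
option and module names (ixgbe in both cases, per sys/conf/files and
sys/modules/ixgbe/Makefile in the file list below):

  # static kernel configuration (the driver is conditional on INET):
  options         INET
  device          ixgbe           # Intel 10 gigabit PCIe PF/VF driver

  # or load the module at boot from /boot/loader.conf:
  ixgbe_load="YES"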

Added:
  head/sys/dev/ixgbe/ixgbe_mbx.c   (contents, props changed)
  head/sys/dev/ixgbe/ixgbe_mbx.h   (contents, props changed)
  head/sys/dev/ixgbe/ixgbe_vf.c   (contents, props changed)
  head/sys/dev/ixgbe/ixgbe_vf.h   (contents, props changed)
  head/sys/dev/ixgbe/ixv.c   (contents, props changed)
  head/sys/dev/ixgbe/ixv.h   (contents, props changed)
Modified:
  head/sys/conf/files
  head/sys/dev/ixgbe/ixgbe.c
  head/sys/dev/ixgbe/ixgbe.h
  head/sys/dev/ixgbe/ixgbe_82598.c
  head/sys/dev/ixgbe/ixgbe_82599.c
  head/sys/dev/ixgbe/ixgbe_api.c
  head/sys/dev/ixgbe/ixgbe_api.h
  head/sys/dev/ixgbe/ixgbe_common.c
  head/sys/dev/ixgbe/ixgbe_common.h
  head/sys/dev/ixgbe/ixgbe_osdep.h
  head/sys/dev/ixgbe/ixgbe_phy.c
  head/sys/dev/ixgbe/ixgbe_phy.h
  head/sys/dev/ixgbe/ixgbe_type.h
  head/sys/modules/ixgbe/Makefile

Modified: head/sys/conf/files
==============================================================================
--- head/sys/conf/files Fri Nov 26 22:36:47 2010        (r215910)
+++ head/sys/conf/files Fri Nov 26 22:46:32 2010        (r215911)
@@ -1252,12 +1252,18 @@ dev/ixgb/ixgb_ee.c              optional ixgb
 dev/ixgb/ixgb_hw.c             optional ixgb
 dev/ixgbe/ixgbe.c              optional ixgbe inet \
        compile-with "${NORMAL_C} -I$S/dev/ixgbe"
+dev/ixgbe/ixv.c                        optional ixgbe inet \
+       compile-with "${NORMAL_C} -I$S/dev/ixgbe"
 dev/ixgbe/ixgbe_phy.c          optional ixgbe inet \
        compile-with "${NORMAL_C} -I$S/dev/ixgbe"
 dev/ixgbe/ixgbe_api.c          optional ixgbe inet \
        compile-with "${NORMAL_C} -I$S/dev/ixgbe"
 dev/ixgbe/ixgbe_common.c       optional ixgbe inet \
        compile-with "${NORMAL_C} -I$S/dev/ixgbe"
+dev/ixgbe/ixgbe_mbx.c          optional ixgbe inet \
+       compile-with "${NORMAL_C} -I$S/dev/ixgbe"
+dev/ixgbe/ixgbe_vf.c           optional ixgbe inet \
+       compile-with "${NORMAL_C} -I$S/dev/ixgbe"
 dev/ixgbe/ixgbe_82598.c                optional ixgbe inet \
        compile-with "${NORMAL_C} -I$S/dev/ixgbe"
 dev/ixgbe/ixgbe_82599.c                optional ixgbe inet \

Modified: head/sys/dev/ixgbe/ixgbe.c
==============================================================================
--- head/sys/dev/ixgbe/ixgbe.c  Fri Nov 26 22:36:47 2010        (r215910)
+++ head/sys/dev/ixgbe/ixgbe.c  Fri Nov 26 22:46:32 2010        (r215911)
@@ -46,7 +46,7 @@ int             ixgbe_display_debug_stat
 /*********************************************************************
  *  Driver version
  *********************************************************************/
-char ixgbe_driver_version[] = "2.2.3";
+char ixgbe_driver_version[] = "2.3.6";
 
 /*********************************************************************
  *  PCI Device ID Table
@@ -78,6 +78,8 @@ static ixgbe_vendor_info_t ixgbe_vendor_
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
 };
@@ -119,7 +121,7 @@ static int  ixgbe_allocate_queues(struct 
 static int     ixgbe_setup_msix(struct adapter *);
 static void    ixgbe_free_pci_resources(struct adapter *);
 static void     ixgbe_local_timer(void *);
-static int      ixgbe_setup_interface(device_t, struct adapter *);
+static void     ixgbe_setup_interface(device_t, struct adapter *);
 static void     ixgbe_config_link(struct adapter *);
 
 static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
@@ -144,7 +146,6 @@ static bool ixgbe_txeof(struct tx_ring *
 static bool    ixgbe_rxeof(struct ix_queue *, int);
 static void    ixgbe_rx_checksum(u32, struct mbuf *, u32);
 static void     ixgbe_set_promisc(struct adapter *);
-static void     ixgbe_disable_promisc(struct adapter *);
 static void     ixgbe_set_multi(struct adapter *);
 static void     ixgbe_print_hw_stats(struct adapter *);
 static void    ixgbe_print_debug_info(struct adapter *);
@@ -212,7 +213,7 @@ static driver_t ixgbe_driver = {
        "ix", ixgbe_methods, sizeof(struct adapter),
 };
 
-static devclass_t ixgbe_devclass;
+devclass_t ixgbe_devclass;
 DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
 
 MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
@@ -257,7 +258,7 @@ TUNABLE_INT("hw.ixgbe.enable_msix", &ixg
 
 /*
  * Header split: this causes the hardware to DMA
- * the header into a separate mbuf from the payload,
+ * the header into a seperate mbuf from the payload,
  * it can be a performance win in some workloads, but
  * in others it actually hurts, its off by default. 
  */
@@ -289,13 +290,6 @@ TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
 static int ixgbe_total_ports;
 
 /*
-** Shadow VFTA table, this is needed because
-** the real filter table gets cleared during
-** a soft reset and we need to repopulate it.
-*/
-static u32 ixgbe_shadow_vfta[IXGBE_VFTA_SIZE];
-
-/*
 ** The number of scatter-gather segments
 ** differs for 82598 and 82599, default to
 ** the former.
@@ -446,6 +440,7 @@ ixgbe_attach(device_t dev)
                        ixgbe_num_segs = IXGBE_82599_SCATTER;
                        adapter->optics = IFM_10G_T;
                default:
+                       ixgbe_num_segs = IXGBE_82599_SCATTER;
                        break;
        }
 
@@ -524,15 +519,6 @@ ixgbe_attach(device_t dev)
                goto err_out;
        }
 
-       /* Allocate multicast array memory. */
-       adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
-           MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
-       if (adapter->mta == NULL) {
-               device_printf(dev, "Can not allocate multicast setup array\n");
-               error = ENOMEM;
-               goto err_late;
-       }
-
        /* Initialize the shared code */
        error = ixgbe_init_shared_code(hw);
        if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
@@ -595,8 +581,7 @@ ixgbe_attach(device_t dev)
                goto err_late;
 
        /* Setup OS specific network interface */
-       if (ixgbe_setup_interface(dev, adapter) != 0)
-               goto err_late;
+       ixgbe_setup_interface(dev, adapter);
 
        /* Sysctl for limiting the amount of work done in the taskqueue */
        ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
@@ -642,10 +627,7 @@ err_late:
        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);
 err_out:
-       if (adapter->ifp != NULL)
-               if_free(adapter->ifp);
        ixgbe_free_pci_resources(adapter);
-       free(adapter->mta, M_DEVBUF);
        return (error);
 
 }
@@ -716,7 +698,6 @@ ixgbe_detach(device_t dev)
 
        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);
-       free(adapter->mta, M_DEVBUF);
 
        IXGBE_CORE_LOCK_DESTROY(adapter);
        return (0);
@@ -780,8 +761,8 @@ ixgbe_start_locked(struct tx_ring *txr, 
                ETHER_BPF_MTAP(ifp, m_head);
 
                /* Set watchdog on */
-               txr->watchdog_check = TRUE;
                txr->watchdog_time = ticks;
+               txr->queue_status = IXGBE_QUEUE_WORKING;
 
        }
        return;
@@ -851,6 +832,10 @@ ixgbe_mq_start_locked(struct ifnet *ifp,
                return (err);
        }
 
+       /* Call cleanup if number of TX descriptors low */
+       if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD)
+               ixgbe_txeof(txr);
+
        enqueued = 0;
        if (m == NULL) {
                next = drbr_dequeue(ifp, txr->br);
@@ -883,7 +868,7 @@ ixgbe_mq_start_locked(struct ifnet *ifp,
 
        if (enqueued > 0) {
                /* Set watchdog on */
-               txr->watchdog_check = TRUE;
+               txr->queue_status = IXGBE_QUEUE_WORKING;
                txr->watchdog_time = ticks;
        }
 
@@ -948,7 +933,6 @@ ixgbe_ioctl(struct ifnet * ifp, u_long c
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                if ((ifp->if_flags ^ adapter->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
-                                       ixgbe_disable_promisc(adapter);
                                        ixgbe_set_promisc(adapter);
                                 }
                        } else
@@ -987,6 +971,8 @@ ixgbe_ioctl(struct ifnet * ifp, u_long c
                        ifp->if_capenable ^= IFCAP_LRO;
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+               if (mask & IFCAP_VLAN_HWFILTER)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixgbe_init_locked(adapter);
@@ -1041,6 +1027,18 @@ ixgbe_init_locked(struct adapter *adapte
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
        hw->addr_ctrl.rar_used_count = 1;
 
+       /* Set the various hardware offload abilities */
+       ifp->if_hwassist = 0;
+       if (ifp->if_capenable & IFCAP_TSO4)
+               ifp->if_hwassist |= CSUM_TSO;
+       if (ifp->if_capenable & IFCAP_TXCSUM) {
+               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+               if (hw->mac.type == ixgbe_mac_82599EB)
+                       ifp->if_hwassist |= CSUM_SCTP;
+#endif
+       }
+
        /* Prepare transmit descriptors and buffers */
        if (ixgbe_setup_transmit_structures(adapter)) {
                device_printf(dev,"Could not setup transmit structures\n");
@@ -1058,10 +1056,12 @@ ixgbe_init_locked(struct adapter *adapte
        ** Determine the correct mbuf pool
        ** for doing jumbo/headersplit
        */
-       if (ifp->if_mtu > ETHERMTU)
+       if (adapter->max_frame_size <= 2048)
+               adapter->rx_mbuf_sz = MCLBYTES;
+       else if (adapter->max_frame_size <= 4096)
                adapter->rx_mbuf_sz = MJUMPAGESIZE;
        else
-               adapter->rx_mbuf_sz = MCLBYTES;
+               adapter->rx_mbuf_sz = MJUM9BYTES;
 
        /* Prepare receive descriptors and buffers */
        if (ixgbe_setup_receive_structures(adapter)) {
@@ -1092,18 +1092,6 @@ ixgbe_init_locked(struct adapter *adapte
        }
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 
-       /* Set the various hardware offload abilities */
-       ifp->if_hwassist = 0;
-       if (ifp->if_capenable & IFCAP_TSO4)
-               ifp->if_hwassist |= CSUM_TSO;
-       if (ifp->if_capenable & IFCAP_TXCSUM) {
-               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
-#if __FreeBSD_version >= 800000
-               if (hw->mac.type == ixgbe_mac_82599EB)
-                       ifp->if_hwassist |= CSUM_SCTP;
-#endif
-       }
-
        /* Set MTU size */
        if (ifp->if_mtu > ETHERMTU) {
                mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
@@ -1146,7 +1134,7 @@ ixgbe_init_locked(struct adapter *adapte
                IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
        }
 
-       /* Set up VLAN offloads and filter */
+       /* Set up VLAN support and filter */
        ixgbe_setup_vlan_hw_support(adapter);
 
        /* Enable Receive engine */
@@ -1760,10 +1748,6 @@ ixgbe_xmit(struct tx_ring *txr, struct m
        ++txr->total_packets;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
 
-       /* Do a clean if descriptors are low */
-       if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD)
-               ixgbe_txeof(txr);
-
        return (0);
 
 xmit_fail:
@@ -1775,11 +1759,13 @@ xmit_fail:
 static void
 ixgbe_set_promisc(struct adapter *adapter)
 {
-
        u_int32_t       reg_rctl;
        struct ifnet   *ifp = adapter->ifp;
 
        reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+       reg_rctl &= (~IXGBE_FCTRL_UPE);
+       reg_rctl &= (~IXGBE_FCTRL_MPE);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
 
        if (ifp->if_flags & IFF_PROMISC) {
                reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
@@ -1792,20 +1778,6 @@ ixgbe_set_promisc(struct adapter *adapte
        return;
 }
 
-static void
-ixgbe_disable_promisc(struct adapter * adapter)
-{
-       u_int32_t       reg_rctl;
-
-       reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-
-       reg_rctl &= (~IXGBE_FCTRL_UPE);
-       reg_rctl &= (~IXGBE_FCTRL_MPE);
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
-
-       return;
-}
-
 
 /*********************************************************************
  *  Multicast Update
@@ -1819,7 +1791,7 @@ static void
 ixgbe_set_multi(struct adapter *adapter)
 {
        u32     fctrl;
-       u8      *mta;
+       u8      mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
        u8      *update_ptr;
        struct  ifmultiaddr *ifma;
        int     mcnt = 0;
@@ -1827,10 +1799,6 @@ ixgbe_set_multi(struct adapter *adapter)
 
        IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
 
-       mta = adapter->mta;
-       bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
-           MAX_NUM_MULTICAST_ADDRESSES);
-
        fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
        fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        if (ifp->if_flags & IFF_PROMISC)
@@ -1923,19 +1891,14 @@ ixgbe_local_timer(void *arg)
         */
        if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
                goto out;
+
        /*
-       ** Check for time since any descriptor was cleaned
+       ** Check status on the TX queues for a hang
        */
-        for (int i = 0; i < adapter->num_queues; i++, txr++) {
-               IXGBE_TX_LOCK(txr);
-               if (txr->watchdog_check == FALSE) {
-                       IXGBE_TX_UNLOCK(txr);
-                       continue;
-               }
-               if ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG)
+        for (int i = 0; i < adapter->num_queues; i++, txr++)
+               if (txr->queue_status == IXGBE_QUEUE_HUNG)
                        goto hung;
-               IXGBE_TX_UNLOCK(txr);
-       }
+
 out:
                ixgbe_rearm_queues(adapter, adapter->que_mask);
        callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
@@ -1985,7 +1948,7 @@ ixgbe_update_link_status(struct adapter 
                        adapter->link_active = FALSE;
                        for (int i = 0; i < adapter->num_queues;
                            i++, txr++)
-                               txr->watchdog_check = FALSE;
+                               txr->queue_status = IXGBE_QUEUE_IDLE;
                }
        }
 
@@ -2005,6 +1968,7 @@ ixgbe_stop(void *arg)
 {
        struct ifnet   *ifp;
        struct adapter *adapter = arg;
+       struct ixgbe_hw *hw = &adapter->hw;
        ifp = adapter->ifp;
 
        mtx_assert(&adapter->core_mtx, MA_OWNED);
@@ -2015,9 +1979,12 @@ ixgbe_stop(void *arg)
        /* Tell the stack that the interface is no longer active */
        ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
 
-       ixgbe_reset_hw(&adapter->hw);
-       adapter->hw.adapter_stopped = FALSE;
-       ixgbe_stop_adapter(&adapter->hw);
+       ixgbe_reset_hw(hw);
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+       /* Turn off the laser */
+       if (hw->phy.multispeed_fiber)
+               ixgbe_disable_tx_laser(hw);
        callout_stop(&adapter->timer);
 
        /* reprogram the RAR[0] in case user changed it. */
@@ -2242,6 +2209,9 @@ ixgbe_setup_msix(struct adapter *adapter
 
        if (ixgbe_num_queues != 0)
                queues = ixgbe_num_queues;
+       /* Set max queues to 8 */
+       else if (queues > 8)
+               queues = 8;
 
        /*
        ** Want one vector (RX/TX pair) per queue
@@ -2375,7 +2345,7 @@ mem:
  *  Setup networking device structure and register an interface.
  *
  **********************************************************************/
-static int
+static void
 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -2384,10 +2354,8 @@ ixgbe_setup_interface(device_t dev, stru
        INIT_DEBUGOUT("ixgbe_setup_interface: begin");
 
        ifp = adapter->ifp = if_alloc(IFT_ETHER);
-       if (ifp == NULL) {
-               device_printf(dev, "can not allocate ifnet structure\n");
-               return (-1);
-       }
+       if (ifp == NULL)
+               panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_mtu = ETHERMTU;
        ifp->if_baudrate = 1000000000;
@@ -2414,10 +2382,22 @@ ixgbe_setup_interface(device_t dev, stru
 
        ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
-       ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
-
+       ifp->if_capabilities |= IFCAP_JUMBO_MTU;
        ifp->if_capenable = ifp->if_capabilities;
 
+       /* Don't enable LRO by default */
+       ifp->if_capabilities |= IFCAP_LRO;
+
+       /*
+       ** Dont turn this on by default, if vlans are
+       ** created on another pseudo device (eg. lagg)
+       ** then vlan events are not passed thru, breaking
+       ** operation, but with HW FILTER off it works. If
+       ** using vlans directly on the em driver you can
+       ** enable this and get full hardware tag filtering.
+       */
+       ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
        /*
         * Specify the media types supported by this adapter and register
         * callbacks to update media and link information
@@ -2435,7 +2415,7 @@ ixgbe_setup_interface(device_t dev, stru
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
 
-       return (0);
+       return;
 }
 
 static void
@@ -2450,6 +2430,7 @@ ixgbe_config_link(struct adapter *adapte
        if (sfp) { 
                if (hw->phy.multispeed_fiber) {
                        hw->mac.ops.setup_sfp(hw);
+                       ixgbe_enable_tx_laser(hw);
                        taskqueue_enqueue(adapter->tq, &adapter->msf_task);
                } else
                        taskqueue_enqueue(adapter->tq, &adapter->mod_task);
@@ -2856,7 +2837,7 @@ ixgbe_initialize_transmit_units(struct a
 
                /* Setup Transmit Descriptor Cmd Settings */
                txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
-               txr->watchdog_check = FALSE;
+               txr->queue_status = IXGBE_QUEUE_IDLE;
 
                /* Disable Head Writeback */
                switch (hw->mac.type) {
@@ -3195,7 +3176,7 @@ ixgbe_atr(struct tx_ring *txr, struct mb
 {
        struct adapter                  *adapter = txr->adapter;
        struct ix_queue                 *que;
-       struct ixgbe_atr_input          atr_input;
+       union ixgbe_atr_input           atr_input;
        struct ip                       *ip;
        struct tcphdr                   *th;
        struct udphdr                   *uh;
@@ -3239,7 +3220,7 @@ ixgbe_atr(struct tx_ring *txr, struct mb
                return;
        }
 
-       memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+       memset(&atr_input, 0, sizeof(union ixgbe_atr_input));
 
        vlan_id = htole16(mp->m_pkthdr.ether_vtag);
        src_ipv4_addr = ip->ip_src.s_addr;
@@ -3274,15 +3255,18 @@ ixgbe_txeof(struct tx_ring *txr)
 {
        struct adapter  *adapter = txr->adapter;
        struct ifnet    *ifp = adapter->ifp;
-       u32     first, last, done;
+       u32     first, last, done, processed;
        struct ixgbe_tx_buf *tx_buffer;
        struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
 
        mtx_assert(&txr->tx_mtx, MA_OWNED);
 
-       if (txr->tx_avail == adapter->num_tx_desc)
+       if (txr->tx_avail == adapter->num_tx_desc) {
+               txr->queue_status = IXGBE_QUEUE_IDLE;
                return FALSE;
+       }
 
+       processed = 0;
        first = txr->next_to_clean;
        tx_buffer = &txr->tx_buffers[first];
        /* For cleanup we just use legacy struct */
@@ -3314,6 +3298,7 @@ ixgbe_txeof(struct tx_ring *txr)
                        tx_desc->lower.data = 0;
                        tx_desc->buffer_addr = 0;
                        ++txr->tx_avail;
+                       ++processed;
 
                        if (tx_buffer->m_head) {
                                txr->bytes +=
@@ -3356,6 +3341,15 @@ ixgbe_txeof(struct tx_ring *txr)
        txr->next_to_clean = first;
 
        /*
+       ** Watchdog calculation, we know there's
+       ** work outstanding or the first return
+       ** would have been taken, so none processed
+       ** for too long indicates a hang.
+       */
+       if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
+               txr->queue_status = IXGBE_QUEUE_HUNG;
+
+       /*
         * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
         * it is OK to send packets. If there are no pending descriptors,
         * clear the timeout. Otherwise, if some descriptors have been freed,
@@ -3364,7 +3358,7 @@ ixgbe_txeof(struct tx_ring *txr)
        if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                if (txr->tx_avail == adapter->num_tx_desc) {
-                       txr->watchdog_check = FALSE;
+                       txr->queue_status = IXGBE_QUEUE_IDLE;
                        return FALSE;
                }
        }
@@ -3395,51 +3389,59 @@ ixgbe_refresh_mbufs(struct rx_ring *rxr,
        cleaned = -1; /* Signify no completions */
        while (i != limit) {
                rxbuf = &rxr->rx_buffers[i];
-               if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
+               if (rxr->hdr_split == FALSE)
+                       goto no_split;
+
+               if (rxbuf->m_head == NULL) {
                        mh = m_gethdr(M_DONTWAIT, MT_DATA);
                        if (mh == NULL)
                                goto update;
-                       mh->m_pkthdr.len = mh->m_len = MHLEN;
-                       mh->m_len = MHLEN;
-                       mh->m_flags |= M_PKTHDR;
-                       m_adj(mh, ETHER_ALIGN);
-                       /* Get the memory mapping */
-                       error = bus_dmamap_load_mbuf_sg(rxr->htag,
-                           rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
-                       if (error != 0) {
-                               printf("GET BUF: dmamap load"
-                                   " failure - %d\n", error);
-                               m_free(mh);
-                               goto update;
-                       }
-                       rxbuf->m_head = mh;
-                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
-                           BUS_DMASYNC_PREREAD);
-                       rxr->rx_base[i].read.hdr_addr =
-                           htole64(hseg[0].ds_addr);
+               } else
+                       mh = rxbuf->m_head;
+
+               mh->m_pkthdr.len = mh->m_len = MHLEN;
+               mh->m_len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: hdr dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mh);
+                       rxbuf->m_head = NULL;
+                       goto update;
                }
+               rxbuf->m_head = mh;
+               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr);
 
+no_split:
                if (rxbuf->m_pack == NULL) {
                        mp = m_getjcl(M_DONTWAIT, MT_DATA,
                            M_PKTHDR, adapter->rx_mbuf_sz);
                        if (mp == NULL)
                                goto update;
-                       mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
-                       /* Get the memory mapping */
-                       error = bus_dmamap_load_mbuf_sg(rxr->ptag,
-                           rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
-                       if (error != 0) {
-                               printf("GET BUF: dmamap load"
-                                   " failure - %d\n", error);
-                               m_free(mp);
-                               goto update;
-                       }
-                       rxbuf->m_pack = mp;
-                       bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
-                           BUS_DMASYNC_PREREAD);
-                       rxr->rx_base[i].read.pkt_addr =
-                           htole64(pseg[0].ds_addr);
+               } else
+                       mp = rxbuf->m_pack;
+
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: payload dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mp);
+                       rxbuf->m_pack = NULL;
+                       goto update;
                }
+               rxbuf->m_pack = mp;
+               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.pkt_addr =
+                   htole64(pseg[0].ds_addr);
 
                cleaned = i;
                /* Calculate next index */
@@ -3501,9 +3503,9 @@ ixgbe_allocate_receive_buffers(struct rx
                                   BUS_SPACE_MAXADDR,   /* lowaddr */
                                   BUS_SPACE_MAXADDR,   /* highaddr */
                                   NULL, NULL,          /* filter, filterarg */
-                                  MJUMPAGESIZE,        /* maxsize */
+                                  MJUM9BYTES,          /* maxsize */
                                   1,                   /* nsegments */
-                                  MJUMPAGESIZE,        /* maxsegsize */
+                                  MJUM9BYTES,          /* maxsegsize */
                                   0,                   /* flags */
                                   NULL,                /* lockfunc */
                                   NULL,                /* lockfuncarg */
@@ -3661,7 +3663,7 @@ ixgbe_setup_receive_ring(struct rx_ring 
 
                rxbuf = &rxr->rx_buffers[j];
                /*
-               ** Don't allocate mbufs if not
+               ** Dont allocate mbufs if not
                ** doing header split, its wasteful
                */ 
                if (rxr->hdr_split == FALSE)
@@ -4027,25 +4029,33 @@ ixgbe_rx_input(struct rx_ring *rxr, stru
 static __inline void
 ixgbe_rx_discard(struct rx_ring *rxr, int i)
 {
-       struct adapter          *adapter = rxr->adapter;
        struct ixgbe_rx_buf     *rbuf;
-       struct mbuf             *mh, *mp;
 
        rbuf = &rxr->rx_buffers[i];
-        if (rbuf->fmp != NULL) /* Partial chain ? */
+
+        if (rbuf->fmp != NULL) {/* Partial chain ? */
+               rbuf->fmp->m_flags |= M_PKTHDR;
                 m_freem(rbuf->fmp);
+                rbuf->fmp = NULL;
+       }
 
-       mh = rbuf->m_head;
-       mp = rbuf->m_pack;
+       /*
+       ** With advanced descriptors the writeback
+       ** clobbers the buffer addrs, so its easier
+       ** to just free the existing mbufs and take
+       ** the normal refresh path to get new buffers
+       ** and mapping.
+       */
+       if (rbuf->m_head) {
+               m_free(rbuf->m_head);
+               rbuf->m_head = NULL;
+       }
+ 
+       if (rbuf->m_pack) {
+               m_free(rbuf->m_pack);
+               rbuf->m_pack = NULL;
+       }
 
-       /* Reuse loaded DMA map and just update mbuf chain */
-       mh->m_len = MHLEN;
-       mh->m_flags |= M_PKTHDR;
-       mh->m_next = NULL;
-
-       mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
-       mp->m_data = mp->m_ext.ext_buf;
-       mp->m_next = NULL;
        return;
 }
 
@@ -4110,15 +4120,15 @@ ixgbe_rxeof(struct ix_queue *que, int co
                vtag = le16toh(cur->wb.upper.vlan);
                eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
 
-               /* Make sure all parts of a bad packet are discarded */
+               /* Make sure bad packets are discarded */
                if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
                    (rxr->discard)) {
                        ifp->if_ierrors++;
                        rxr->rx_discarded++;
-                       if (!eop)
-                               rxr->discard = TRUE;
-                       else
+                       if (eop)
                                rxr->discard = FALSE;
+                       else
+                               rxr->discard = TRUE;
                        ixgbe_rx_discard(rxr, i);
                        goto next_desc;
                }
@@ -4129,7 +4139,7 @@ ixgbe_rxeof(struct ix_queue *que, int co
                ** not be fragmented across sequential
                ** descriptors, rather the next descriptor
                ** is indicated in bits of the descriptor.
-               ** This also means that we might process
+               ** This also means that we might proceses
                ** more than one packet at a time, something
                ** that has never been true before, it
                ** required eliminating global chain pointers
@@ -4210,7 +4220,8 @@ ixgbe_rxeof(struct ix_queue *que, int co
                         } else {
                                /* Singlet, prepare to send */
                                 sendmp = mh;
-                                if (staterr & IXGBE_RXD_STAT_VP) {
+                                if ((adapter->num_vlans) &&
+                                 (staterr & IXGBE_RXD_STAT_VP)) {
                                         sendmp->m_pkthdr.ether_vtag = vtag;
                                         sendmp->m_flags |= M_VLANTAG;
                                 }
@@ -4376,12 +4387,13 @@ ixgbe_register_vlan(void *arg, struct if
        if ((vtag == 0) || (vtag > 4095))       /* Invalid */
                return;
 
+       IXGBE_CORE_LOCK(adapter);
        index = (vtag >> 5) & 0x7F;
        bit = vtag & 0x1F;
-       ixgbe_shadow_vfta[index] |= (1 << bit);
+       adapter->shadow_vfta[index] |= (1 << bit);
        ++adapter->num_vlans;
-       /* Re-init to load the changes */
-       ixgbe_init(adapter);
+       ixgbe_init_locked(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
 }
 
 /*
@@ -4401,17 +4413,20 @@ ixgbe_unregister_vlan(void *arg, struct 
        if ((vtag == 0) || (vtag > 4095))       /* Invalid */
                return;
 
+       IXGBE_CORE_LOCK(adapter);
        index = (vtag >> 5) & 0x7F;
        bit = vtag & 0x1F;
-       ixgbe_shadow_vfta[index] &= ~(1 << bit);
+       adapter->shadow_vfta[index] &= ~(1 << bit);
        --adapter->num_vlans;
        /* Re-init to load the changes */
-       ixgbe_init(adapter);
+       ixgbe_init_locked(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
 }
 
 static void
 ixgbe_setup_vlan_hw_support(struct adapter *adapter)
 {
+       struct ifnet    *ifp = adapter->ifp;
        struct ixgbe_hw *hw = &adapter->hw;
        u32             ctrl;
 
@@ -4430,14 +4445,16 @@ ixgbe_setup_vlan_hw_support(struct adapt
        ** we need to repopulate it now.
        */
        for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
-               if (ixgbe_shadow_vfta[i] != 0)
+               if (adapter->shadow_vfta[i] != 0)
                        IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
-                           ixgbe_shadow_vfta[i]);
+                           adapter->shadow_vfta[i]);
 
-       /* Enable the Filter Table */
        ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       ctrl &= ~IXGBE_VLNCTRL_CFIEN;
-       ctrl |= IXGBE_VLNCTRL_VFE;
+       /* Enable the Filter Table if enabled */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+               ctrl |= IXGBE_VLNCTRL_VFE;
+       }
        if (hw->mac.type == ixgbe_mac_82598EB)
                ctrl |= IXGBE_VLNCTRL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
@@ -4478,14 +4495,14 @@ ixgbe_enable_intr(struct adapter *adapte
        /* With RSS we use auto clear */
        if (adapter->msix_mem) {
                mask = IXGBE_EIMS_ENABLE_MASK;
-               /* Don't autoclear Link */
+               /* Dont autoclear Link */
                mask &= ~IXGBE_EIMS_OTHER;
                mask &= ~IXGBE_EIMS_LSC;
                IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
        }
 
        /*
-       ** Now enable all queues, this is done separately to
+       ** Now enable all queues, this is done seperately to
        ** allow for handling the extended (beyond 32) MSIX
        ** vectors that can be used by 82599
        */

Modified: head/sys/dev/ixgbe/ixgbe.h
==============================================================================
--- head/sys/dev/ixgbe/ixgbe.h  Fri Nov 26 22:36:47 2010        (r215910)
+++ head/sys/dev/ixgbe/ixgbe.h  Fri Nov 26 22:46:32 2010        (r215911)
@@ -179,6 +179,9 @@
 #define IXGBE_RX_HDR                   128
 #define IXGBE_VFTA_SIZE                        128
 #define IXGBE_BR_SIZE                  4096
+#define IXGBE_QUEUE_IDLE               0
+#define IXGBE_QUEUE_WORKING            1
+#define IXGBE_QUEUE_HUNG               2
 
 /* Offload bits in mbuf flag */
 #if __FreeBSD_version >= 800000
@@ -205,11 +208,6 @@
 #define IXGBE_BULK_LATENCY     1200
 #define IXGBE_LINK_ITR         2000
 
-/* Header split args for get_bug */
-#define IXGBE_CLEAN_HDR                1
-#define IXGBE_CLEAN_PKT                2
-#define IXGBE_CLEAN_ALL                3
-
 /*
  *****************************************************************************
  * vendor_info_array
@@ -280,7 +278,7 @@ struct tx_ring {
         struct adapter         *adapter;
        struct mtx              tx_mtx;
        u32                     me;
-       bool                    watchdog_check;
+       int                     queue_status;
        int                     watchdog_time;
        union ixgbe_adv_tx_desc *tx_base;
        struct ixgbe_dma_alloc  txdma;
@@ -374,7 +372,15 @@ struct adapter {
        u16                     num_vlans;
        u16                     num_queues;
 
-       /* Info about the board itself */
+       /*
+       ** Shadow VFTA table, this is needed because
+       ** the real vlan filter table gets cleared during
+       ** a soft reset and the driver needs to be able
+       ** to repopulate it.
+       */
+       u32                     shadow_vfta[IXGBE_VFTA_SIZE];
+
+       /* Info about the interface */
        u32                     optics;
        int                     advertise;  /* link speeds */
        bool                    link_active;
@@ -421,8 +427,6 @@ struct adapter {
        u64                     que_mask;
        u32                     rx_process_limit;
 
-       /* Multicast array memory */
-       u8                      *mta;
        /* Misc stats maintained by the driver */
        unsigned long           dropped_pkts;
        unsigned long           mbuf_defrag_failed;

Modified: head/sys/dev/ixgbe/ixgbe_82598.c
==============================================================================
--- head/sys/dev/ixgbe/ixgbe_82598.c    Fri Nov 26 22:36:47 2010        (r215910)
+++ head/sys/dev/ixgbe/ixgbe_82598.c    Fri Nov 26 22:46:32 2010        (r215911)
@@ -73,7 +73,6 @@ u32 ixgbe_get_supported_physical_layer_8
 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw);
 
 /**
  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -186,6 +185,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw
        mac->mcft_size       = 128;
        mac->vft_size        = 128;
        mac->num_rar_entries = 16;
+       mac->rx_pb_size      = 512;
        mac->max_tx_queues   = 32;
        mac->max_rx_queues   = 64;
        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
@@ -196,6 +196,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw
        /* Link */
        mac->ops.check_link = &ixgbe_check_mac_link_82598;
        mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+       mac->ops.flap_tx_laser = NULL;
        mac->ops.get_link_capabilities =
                               &ixgbe_get_link_capabilities_82598;
 
@@ -385,11 +386,14 @@ static enum ixgbe_media_type ixgbe_get_m
        DEBUGFUNC("ixgbe_get_media_type_82598");
 
        /* Detect if there is a copper PHY attached. */
-       if (hw->phy.type == ixgbe_phy_cu_unknown ||
-           hw->phy.type == ixgbe_phy_tn ||
-           hw->phy.type == ixgbe_phy_aq) {
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+       case ixgbe_phy_aq:
                media_type = ixgbe_media_type_copper;
                goto out;
+       default:
+               break;
        }
 
        /* Media type for I82598 is based on device ID */
@@ -436,6 +440,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_h
        u32 fctrl_reg;
        u32 rmcs_reg;
        u32 reg;
+       u32 rx_pba_size;
        u32 link_speed = 0;
        bool link_up;
 
@@ -463,7 +468,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_h
 
        /* Negotiate the fc mode to use */
        ret_val = ixgbe_fc_autoneg(hw);
-       if (ret_val)
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
                goto out;
 
        /* Disable any previous flow control settings */
@@ -485,7 +490,8 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_h
         */
        switch (hw->fc.current_mode) {
        case ixgbe_fc_none:
-               /* Flow control is disabled by software override or autoneg.
+               /*
+                * Flow control is disabled by software override or autoneg.
                 * The code below will actually disable it in the HW.
                 */
                break;
@@ -526,16 +532,19 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_h
 
        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-               if (hw->fc.send_xon) {
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-                                       (hw->fc.low_water | IXGBE_FCRTL_XONE));
-               } else {
-                       IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-                                       hw->fc.low_water);
-               }
+               rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+               rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+
+               reg = (rx_pba_size - hw->fc.low_water) << 6;
+               if (hw->fc.send_xon)
+                       reg |= IXGBE_FCRTL_XONE;
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
 
-               IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
-                               (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+               reg = (rx_pba_size - hw->fc.high_water) << 6;
+               reg |= IXGBE_FCRTH_FCEN;
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
        }
 
        /* Configure pause time (2 TCs per register) */
@@ -560,7 +569,7 @@ out:
  *  Restarts the link.  Performs autonegotiation if needed.
  **/
 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
-                                      bool autoneg_wait_to_complete)
+                                      bool autoneg_wait_to_complete)
 {
        u32 autoc_reg;
        u32 links_reg;
@@ -601,6 +610,41 @@ static s32 ixgbe_start_mac_link_82598(st
 }
 
 /**
+ *  ixgbe_validate_link_ready - Function looks for phy link
+ *  @hw: pointer to hardware structure
+ *
+ *  Function indicates success when phy link is available. If phy is not ready
+ *  within 5 seconds of MAC indicating link, the function returns error.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+       u32 timeout;
+       u16 an_reg;
+
+       if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+               return IXGBE_SUCCESS;
+
+       for (timeout = 0;
+            timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
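
The diff is cut off above by the list's size limit; the complete change
can be retrieved with a plain Subversion client (a sketch, assuming
anonymous access through the project's public svn mirror), or browsed at
the changeset URL at the top of this message:

  svn diff -c 215911 svn://svn.freebsd.org/base/head/sys/dev/ixgbe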