Module Name:	src
Committed By:	msaitoh
Date:		Tue Mar 10 09:26:50 UTC 2015
Modified Files:
	src/sys/arch/amd64/conf: GENERIC
	src/sys/dev/pci: files.pci
	src/sys/dev/pci/ixgbe: ixgbe_vf.h ixv.c ixv.h

Log Message:
Modify to make ixv(4), the virtual function driver for the Intel 10G
Ethernet controller (ixg(4)), compilable. Not completed yet; the driver
requires MSI-X to work.

To generate a diff of this commit:
cvs rdiff -u -r1.410 -r1.411 src/sys/arch/amd64/conf/GENERIC
cvs rdiff -u -r1.373 -r1.374 src/sys/dev/pci/files.pci
cvs rdiff -u -r1.1 -r1.2 src/sys/dev/pci/ixgbe/ixgbe_vf.h
cvs rdiff -u -r1.2 -r1.3 src/sys/dev/pci/ixgbe/ixv.c \
    src/sys/dev/pci/ixgbe/ixv.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
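Note that the GENERIC change below only adds ixv(4) as a commented-out
entry, so the driver is not built into stock kernels. A minimal sketch of
a custom kernel config that would build it for testing (assuming MSI-X
actually works on the machine, which this commit does not yet guarantee)
might look like:

	include "arch/amd64/conf/GENERIC"
	ident	"IXV-TEST"			# hypothetical config name
	ixv*	at pci? dev ? function ?	# Intel 8259x 10G virtual function

The ixv attachment line is the GENERIC line from the diff below with the
leading "#" removed; the "IXV-TEST" name is made up for illustration.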
Modified files:

Index: src/sys/arch/amd64/conf/GENERIC
diff -u src/sys/arch/amd64/conf/GENERIC:1.410 src/sys/arch/amd64/conf/GENERIC:1.411
--- src/sys/arch/amd64/conf/GENERIC:1.410	Fri Mar  6 15:09:50 2015
+++ src/sys/arch/amd64/conf/GENERIC	Tue Mar 10 09:26:50 2015
@@ -1,4 +1,4 @@
-#	$NetBSD: GENERIC,v 1.410 2015/03/06 15:09:50 riastradh Exp $
+#	$NetBSD: GENERIC,v 1.411 2015/03/10 09:26:50 msaitoh Exp $
 #
 # GENERIC machine description file
 #
@@ -22,7 +22,7 @@ include 	"arch/amd64/conf/std.amd64"
 
 options 	INCLUDE_CONFIG_FILE	# embed config file in kernel binary
 
-#ident 		"GENERIC-$Revision: 1.410 $"
+#ident 		"GENERIC-$Revision: 1.411 $"
 
 maxusers	64		# estimated number of users
@@ -736,6 +736,7 @@ iwi*	at pci? dev ? function ?	# Intel PR
 iwn*	at pci? dev ? function ?	# Intel PRO/Wireless 4965AGN
 iwm*	at pci? dev ? function ?	# Intel Centrino 7260
 ixg*	at pci? dev ? function ?	# Intel 8259x 10 gigabit
+#ixv*	at pci? dev ? function ?	# Intel 8259x 10G virtual function
 jme*	at pci? dev ? function ?	# JMicron JMC2[56]0 ethernet
 hme*	at pci? dev ? function ?	# Sun Microelectronics STP2002-STQ
 le*	at pci? dev ? function ?	# PCnet-PCI Ethernet

Index: src/sys/dev/pci/files.pci
diff -u src/sys/dev/pci/files.pci:1.373 src/sys/dev/pci/files.pci:1.374
--- src/sys/dev/pci/files.pci:1.373	Sat Feb  7 13:20:00 2015
+++ src/sys/dev/pci/files.pci	Tue Mar 10 09:26:49 2015
@@ -1,4 +1,4 @@
-#	$NetBSD: files.pci,v 1.373 2015/02/07 13:20:00 pooka Exp $
+#	$NetBSD: files.pci,v 1.374 2015/03/10 09:26:49 msaitoh Exp $
 #
 # Config file and device description for machine-independent PCI code.
 # Included by ports that need it.  Requires that the SCSI files be
@@ -676,8 +676,11 @@ file	dev/pci/ixgbe/ixgbe_common.c	ixg
 file	dev/pci/ixgbe/ixgbe_mbx.c	ixg
 file	dev/pci/ixgbe/ixgbe_phy.c	ixg
 file	dev/pci/ixgbe/ixgbe_vf.c	ixg
+
 # This appears to be the driver for virtual instances of i82599.
-#file	dev/pci/ixgbe/ixv.c	ixg
+device	ixv: ether, ifnet, arp
+attach	ixv at pci
+file	dev/pci/ixgbe/ixv.c	ixv
 
 # Intel i8254x Gigabit Ethernet
 device	wm: ether, ifnet, arp, mii, mii_bitbang

Index: src/sys/dev/pci/ixgbe/ixgbe_vf.h
diff -u src/sys/dev/pci/ixgbe/ixgbe_vf.h:1.1 src/sys/dev/pci/ixgbe/ixgbe_vf.h:1.2
--- src/sys/dev/pci/ixgbe/ixgbe_vf.h:1.1	Fri Aug 12 21:55:29 2011
+++ src/sys/dev/pci/ixgbe/ixgbe_vf.h	Tue Mar 10 09:26:49 2015
@@ -31,7 +31,7 @@
 ******************************************************************************/
 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_vf.h,v 1.1 2010/11/26 22:46:32 jfv Exp $*/
-/*$NetBSD: ixgbe_vf.h,v 1.1 2011/08/12 21:55:29 dyoung Exp $*/
+/*$NetBSD: ixgbe_vf.h,v 1.2 2015/03/10 09:26:49 msaitoh Exp $*/
 
 #ifndef __IXGBE_VF_H__
 #define __IXGBE_VF_H__
@@ -108,6 +108,11 @@ struct ixgbevf_hw_stats {
 	u64 saved_reset_vfgorc;
 	u64 saved_reset_vfgotc;
 	u64 saved_reset_vfmprc;
+
+	struct evcnt ipcs;
+	struct evcnt ipcs_bad;
+	struct evcnt l4cs;
+	struct evcnt l4cs_bad;
 };
 
 #endif /* __IXGBE_VF_H__ */

Index: src/sys/dev/pci/ixgbe/ixv.c
diff -u src/sys/dev/pci/ixgbe/ixv.c:1.2 src/sys/dev/pci/ixgbe/ixv.c:1.3
--- src/sys/dev/pci/ixgbe/ixv.c:1.2	Tue Mar 25 16:19:14 2014
+++ src/sys/dev/pci/ixgbe/ixv.c	Tue Mar 10 09:26:49 2015
@@ -31,7 +31,7 @@
 ******************************************************************************/
 /*$FreeBSD: src/sys/dev/ixgbe/ixv.c,v 1.2 2011/03/23 13:10:15 jhb Exp $*/
-/*$NetBSD: ixv.c,v 1.2 2014/03/25 16:19:14 christos Exp $*/
+/*$NetBSD: ixv.c,v 1.3 2015/03/10 09:26:49 msaitoh Exp $*/
 
 #include "opt_inet.h"
 
@@ -63,17 +63,19 @@ static ixv_vendor_info_t ixv_vendor_info
  *  Table of branding strings
  *********************************************************************/
 
-static char    *ixv_strings[] = {
+static const char    *ixv_strings[] = {
 	"Intel(R) PRO/10GbE Virtual Function Network Driver"
 };
 
 /*********************************************************************
  *  Function prototypes
  *********************************************************************/
-static int      ixv_probe(device_t);
-static int      ixv_attach(device_t);
-static int      ixv_detach(device_t);
+static int      ixv_probe(device_t, cfdata_t, void *);
+static void     ixv_attach(device_t, device_t, void *);
+static int      ixv_detach(device_t, int);
+#if 0
 static int      ixv_shutdown(device_t);
+#endif
 #if __FreeBSD_version < 800000
 static void     ixv_start(struct ifnet *);
 static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
@@ -83,14 +85,15 @@ static int	ixv_mq_start_locked(struct if
 		    struct tx_ring *, struct mbuf *);
 static void	ixv_qflush(struct ifnet *);
 #endif
-static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
-static void	ixv_init(void *);
+static int      ixv_ioctl(struct ifnet *, u_long, void *);
+static int	ixv_init(struct ifnet *);
 static void	ixv_init_locked(struct adapter *);
 static void     ixv_stop(void *);
 static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
 static int      ixv_media_change(struct ifnet *);
 static void     ixv_identify_hardware(struct adapter *);
-static int      ixv_allocate_pci_resources(struct adapter *);
+static int      ixv_allocate_pci_resources(struct adapter *,
+		    const struct pci_attach_args *);
 static int      ixv_allocate_msix(struct adapter *);
 static int      ixv_allocate_queues(struct adapter *);
 static int	ixv_setup_msix(struct adapter *);
@@ -117,28 +120,31 @@ static void     ixv_enable_intr(struct a
 static void     ixv_disable_intr(struct adapter *);
 static bool	ixv_txeof(struct tx_ring *);
 static bool	ixv_rxeof(struct ix_queue *, int);
-static void	ixv_rx_checksum(u32, struct mbuf *,
u32); +static void ixv_rx_checksum(u32, struct mbuf *, u32, + struct ixgbevf_hw_stats *); static void ixv_set_multi(struct adapter *); static void ixv_update_link_status(struct adapter *); static void ixv_refresh_mbufs(struct rx_ring *, int); -static int ixv_xmit(struct tx_ring *, struct mbuf **); -static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS); -static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); -static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS); +static int ixv_xmit(struct tx_ring *, struct mbuf *); +static int ixv_sysctl_stats(SYSCTLFN_PROTO); +static int ixv_sysctl_debug(SYSCTLFN_PROTO); +static int ixv_set_flowcntl(SYSCTLFN_PROTO); static int ixv_dma_malloc(struct adapter *, bus_size_t, struct ixv_dma_alloc *, int); static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *); static void ixv_add_rx_process_limit(struct adapter *, const char *, const char *, int *, int); -static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *); +static u32 ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *); static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *); static void ixv_set_ivar(struct adapter *, u8, u8, s8); static void ixv_configure_ivars(struct adapter *); static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); static void ixv_setup_vlan_support(struct adapter *); +#if 0 static void ixv_register_vlan(void *, struct ifnet *, u16); static void ixv_unregister_vlan(void *, struct ifnet *, u16); +#endif static void ixv_save_stats(struct adapter *); static void ixv_init_stats(struct adapter *); @@ -153,13 +159,21 @@ static void ixv_msix_que(void *); static void ixv_msix_mbx(void *); /* Deferred interrupt tasklets */ -static void ixv_handle_que(void *, int); -static void ixv_handle_mbx(void *, int); +static void ixv_handle_que(void *); +static void ixv_handle_mbx(void *); + +const struct sysctlnode *ixv_sysctl_instance(struct adapter *); +static ixv_vendor_info_t *ixv_lookup(const struct pci_attach_args *); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ +CFATTACH_DECL3_NEW(ixv, sizeof(struct adapter), + ixv_probe, ixv_attach, ixv_detach, NULL, NULL, NULL, + DVF_DETACH_SHUTDOWN); + +# if 0 static device_method_t ixv_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixv_probe), @@ -168,6 +182,7 @@ static device_method_t ixv_methods[] = { DEVMETHOD(device_shutdown, ixv_shutdown), {0, 0} }; +#endif #if 0 static driver_t ixv_driver = { @@ -230,6 +245,9 @@ TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); */ static u32 ixv_shadow_vfta[VFTA_SIZE]; +/* Keep running tab on them for sanity check */ +static int ixv_total_ports; + /********************************************************************* * Device identification routine * @@ -240,44 +258,84 @@ static u32 ixv_shadow_vfta[VFTA_SIZE]; *********************************************************************/ static int -ixv_probe(device_t dev) +ixv_probe(device_t dev, cfdata_t cf, void *aux) +{ + const struct pci_attach_args *pa = aux; + + return (ixv_lookup(pa) != NULL) ? 
1 : 0; +} + +static ixv_vendor_info_t * +ixv_lookup(const struct pci_attach_args *pa) { + pcireg_t subid; ixv_vendor_info_t *ent; - u16 pci_vendor_id = 0; - u16 pci_device_id = 0; - u16 pci_subvendor_id = 0; - u16 pci_subdevice_id = 0; - char adapter_name[256]; + INIT_DEBUGOUT("ixv_probe: begin"); + if (PCI_VENDOR(pa->pa_id) != IXGBE_INTEL_VENDOR_ID) + return NULL; - pci_vendor_id = pci_get_vendor(dev); - if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID) - return (ENXIO); + subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); - pci_device_id = pci_get_device(dev); - pci_subvendor_id = pci_get_subvendor(dev); - pci_subdevice_id = pci_get_subdevice(dev); - - ent = ixv_vendor_info_array; - while (ent->vendor_id != 0) { - if ((pci_vendor_id == ent->vendor_id) && - (pci_device_id == ent->device_id) && + for (ent = ixv_vendor_info_array; ent->vendor_id != 0; ent++) { + if ((PCI_VENDOR(pa->pa_id) == ent->vendor_id) && + (PCI_PRODUCT(pa->pa_id) == ent->device_id) && - ((pci_subvendor_id == ent->subvendor_id) || + ((PCI_SUBSYS_VENDOR(subid) == ent->subvendor_id) || (ent->subvendor_id == 0)) && - ((pci_subdevice_id == ent->subdevice_id) || + ((PCI_SUBSYS_ID(subid) == ent->subdevice_id) || (ent->subdevice_id == 0))) { - snprintf(adapter_name, sizeof(adapter_name), - "%s, Version - %s", ixv_strings[ent->index], - ixv_driver_version); - device_set_desc_copy(dev, adapter_name); - return (0); + ++ixv_total_ports; + return ent; } - ent++; } - return (ENXIO); + return NULL; +} + + +static void +ixv_sysctl_attach(struct adapter *adapter) +{ + struct sysctllog **log; + const struct sysctlnode *rnode, *cnode; + device_t dev; + + dev = adapter->dev; + log = &adapter->sysctllog; + + if ((rnode = ixv_sysctl_instance(adapter)) == NULL) { + aprint_error_dev(dev, "could not create sysctl root\n"); + return; + } + + if (sysctl_createv(log, 0, &rnode, &cnode, + CTLFLAG_READWRITE, CTLTYPE_INT, + "stats", SYSCTL_DESCR("Statistics"), + ixv_sysctl_stats, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) + aprint_error_dev(dev, "could not create sysctl\n"); + + if (sysctl_createv(log, 0, &rnode, &cnode, + CTLFLAG_READWRITE, CTLTYPE_INT, + "debug", SYSCTL_DESCR("Debug Info"), + ixv_sysctl_debug, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) + aprint_error_dev(dev, "could not create sysctl\n"); + + if (sysctl_createv(log, 0, &rnode, &cnode, + CTLFLAG_READWRITE, CTLTYPE_INT, + "flow_control", SYSCTL_DESCR("Flow Control"), + ixv_set_flowcntl, 0, (void *)adapter, 0, CTL_CREATE, CTL_EOL) != 0) + aprint_error_dev(dev, "could not create sysctl\n"); + + /* XXX This is an *instance* sysctl controlling a *global* variable. + * XXX It's that way in the FreeBSD driver that this derives from. 
+ */ + if (sysctl_createv(log, 0, &rnode, &cnode, + CTLFLAG_READWRITE, CTLTYPE_INT, + "enable_aim", SYSCTL_DESCR("Interrupt Moderation"), + NULL, 0, &ixv_enable_aim, 0, CTL_CREATE, CTL_EOL) != 0) + aprint_error_dev(dev, "could not create sysctl\n"); } /********************************************************************* @@ -290,53 +348,44 @@ ixv_probe(device_t dev) * return 0 on success, positive on failure *********************************************************************/ -static int -ixv_attach(device_t dev) +static void +ixv_attach(device_t parent, device_t dev, void *aux) { struct adapter *adapter; struct ixgbe_hw *hw; int error = 0; + ixv_vendor_info_t *ent; + const struct pci_attach_args *pa = aux; INIT_DEBUGOUT("ixv_attach: begin"); /* Allocate, clear, and link in our adapter structure */ - adapter = device_get_softc(dev); + adapter = device_private(dev); adapter->dev = adapter->osdep.dev = dev; hw = &adapter->hw; + ent = ixv_lookup(pa); + + KASSERT(ent != NULL); + + aprint_normal(": %s, Version - %s\n", + ixv_strings[ent->index], ixv_driver_version); + /* Core Lock Init*/ - IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); + IXV_CORE_LOCK_INIT(adapter, device_xname(dev)); /* SYSCTL APIs */ - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixv_sysctl_stats, "I", "Statistics"); - - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixv_sysctl_debug, "I", "Debug Info"); - - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW, - adapter, 0, ixv_set_flowcntl, "I", "Flow Control"); - - SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW, - &ixv_enable_aim, 1, "Interrupt Moderation"); + ixv_sysctl_attach(adapter); /* Set up the timer callout */ - callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); + callout_init(&adapter->timer, 0); /* Determine hardware revision */ ixv_identify_hardware(adapter); /* Do base PCI setup - map BAR0 */ - if (ixv_allocate_pci_resources(adapter)) { - device_printf(dev, "Allocation of PCI resources failed\n"); + if (ixv_allocate_pci_resources(adapter, pa)) { + aprint_error_dev(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_out; } @@ -344,14 +393,14 @@ ixv_attach(device_t dev) /* Do descriptor calc and sanity checks */ if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { - device_printf(dev, "TXD config issue, using default!\n"); + aprint_error_dev(dev, "TXD config issue, using default!\n"); adapter->num_tx_desc = DEFAULT_TXD; } else adapter->num_tx_desc = ixv_txd; if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) { - device_printf(dev, "RXD config issue, using default!\n"); + aprint_error_dev(dev, "RXD config issue, using default!\n"); adapter->num_rx_desc = DEFAULT_RXD; } else adapter->num_rx_desc = ixv_rxd; @@ -368,7 +417,7 @@ ixv_attach(device_t dev) */ error = ixgbe_init_shared_code(hw); if (error) { - device_printf(dev,"Shared Code Initialization Failure\n"); + aprint_error_dev(dev,"Shared Code Initialization Failure\n"); error = EIO; goto err_late; } @@ -387,7 +436,7 @@ ixv_attach(device_t dev) error = ixgbe_init_hw(hw); 
if (error) { - device_printf(dev,"Hardware Initialization Failure\n"); + aprint_error_dev(dev,"Hardware Initialization Failure\n"); error = EIO; goto err_late; } @@ -409,20 +458,22 @@ ixv_attach(device_t dev) ixv_init_stats(adapter); /* Register for VLAN events */ +#if 0 /* XXX msaitoh delete after write? */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); +#endif INIT_DEBUGOUT("ixv_attach: end"); - return (0); + return; err_late: ixv_free_transmit_structures(adapter); ixv_free_receive_structures(adapter); err_out: ixv_free_pci_resources(adapter); - return (error); + return; } @@ -437,17 +488,21 @@ err_out: *********************************************************************/ static int -ixv_detach(device_t dev) +ixv_detach(device_t dev, int flags) { - struct adapter *adapter = device_get_softc(dev); + struct adapter *adapter = device_private(dev); struct ix_queue *que = adapter->queues; INIT_DEBUGOUT("ixv_detach: begin"); /* Make sure VLANS are not using driver */ - if (adapter->ifp->if_vlantrunk != NULL) { - device_printf(dev,"Vlan in use, detach first\n"); - return (EBUSY); + if (!VLAN_ATTACHED(&adapter->osdep.ec)) + ; /* nothing to do: no VLANs */ + else if ((flags & (DETACH_SHUTDOWN|DETACH_FORCE)) != 0) + vlan_ifdetach(adapter->ifp); + else { + aprint_error_dev(dev, "VLANs in use\n"); + return EBUSY; } IXV_CORE_LOCK(adapter); @@ -455,29 +510,27 @@ ixv_detach(device_t dev) IXV_CORE_UNLOCK(adapter); for (int i = 0; i < adapter->num_queues; i++, que++) { - if (que->tq) { - taskqueue_drain(que->tq, &que->que_task); - taskqueue_free(que->tq); - } + softint_disestablish(que->que_si); } /* Drain the Link queue */ - if (adapter->tq) { - taskqueue_drain(adapter->tq, &adapter->mbx_task); - taskqueue_free(adapter->tq); - } + softint_disestablish(adapter->mbx_si); /* Unregister VLAN events */ +#if 0 /* XXX msaitoh delete after write? 
*/ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); +#endif ether_ifdetach(adapter->ifp); - callout_drain(&adapter->timer); + callout_halt(&adapter->timer, NULL); ixv_free_pci_resources(adapter); +#if 0 /* XXX the NetBSD port is probably missing something here */ bus_generic_detach(dev); - if_free(adapter->ifp); +#endif + if_detach(adapter->ifp); ixv_free_transmit_structures(adapter); ixv_free_receive_structures(adapter); @@ -491,15 +544,17 @@ ixv_detach(device_t dev) * Shutdown entry point * **********************************************************************/ +#if 0 /* XXX NetBSD ought to register something like this through pmf(9) */ static int ixv_shutdown(device_t dev) { - struct adapter *adapter = device_get_softc(dev); + struct adapter *adapter = device_private(dev); IXV_CORE_LOCK(adapter); ixv_stop(adapter); IXV_CORE_UNLOCK(adapter); return (0); } +#endif #if __FreeBSD_version < 800000 /********************************************************************* @@ -514,37 +569,50 @@ ixv_shutdown(device_t dev) static void ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp) { + int rc; struct mbuf *m_head; struct adapter *adapter = txr->adapter; IXV_TX_LOCK_ASSERT(txr); - if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING) + if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != + IFF_RUNNING) return; if (!adapter->link_active) return; - while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { + while (!IFQ_IS_EMPTY(&ifp->if_snd)) { - IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); + IFQ_POLL(&ifp->if_snd, m_head); if (m_head == NULL) break; - if (ixv_xmit(txr, &m_head)) { - if (m_head == NULL) - break; - ifp->if_drv_flags |= IFF_DRV_OACTIVE; - IFQ_DRV_PREPEND(&ifp->if_snd, m_head); + if (ixv_xmit(txr, m_head) == EAGAIN) { + ifp->if_flags |= IFF_OACTIVE; break; } + IFQ_DEQUEUE(&ifp->if_snd, m_head); + if (rc == EFBIG) { + struct mbuf *mtmp; + + if ((mtmp = m_defrag(m_head, M_DONTWAIT)) != NULL) { + m_head = mtmp; + rc = ixv_xmit(txr, m_head); + if (rc != 0) + adapter->efbig2_tx_dma_setup.ev_count++; + } else + adapter->m_defrag_failed.ev_count++; + } + if (rc != 0) { + m_freem(m_head); + continue; + } /* Send a copy of the frame to the BPF listener */ - ETHER_BPF_MTAP(ifp, m_head); + bpf_mtap(ifp, m_head); /* Set watchdog on */ txr->watchdog_check = TRUE; - txr->watchdog_time = ticks; - + getmicrotime(&txr->watchdog_time); } return; } @@ -560,7 +628,7 @@ ixv_start(struct ifnet *ifp) struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + if (ifp->if_flags & IFF_RUNNING) { IXV_TX_LOCK(txr); ixv_start_locked(txr, ifp); IXV_TX_UNLOCK(txr); @@ -594,7 +662,7 @@ ixv_mq_start(struct ifnet *ifp, struct m IXV_TX_UNLOCK(txr); } else { err = drbr_enqueue(ifp, txr->br, m); - taskqueue_enqueue(que->tq, &que->que_task); + softint_schedule(que->que_si); } return (err); @@ -607,8 +675,8 @@ ixv_mq_start_locked(struct ifnet *ifp, s struct mbuf *next; int enqueued, err = 0; - if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != - IFF_DRV_RUNNING || adapter->link_active == 0) { + if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != + IFF_RUNNING || adapter->link_active == 0) { if (m != NULL) err = drbr_enqueue(ifp, txr->br, m); return (err); @@ -630,7 +698,7 @@ ixv_mq_start_locked(struct ifnet *ifp, s /* Process the queue */ while (next != NULL) { - if ((err = ixv_xmit(txr, 
&next)) != 0) { + if ((err = ixv_xmit(txr, next)) != 0) { if (next != NULL) err = drbr_enqueue(ifp, txr->br, next); break; @@ -639,10 +707,10 @@ ixv_mq_start_locked(struct ifnet *ifp, s drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags); /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + if ((ifp->if_flags & IFF_RUNNING) == 0) break; if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) { - ifp->if_drv_flags |= IFF_DRV_OACTIVE; + ifp->if_flags |= IFF_OACTIVE; break; } next = drbr_dequeue(ifp, txr->br); @@ -651,7 +719,7 @@ ixv_mq_start_locked(struct ifnet *ifp, s if (enqueued > 0) { /* Set watchdog on */ txr->watchdog_check = TRUE; - txr->watchdog_time = ticks; + getmicrotime(&txr->watchdog_time); } return (err); @@ -678,6 +746,26 @@ ixv_qflush(struct ifnet *ifp) #endif +static int +ixv_ifflags_cb(struct ethercom *ec) +{ + struct ifnet *ifp = &ec->ec_if; + struct adapter *adapter = ifp->if_softc; + int change = ifp->if_flags ^ adapter->if_flags, rc = 0; + + IXV_CORE_LOCK(adapter); + + if (change != 0) + adapter->if_flags = ifp->if_flags; + + if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) + rc = ENETRESET; + + IXV_CORE_UNLOCK(adapter); + + return rc; +} + /********************************************************************* * Ioctl entry point * @@ -688,83 +776,77 @@ ixv_qflush(struct ifnet *ifp) **********************************************************************/ static int -ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data) +ixv_ioctl(struct ifnet * ifp, u_long command, void *data) { struct adapter *adapter = ifp->if_softc; + struct ifcapreq *ifcr = data; struct ifreq *ifr = (struct ifreq *) data; int error = 0; + int l4csum_en; + const int l4csum = IFCAP_CSUM_TCPv4_Rx|IFCAP_CSUM_UDPv4_Rx| + IFCAP_CSUM_TCPv6_Rx|IFCAP_CSUM_UDPv6_Rx; switch (command) { - - case SIOCSIFMTU: - IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); - if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) { - error = EINVAL; - } else { - IXV_CORE_LOCK(adapter); - ifp->if_mtu = ifr->ifr_mtu; - adapter->max_frame_size = - ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - ixv_init_locked(adapter); - IXV_CORE_UNLOCK(adapter); - } - break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); - IXV_CORE_LOCK(adapter); - if (ifp->if_flags & IFF_UP) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - ixv_init_locked(adapter); - } else - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - ixv_stop(adapter); - adapter->if_flags = ifp->if_flags; - IXV_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXV_CORE_LOCK(adapter); - ixv_disable_intr(adapter); - ixv_set_multi(adapter); - ixv_enable_intr(adapter); - IXV_CORE_UNLOCK(adapter); - } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); - error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: - { - int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); - if (mask & IFCAP_HWCSUM) - ifp->if_capenable ^= IFCAP_HWCSUM; - if (mask & IFCAP_TSO4) - ifp->if_capenable ^= IFCAP_TSO4; - if (mask & IFCAP_LRO) - ifp->if_capenable ^= IFCAP_LRO; - if (mask & IFCAP_VLAN_HWTAGGING) - ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXV_CORE_LOCK(adapter); - ixv_init_locked(adapter); - 
IXV_CORE_UNLOCK(adapter); - } - VLAN_CAPABILITIES(ifp); break; - } - + case SIOCSIFMTU: + IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); + break; default: IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); - error = ether_ioctl(ifp, command, data); break; } - return (error); + switch (command) { + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + return ifmedia_ioctl(ifp, ifr, &adapter->media, command); + case SIOCSIFCAP: + /* Layer-4 Rx checksum offload has to be turned on and + * off as a unit. + */ + l4csum_en = ifcr->ifcr_capenable & l4csum; + if (l4csum_en != l4csum && l4csum_en != 0) + return EINVAL; + /*FALLTHROUGH*/ + case SIOCADDMULTI: + case SIOCDELMULTI: + case SIOCSIFFLAGS: + case SIOCSIFMTU: + default: + if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) + return error; + if ((ifp->if_flags & IFF_RUNNING) == 0) + ; + else if (command == SIOCSIFCAP || command == SIOCSIFMTU) { + IXV_CORE_LOCK(adapter); + ixv_init_locked(adapter); + IXV_CORE_UNLOCK(adapter); + } else if (command == SIOCADDMULTI || command == SIOCDELMULTI) { + /* + * Multicast list has changed; set the hardware filter + * accordingly. + */ + IXV_CORE_LOCK(adapter); + ixv_disable_intr(adapter); + ixv_set_multi(adapter); + ixv_enable_intr(adapter); + IXV_CORE_UNLOCK(adapter); + } + return 0; + } } /********************************************************************* @@ -788,7 +870,7 @@ ixv_init_locked(struct adapter *adapter) u32 mhadd, gpie; INIT_DEBUGOUT("ixv_init: begin"); - mtx_assert(&adapter->core_mtx, MA_OWNED); + KASSERT(mutex_owned(&adapter->core_mtx)); hw->adapter_stopped = FALSE; ixgbe_stop_adapter(hw); callout_stop(&adapter->timer); @@ -797,14 +879,14 @@ ixv_init_locked(struct adapter *adapter) ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); /* Get the latest mac address, User can use a LAA */ - bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr, + memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl), IXGBE_ETH_LENGTH_OF_ADDRESS); ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1); hw->addr_ctrl.rar_used_count = 1; /* Prepare transmit descriptors and buffers */ if (ixv_setup_transmit_structures(adapter)) { - device_printf(dev,"Could not setup transmit structures\n"); + aprint_error_dev(dev,"Could not setup transmit structures\n"); ixv_stop(adapter); return; } @@ -840,6 +922,7 @@ ixv_init_locked(struct adapter *adapter) gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); +#if 0 /* XXX isn't it required? 
-- msaitoh */ /* Set the various hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TSO4) @@ -850,6 +933,7 @@ ixv_init_locked(struct adapter *adapter) ifp->if_hwassist |= CSUM_SCTP; #endif } +#endif /* Set MTU size */ if (ifp->if_mtu > ETHERMTU) { @@ -883,21 +967,21 @@ ixv_init_locked(struct adapter *adapter) ixv_enable_intr(adapter); /* Now inform the stack we're ready */ - ifp->if_drv_flags |= IFF_DRV_RUNNING; - ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + ifp->if_flags |= IFF_RUNNING; + ifp->if_flags &= ~IFF_OACTIVE; return; } -static void -ixv_init(void *arg) +static int +ixv_init(struct ifnet *ifp) { - struct adapter *adapter = arg; + struct adapter *adapter = ifp->if_softc; IXV_CORE_LOCK(adapter); ixv_init_locked(adapter); IXV_CORE_UNLOCK(adapter); - return; + return 0; } @@ -946,7 +1030,7 @@ ixv_handle_que(void *context) struct ifnet *ifp = adapter->ifp; bool more; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + if (ifp->if_flags & IFF_RUNNING) { more = ixv_rxeof(que, adapter->rx_process_limit); IXV_TX_LOCK(txr); ixv_txeof(txr); @@ -954,12 +1038,13 @@ ixv_handle_que(void *context) if (!drbr_empty(ifp, txr->br)) ixv_mq_start_locked(ifp, txr, NULL); #else - if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + if (!IFQ_IS_EMPTY(&ifp->if_snd)) ixv_start_locked(txr, ifp); #endif IXV_TX_UNLOCK(txr); if (more) { - taskqueue_enqueue(que->tq, &que->que_task); + adapter->req.ev_count++; + softint_schedule(que->que_si); return; } } @@ -1045,7 +1130,7 @@ ixv_msix_que(void *arg) no_calc: if (more_tx || more_rx) - taskqueue_enqueue(que->tq, &que->que_task); + softint_schedule(que->que_si); else /* Reenable this interrupt */ ixv_enable_queue(adapter, que->msix); return; @@ -1058,7 +1143,7 @@ ixv_msix_mbx(void *arg) struct ixgbe_hw *hw = &adapter->hw; u32 reg; - ++adapter->mbx_irq; + ++adapter->mbx_irq.ev_count; /* First get the cause */ reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); @@ -1067,7 +1152,7 @@ ixv_msix_mbx(void *arg) /* Link status change */ if (reg & IXGBE_EICR_LSC) - taskqueue_enqueue(adapter->tq, &adapter->mbx_task); + softint_schedule(adapter->mbx_si); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER); return; @@ -1153,26 +1238,24 @@ ixv_media_change(struct ifnet * ifp) **********************************************************************/ static int -ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp) +ixv_xmit(struct tx_ring *txr, struct mbuf *m_head) { + struct m_tag *mtag; struct adapter *adapter = txr->adapter; + struct ethercom *ec = &adapter->osdep.ec; u32 olinfo_status = 0, cmd_type_len; u32 paylen = 0; int i, j, error, nsegs; int first, last = 0; - struct mbuf *m_head; - bus_dma_segment_t segs[32]; bus_dmamap_t map; - struct ixv_tx_buf *txbuf, *txbuf_mapped; + struct ixv_tx_buf *txbuf; union ixgbe_adv_tx_desc *txd = NULL; - m_head = *m_headp; - /* Basic descriptor defines */ cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); - if (m_head->m_flags & M_VLANTAG) + if ((mtag = VLAN_OUTPUT_TAG(ec, m_head)) != NULL) cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; /* @@ -1182,74 +1265,61 @@ ixv_xmit(struct tx_ring *txr, struct mbu */ first = txr->next_avail_desc; txbuf = &txr->tx_buffers[first]; - txbuf_mapped = txbuf; map = txbuf->map; /* * Map the packet for DMA. 
*/ - error = bus_dmamap_load_mbuf_sg(txr->txtag, map, - *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); - - if (error == EFBIG) { - struct mbuf *m; - - m = m_defrag(*m_headp, M_DONTWAIT); - if (m == NULL) { - adapter->mbuf_defrag_failed++; - m_freem(*m_headp); - *m_headp = NULL; - return (ENOBUFS); - } - *m_headp = m; - - /* Try it again */ - error = bus_dmamap_load_mbuf_sg(txr->txtag, map, - *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + error = bus_dmamap_load_mbuf(txr->txtag->dt_dmat, map, + m_head, BUS_DMA_NOWAIT); - if (error == ENOMEM) { - adapter->no_tx_dma_setup++; - return (error); - } else if (error != 0) { - adapter->no_tx_dma_setup++; - m_freem(*m_headp); - *m_headp = NULL; - return (error); - } - } else if (error == ENOMEM) { - adapter->no_tx_dma_setup++; - return (error); - } else if (error != 0) { - adapter->no_tx_dma_setup++; - m_freem(*m_headp); - *m_headp = NULL; - return (error); + switch (error) { + case EAGAIN: + adapter->eagain_tx_dma_setup.ev_count++; + return EAGAIN; + case ENOMEM: + adapter->enomem_tx_dma_setup.ev_count++; + return EAGAIN; + case EFBIG: + adapter->efbig_tx_dma_setup.ev_count++; + return error; + case EINVAL: + adapter->einval_tx_dma_setup.ev_count++; + return error; + default: + adapter->other_tx_dma_setup.ev_count++; + return error; + case 0: + break; } /* Make certain there are enough descriptors */ if (nsegs > txr->tx_avail - 2) { - txr->no_desc_avail++; - error = ENOBUFS; - goto xmit_fail; + txr->no_desc_avail.ev_count++; + /* XXX s/ixgbe/ixv/ */ + ixgbe_dmamap_unload(txr->txtag, txbuf->map); + return EAGAIN; } - m_head = *m_headp; /* ** Set up the appropriate offload context ** this becomes the first descriptor of ** a packet. */ - if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { + if (m_head->m_pkthdr.csum_flags & (M_CSUM_TSOv4|M_CSUM_TSOv6)) { if (ixv_tso_setup(txr, m_head, &paylen)) { cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; - ++adapter->tso_tx; - } else + ++adapter->tso_tx.ev_count; + } else { + ++adapter->tso_err.ev_count; + /* XXX unload DMA map! --dyoung -> easy? --msaitoh */ return (ENXIO); - } else if (ixv_tx_ctx_setup(txr, m_head)) - olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; + } + } else + olinfo_status |= ixv_tx_ctx_setup(txr, m_head); /* Record payload length */ if (paylen == 0) @@ -1257,14 +1327,14 @@ ixv_xmit(struct tx_ring *txr, struct mbu IXGBE_ADVTXD_PAYLEN_SHIFT; i = txr->next_avail_desc; - for (j = 0; j < nsegs; j++) { + for (j = 0; j < map->dm_nsegs; j++) { bus_size_t seglen; bus_addr_t segaddr; txbuf = &txr->tx_buffers[i]; txd = &txr->tx_base[i]; - seglen = segs[j].ds_len; - segaddr = htole64(segs[j].ds_addr); + seglen = map->dm_segs[j].ds_len; + segaddr = htole64(map->dm_segs[j].ds_addr); txd->read.buffer_addr = segaddr; txd->read.cmd_type_len = htole32(txr->txd_cmd | @@ -1281,32 +1351,34 @@ ixv_xmit(struct tx_ring *txr, struct mbu txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); - txr->tx_avail -= nsegs; + txr->tx_avail -= map->dm_nsegs; txr->next_avail_desc = i; txbuf->m_head = m_head; + /* We exchange the maps instead of copying because otherwise + * we end up with many pointers to the same map and we free + * one map twice in ixgbe_free_transmit_structures(). Who + * knows what other problems this caused. 
--dyoung + */ txbuf->map = map; - bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len, + BUS_DMASYNC_PREWRITE); /* Set the index of the descriptor that will be marked done */ txbuf = &txr->tx_buffers[first]; txbuf->eop_index = last; - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + /* XXX s/ixgbe/ixg/ */ + ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Advance the Transmit Descriptor Tail (Tdt), this tells the * hardware that this frame is available to transmit. */ - ++txr->total_packets; + ++txr->total_packets.ev_count; IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i); - return (0); - -xmit_fail: - bus_dmamap_unload(txr->txtag, txbuf->map); - return (error); - + return 0; } @@ -1321,32 +1393,26 @@ xmit_fail: static void ixv_set_multi(struct adapter *adapter) { + struct ether_multi *enm; + struct ether_multistep step; u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; u8 *update_ptr; - struct ifmultiaddr *ifma; int mcnt = 0; - struct ifnet *ifp = adapter->ifp; + struct ethercom *ec = &adapter->osdep.ec; IOCTL_DEBUGOUT("ixv_set_multi: begin"); -#if __FreeBSD_version < 800000 - IF_ADDR_LOCK(ifp); -#else - if_maddr_rlock(ifp); -#endif - TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { - if (ifma->ifma_addr->sa_family != AF_LINK) - continue; - bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), + ETHER_FIRST_MULTI(step, ec, enm); + while (enm != NULL) { + bcopy(enm->enm_addrlo, &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], IXGBE_ETH_LENGTH_OF_ADDRESS); mcnt++; + /* XXX This might be required --msaitoh */ + if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) + break; + ETHER_NEXT_MULTI(step, enm); } -#if __FreeBSD_version < 800000 - IF_ADDR_UNLOCK(ifp); -#else - if_maddr_runlock(ifp); -#endif update_ptr = mta; @@ -1382,14 +1448,15 @@ ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **********************************************************************/ static void -ixv_local_timer(void *arg) +ixv_local_timer1(void *arg) { struct adapter *adapter = arg; device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; int i; + struct timeval now, elapsed; - mtx_assert(&adapter->core_mtx, MA_OWNED); + KASSERT(mutex_owned(&adapter->core_mtx)); ixv_update_link_status(adapter); @@ -1411,7 +1478,9 @@ ixv_local_timer(void *arg) IXV_TX_UNLOCK(txr); continue; } - if ((ticks - txr->watchdog_time) > IXV_WATCHDOG) + getmicrotime(&now); + timersub(&now, &txr->watchdog_time, &elapsed); + if (tvtohz(&elapsed) > IXV_WATCHDOG) goto hung; IXV_TX_UNLOCK(txr); } @@ -1428,12 +1497,22 @@ hung: device_printf(dev,"TX(%d) desc avail = %d," "Next TX to Clean = %d\n", txr->me, txr->tx_avail, txr->next_to_clean); - adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - adapter->watchdog_events++; + adapter->ifp->if_flags &= ~IFF_RUNNING; + adapter->watchdog_events.ev_count++; IXV_TX_UNLOCK(txr); ixv_init_locked(adapter); } +static void +ixv_local_timer(void *arg) +{ + struct adapter *adapter = arg; + + IXV_CORE_LOCK(adapter); + ixv_local_timer1(adapter); + IXV_CORE_UNLOCK(adapter); +} + /* ** Note: this routine updates the OS on the link state ** the real check of the hardware only happens with @@ -1472,6 +1551,16 @@ ixv_update_link_status(struct adapter *a } +static void +ixv_ifstop(struct ifnet *ifp, int disable) +{ + struct adapter *adapter = ifp->if_softc; + + IXV_CORE_LOCK(adapter); + ixv_stop(adapter); + IXV_CORE_UNLOCK(adapter); +} + 
/********************************************************************* * * This routine disables all traffic on the adapter by issuing a @@ -1487,13 +1576,13 @@ ixv_stop(void *arg) struct ixgbe_hw *hw = &adapter->hw; ifp = adapter->ifp; - mtx_assert(&adapter->core_mtx, MA_OWNED); + KASSERT(mutex_owned(&adapter->core_mtx)); INIT_DEBUGOUT("ixv_stop: begin\n"); ixv_disable_intr(adapter); /* Tell the stack that the interface is no longer active */ - ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ixgbe_reset_hw(hw); adapter->hw.adapter_stopped = FALSE; @@ -1515,30 +1604,38 @@ ixv_stop(void *arg) static void ixv_identify_hardware(struct adapter *adapter) { - device_t dev = adapter->dev; u16 pci_cmd_word; + pcitag_t tag; + pci_chipset_tag_t pc; + pcireg_t subid, id; + struct ixgbe_hw *hw = &adapter->hw; + + pc = adapter->osdep.pc; + tag = adapter->osdep.tag; /* ** Make sure BUSMASTER is set, on a VM under ** KVM it may not be and will break things. */ - pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); - if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) && - (pci_cmd_word & PCIM_CMD_MEMEN))) { + pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); + if (!((pci_cmd_word & PCI_COMMAND_MASTER_ENABLE) && + (pci_cmd_word & PCI_COMMAND_MEM_ENABLE))) { INIT_DEBUGOUT("Memory Access and/or Bus Master " "bits were not set!\n"); - pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); - pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); + pci_cmd_word |= + (PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE); + pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word); } + id = pci_conf_read(pc, tag, PCI_ID_REG); + subid = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG); + /* Save off the information about this board */ - adapter->hw.vendor_id = pci_get_vendor(dev); - adapter->hw.device_id = pci_get_device(dev); - adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); - adapter->hw.subsystem_vendor_id = - pci_read_config(dev, PCIR_SUBVEND_0, 2); - adapter->hw.subsystem_device_id = - pci_read_config(dev, PCIR_SUBDEV_0, 2); + hw->vendor_id = PCI_VENDOR(id); + hw->device_id = PCI_PRODUCT(id); + hw->revision_id = PCI_REVISION(pci_conf_read(pc, tag, PCI_CLASS_REG)); + hw->subsystem_vendor_id = PCI_SUBSYS_VENDOR(subid); + hw->subsystem_device_id = PCI_SUBSYS_ID(subid); return; } @@ -1551,16 +1648,24 @@ ixv_identify_hardware(struct adapter *ad static int ixv_allocate_msix(struct adapter *adapter) { +#if !defined(NETBSD_MSI_OR_MSIX) + return 0; +#else device_t dev = adapter->dev; struct ix_queue *que = adapter->queues; int error, rid, vector = 0; + pcitag_t tag; + pci_chipset_tag_t pc; + + pc = adapter->osdep.pc; + tag = adapter->osdep.tag; for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { rid = vector + 1; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (que->res == NULL) { - device_printf(dev,"Unable to allocate" + aprint_error_dev(dev,"Unable to allocate" " bus resource: que interrupt [%d]\n", vector); return (ENXIO); } @@ -1570,7 +1675,8 @@ ixv_allocate_msix(struct adapter *adapte ixv_msix_que, que, &que->tag); if (error) { que->res = NULL; - device_printf(dev, "Failed to register QUE handler"); + aprint_error_dev(dev, + "Failed to register QUE handler"); return (error); } #if __FreeBSD_version >= 800504 @@ -1585,11 +1691,8 @@ ixv_allocate_msix(struct adapter *adapte if (adapter->num_queues > 1) bus_bind_intr(dev, que->res, i); - ixgbe_task_init(&que->que_task, ixv_handle_que, 
que); - que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT, - taskqueue_thread_enqueue, &que->tq); - taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", - device_get_nameunit(adapter->dev)); + que->que_si = softint_establish(SOFTINT_NET, ixv_handle_que, + que); } /* and Mailbox */ @@ -1597,7 +1700,7 @@ ixv_allocate_msix(struct adapter *adapte adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!adapter->res) { - device_printf(dev,"Unable to allocate" + aprint_error_dev(dev,"Unable to allocate" " bus resource: MBX interrupt [%d]\n", rid); return (ENXIO); } @@ -1607,7 +1710,7 @@ ixv_allocate_msix(struct adapter *adapte ixv_msix_mbx, adapter, &adapter->tag); if (error) { adapter->res = NULL; - device_printf(dev, "Failed to register LINK handler"); + aprint_error_dev(dev, "Failed to register LINK handler"); return (error); } #if __FreeBSD_version >= 800504 @@ -1615,11 +1718,8 @@ ixv_allocate_msix(struct adapter *adapte #endif adapter->mbxvec = vector; /* Tasklets for Mailbox */ - ixgbe_task_init(&adapter->mbx_task, ixv_handle_mbx, adapter); - adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT, - taskqueue_thread_enqueue, &adapter->tq); - taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq", - device_get_nameunit(adapter->dev)); + adapter->mbx_si = softint_establish(SOFTINT_NET, ixv_handle_mbx, + adapter); /* ** Due to a broken design QEMU will fail to properly ** enable the guest for MSIX unless the vectors in @@ -1629,14 +1729,15 @@ ixv_allocate_msix(struct adapter *adapte */ if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { int msix_ctrl; - pci_find_cap(dev, PCIY_MSIX, &rid); - rid += PCIR_MSIX_CTRL; - msix_ctrl = pci_read_config(dev, rid, 2); - msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; - pci_write_config(dev, rid, msix_ctrl, 2); + pci_get_capability(pc, tag, PCI_CAP_MSIX, &rid); + rid += PCI_MSIX_CTL; + msix_ctrl = pci_read_config(pc, tag, rid); + msix_ctrl |= PCI_MSIX_CTL_ENABLE; + pci_conf_write(pc, tag, msix_ctrl); } return (0); +#endif } /* @@ -1646,6 +1747,9 @@ ixv_allocate_msix(struct adapter *adapte static int ixv_setup_msix(struct adapter *adapter) { +#if !defined(NETBSD_MSI_OR_MSIX) + return 0; +#else device_t dev = adapter->dev; int rid, vectors, want = 2; @@ -1680,30 +1784,46 @@ ixv_setup_msix(struct adapter *adapter) out: device_printf(adapter->dev,"MSIX config error\n"); return (ENXIO); +#endif } static int -ixv_allocate_pci_resources(struct adapter *adapter) +ixv_allocate_pci_resources(struct adapter *adapter, + const struct pci_attach_args *pa) { - int rid; + pcireg_t memtype; device_t dev = adapter->dev; + bus_addr_t addr; + int flags; - rid = PCIR_BAR(0); - adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, - &rid, RF_ACTIVE); + memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_BAR(0)); - if (!(adapter->pci_mem)) { - device_printf(dev,"Unable to allocate bus resource: memory\n"); - return (ENXIO); + switch (memtype) { + case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: + case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: + adapter->osdep.mem_bus_space_tag = pa->pa_memt; + if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_BAR(0), + memtype, &addr, &adapter->osdep.mem_size, &flags) != 0) + goto map_err; + if ((flags & BUS_SPACE_MAP_PREFETCHABLE) != 0) { + aprint_normal_dev(dev, "clearing prefetchable bit\n"); + flags &= ~BUS_SPACE_MAP_PREFETCHABLE; + } + if (bus_space_map(adapter->osdep.mem_bus_space_tag, addr, + adapter->osdep.mem_size, flags, + &adapter->osdep.mem_bus_space_handle) != 0) { +map_err: + 
adapter->osdep.mem_size = 0; + aprint_error_dev(dev, "unable to map BAR0\n"); + return ENXIO; + } + break; + default: + aprint_error_dev(dev, "unexpected type on BAR0\n"); + return ENXIO; } - adapter->osdep.mem_bus_space_tag = - rman_get_bustag(adapter->pci_mem); - adapter->osdep.mem_bus_space_handle = - rman_get_bushandle(adapter->pci_mem); - adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; - adapter->num_queues = 1; adapter->hw.back = &adapter->osdep; @@ -1722,11 +1842,12 @@ ixv_allocate_pci_resources(struct adapte static void ixv_free_pci_resources(struct adapter * adapter) { +#if defined(NETBSD_MSI_OR_MSIX) struct ix_queue *que = adapter->queues; device_t dev = adapter->dev; int rid, memrid; - memrid = PCIR_BAR(MSIX_BAR); + memrid = PCI_BAR(MSIX_BAR); /* ** There is a slight possibility of a failure mode @@ -1778,6 +1899,7 @@ mem: bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->pci_mem); +#endif return; } @@ -1789,17 +1911,17 @@ mem: static void ixv_setup_interface(device_t dev, struct adapter *adapter) { + struct ethercom *ec = &adapter->osdep.ec; struct ifnet *ifp; INIT_DEBUGOUT("ixv_setup_interface: begin"); - ifp = adapter->ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) - panic("%s: can not if_alloc()\n", device_get_nameunit(dev)); - if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp = adapter->ifp = &ec->ec_if; + strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 1000000000; ifp->if_init = ixv_init; + ifp->if_stop = ixv_ifstop; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixv_ioctl; @@ -1811,7 +1933,9 @@ ixv_setup_interface(device_t dev, struct #endif ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2; + if_attach(ifp); ether_ifattach(ifp, adapter->hw.mac.addr); + ether_set_ifflags_cb(ec, ixv_ifflags_cb); adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; @@ -1819,13 +1943,28 @@ ixv_setup_interface(device_t dev, struct /* * Tell the upper layer(s) we support long frames. */ - ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + ifp->if_hdrlen = sizeof(struct ether_vlan_header); + + ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSOv4; + ifp->if_capenable = 0; - ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM; - ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; - ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO; + ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM; + ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; + ec->ec_capabilities |= ETHERCAP_JUMBO_MTU; + ec->ec_capenable = ec->ec_capabilities; - ifp->if_capenable = ifp->if_capabilities; + /* Don't enable LRO by default */ + ifp->if_capabilities |= IFCAP_LRO; + + /* + ** Dont turn this on by default, if vlans are + ** created on another pseudo device (eg. lagg) + ** then vlan events are not passed thru, breaking + ** operation, but with HW FILTER off it works. If + ** using vlans directly on the em driver you can + ** enable this and get full hardware tag filtering. + */ + ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register @@ -1863,62 +2002,72 @@ out: /******************************************************************** * Manage DMA'able memory. 
*******************************************************************/ -static void -ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) -{ - if (error) - return; - *(bus_addr_t *) arg = segs->ds_addr; - return; -} static int ixv_dma_malloc(struct adapter *adapter, bus_size_t size, struct ixv_dma_alloc *dma, int mapflags) { device_t dev = adapter->dev; - int r; + int r, rsegs; - r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ + r = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */ DBA_ALIGN, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ &dma->dma_tag); if (r != 0) { - device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; " - "error %u\n", r); + aprint_error_dev(dev, + "ixv_dma_malloc: bus_dma_tag_create failed; error %u\n", r); goto fail_0; } - r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, - BUS_DMA_NOWAIT, &dma->dma_map); + r = bus_dmamem_alloc(dma->dma_tag->dt_dmat, + size, + dma->dma_tag->dt_alignment, + dma->dma_tag->dt_boundary, + &dma->dma_seg, 1, &rsegs, BUS_DMA_NOWAIT); if (r != 0) { - device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; " - "error %u\n", r); + aprint_error_dev(dev, + "%s: bus_dmamem_alloc failed; error %u\n", __func__, r); goto fail_1; } - r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, + + r = bus_dmamem_map(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs, + size, &dma->dma_vaddr, BUS_DMA_NOWAIT); + if (r != 0) { + aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n", + __func__, r); + goto fail_2; + } + + r = ixgbe_dmamap_create(dma->dma_tag, 0, &dma->dma_map); + if (r != 0) { + aprint_error_dev(dev, "%s: bus_dmamem_map failed; error %d\n", + __func__, r); + goto fail_3; + } + + r = bus_dmamap_load(dma->dma_tag->dt_dmat, dma->dma_map, dma->dma_vaddr, size, - ixv_dmamap_cb, - &dma->dma_paddr, + NULL, mapflags | BUS_DMA_NOWAIT); if (r != 0) { - device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; " - "error %u\n", r); - goto fail_2; + aprint_error_dev(dev,"%s: bus_dmamap_load failed; error %u\n", + __func__, r); + goto fail_4; } + dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; dma->dma_size = size; - return (0); + return 0; +fail_4: + ixgbe_dmamap_destroy(dma->dma_tag, dma->dma_map); +fail_3: + bus_dmamem_unmap(dma->dma_tag->dt_dmat, dma->dma_vaddr, size); fail_2: - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); + bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, rsegs); fail_1: - bus_dma_tag_destroy(dma->dma_tag); + ixgbe_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; @@ -1928,11 +2077,11 @@ fail_0: static void ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma) { - bus_dmamap_sync(dma->dma_tag, dma->dma_map, + bus_dmamap_sync(dma->dma_tag->dt_dmat, dma->dma_map, 0, dma->dma_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(dma->dma_tag, dma->dma_map); - bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); - bus_dma_tag_destroy(dma->dma_tag); + ixgbe_dmamap_unload(dma->dma_tag, dma->dma_map); + bus_dmamem_free(dma->dma_tag->dt_dmat, &dma->dma_seg, 1); + ixgbe_dma_tag_destroy(dma->dma_tag); } @@ -1956,7 +2105,7 @@ ixv_allocate_queues(struct adapter *adap if (!(adapter->queues = (struct ix_queue *) malloc(sizeof(struct ix_queue) * 
adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate queue memory\n"); + aprint_error_dev(dev, "Unable to allocate queue memory\n"); error = ENOMEM; goto fail; } @@ -1965,7 +2114,7 @@ ixv_allocate_queues(struct adapter *adap if (!(adapter->tx_rings = (struct tx_ring *) malloc(sizeof(struct tx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate TX ring memory\n"); + aprint_error_dev(dev, "Unable to allocate TX ring memory\n"); error = ENOMEM; goto tx_fail; } @@ -1974,7 +2123,7 @@ ixv_allocate_queues(struct adapter *adap if (!(adapter->rx_rings = (struct rx_ring *) malloc(sizeof(struct rx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate RX ring memory\n"); + aprint_error_dev(dev, "Unable to allocate RX ring memory\n"); error = ENOMEM; goto rx_fail; } @@ -1996,12 +2145,12 @@ ixv_allocate_queues(struct adapter *adap /* Initialize the TX side lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", - device_get_nameunit(dev), txr->me); - mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); + device_xname(dev), txr->me); + mutex_init(&txr->tx_mtx, MUTEX_DEFAULT, IPL_NET); if (ixv_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { - device_printf(dev, + aprint_error_dev(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_tx_desc; @@ -2011,7 +2160,7 @@ ixv_allocate_queues(struct adapter *adap /* Now allocate transmit buffers for the ring */ if (ixv_allocate_transmit_buffers(txr)) { - device_printf(dev, + aprint_error_dev(dev, "Critical Failure setting up transmit buffers\n"); error = ENOMEM; goto err_tx_desc; @@ -2021,7 +2170,7 @@ ixv_allocate_queues(struct adapter *adap txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF, M_WAITOK, &txr->tx_mtx); if (txr->br == NULL) { - device_printf(dev, + aprint_error_dev(dev, "Critical Failure setting up buf ring\n"); error = ENOMEM; goto err_tx_desc; @@ -2042,12 +2191,12 @@ ixv_allocate_queues(struct adapter *adap /* Initialize the RX side lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", - device_get_nameunit(dev), rxr->me); - mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); + device_xname(dev), rxr->me); + mutex_init(&rxr->rx_mtx, MUTEX_DEFAULT, IPL_NET); if (ixv_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) { - device_printf(dev, + aprint_error_dev(dev, "Unable to allocate RxDescriptor memory\n"); error = ENOMEM; goto err_rx_desc; @@ -2057,7 +2206,7 @@ ixv_allocate_queues(struct adapter *adap /* Allocate receive buffers for the ring*/ if (ixv_allocate_receive_buffers(rxr)) { - device_printf(dev, + aprint_error_dev(dev, "Critical Failure setting up receive buffers\n"); error = ENOMEM; goto err_rx_desc; @@ -2110,26 +2259,21 @@ ixv_allocate_transmit_buffers(struct tx_ /* * Setup DMA descriptor areas. 
*/ - if ((error = bus_dma_tag_create(NULL, /* parent */ + if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */ 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ IXV_TSO_SIZE, /* maxsize */ 32, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ &txr->txtag))) { - device_printf(dev,"Unable to allocate TX DMA tag\n"); + aprint_error_dev(dev,"Unable to allocate TX DMA tag\n"); goto fail; } if (!(txr->tx_buffers = (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate tx_buffer memory\n"); + aprint_error_dev(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } @@ -2137,9 +2281,9 @@ ixv_allocate_transmit_buffers(struct tx_ /* Create the descriptor buffer dma maps */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { - error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); + error = ixgbe_dmamap_create(txr->txtag, 0, &txbuf->map); if (error != 0) { - device_printf(dev, "Unable to create TX DMA map\n"); + aprint_error_dev(dev, "Unable to create TX DMA map\n"); goto fail; } } @@ -2175,9 +2319,10 @@ ixv_setup_transmit_ring(struct tx_ring * txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { if (txbuf->m_head != NULL) { - bus_dmamap_sync(txr->txtag, txbuf->map, + bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map, + 0, txbuf->m_head->m_pkthdr.len, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, txbuf->map); + ixgbe_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; } @@ -2188,7 +2333,7 @@ ixv_setup_transmit_ring(struct tx_ring * /* Set number of descriptors available */ txr->tx_avail = adapter->num_tx_desc; - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); IXV_TX_UNLOCK(txr); } @@ -2269,10 +2414,8 @@ ixv_free_transmit_structures(struct adap struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) { - IXV_TX_LOCK(txr); ixv_free_transmit_buffers(txr); ixv_dma_free(adapter, &txr->txdma); - IXV_TX_UNLOCK(txr); IXV_TX_LOCK_DESTROY(txr); } free(adapter->tx_rings, M_DEVBUF); @@ -2298,22 +2441,20 @@ ixv_free_transmit_buffers(struct tx_ring tx_buffer = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { if (tx_buffer->m_head != NULL) { - bus_dmamap_sync(txr->txtag, tx_buffer->map, + bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, + 0, tx_buffer->m_head->m_pkthdr.len, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, - tx_buffer->map); + ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; if (tx_buffer->map != NULL) { - bus_dmamap_destroy(txr->txtag, + ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } else if (tx_buffer->map != NULL) { - bus_dmamap_unload(txr->txtag, - tx_buffer->map); - bus_dmamap_destroy(txr->txtag, - tx_buffer->map); + ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); + ixgbe_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } @@ -2326,7 +2467,7 @@ ixv_free_transmit_buffers(struct tx_ring txr->tx_buffers = NULL; } if (txr->txtag != NULL) { - bus_dma_tag_destroy(txr->txtag); + ixgbe_dma_tag_destroy(txr->txtag); txr->txtag = NULL; } return; @@ 
-2334,31 +2475,31 @@ ixv_free_transmit_buffers(struct tx_ring /********************************************************************* * - * Advanced Context Descriptor setup for VLAN or CSUM + * Advanced Context Descriptor setup for VLAN or L4 CSUM * **********************************************************************/ -static boolean_t +static u32 ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) { + struct m_tag *mtag; struct adapter *adapter = txr->adapter; + struct ethercom *ec = &adapter->osdep.ec; struct ixgbe_adv_tx_context_desc *TXD; struct ixv_tx_buf *tx_buffer; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 olinfo = 0, vlan_macip_lens = 0, type_tucmd_mlhl = 0; struct ether_vlan_header *eh; - struct ip *ip; - struct ip6_hdr *ip6; + struct ip ip; + struct ip6_hdr ip6; int ehdrlen, ip_hlen = 0; u16 etype; u8 ipproto = 0; - bool offload = TRUE; + bool offload; int ctxd = txr->next_avail_desc; u16 vtag = 0; - if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) - offload = FALSE; - + offload = ((mp->m_pkthdr.csum_flags & M_CSUM_OFFLOAD) != 0); tx_buffer = &txr->tx_buffers[ctxd]; TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; @@ -2367,19 +2508,21 @@ ixv_tx_ctx_setup(struct tx_ring *txr, st ** In advanced descriptors the vlan tag must ** be placed into the descriptor itself. */ - if (mp->m_flags & M_VLANTAG) { - vtag = htole16(mp->m_pkthdr.ether_vtag); + if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) { + vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); - } else if (offload == FALSE) - return FALSE; + } else if (!offload) + return 0; /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. */ + KASSERT(mp->m_len >= offsetof(struct ether_vlan_header, evl_tag)); eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + KASSERT(mp->m_len >= sizeof(struct ether_vlan_header)); etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { @@ -2391,50 +2534,43 @@ ixv_tx_ctx_setup(struct tx_ring *txr, st vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; switch (etype) { - case ETHERTYPE_IP: - ip = (struct ip *)(mp->m_data + ehdrlen); - ip_hlen = ip->ip_hl << 2; - if (mp->m_len < ehdrlen + ip_hlen) - return (FALSE); - ipproto = ip->ip_p; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - break; - case ETHERTYPE_IPV6: - ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); - ip_hlen = sizeof(struct ip6_hdr); - if (mp->m_len < ehdrlen + ip_hlen) - return (FALSE); - ipproto = ip6->ip6_nxt; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; - break; - default: - offload = FALSE; - break; + case ETHERTYPE_IP: + m_copydata(mp, ehdrlen, sizeof(ip), &ip); + ip_hlen = ip.ip_hl << 2; + ipproto = ip.ip_p; +#if 0 + ip.ip_sum = 0; + m_copyback(mp, ehdrlen, sizeof(ip), &ip); +#else + KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 || + ip.ip_sum == 0); +#endif + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + break; + case ETHERTYPE_IPV6: + m_copydata(mp, ehdrlen, sizeof(ip6), &ip6); + ip_hlen = sizeof(ip6); + ipproto = ip6.ip6_nxt; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; + break; + default: + break; } + if ((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) != 0) + olinfo |= IXGBE_TXD_POPTS_IXSM << 8; + vlan_macip_lens |= ip_hlen; type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - switch (ipproto) { - case IPPROTO_TCP: - if (mp->m_pkthdr.csum_flags & CSUM_TCP) - type_tucmd_mlhl |= 
IXGBE_ADVTXD_TUCMD_L4T_TCP; - break; - - case IPPROTO_UDP: - if (mp->m_pkthdr.csum_flags & CSUM_UDP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; - break; - -#if __FreeBSD_version >= 800000 - case IPPROTO_SCTP: - if (mp->m_pkthdr.csum_flags & CSUM_SCTP) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - break; -#endif - default: - offload = FALSE; - break; + if (mp->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_TCPv6)) { + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + olinfo |= IXGBE_TXD_POPTS_TXSM << 8; + KASSERT(ipproto == IPPROTO_TCP); + } else if (mp->m_pkthdr.csum_flags & (M_CSUM_UDPv4|M_CSUM_UDPv6)) { + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; + olinfo |= IXGBE_TXD_POPTS_TXSM << 8; + KASSERT(ipproto == IPPROTO_UDP); } /* Now copy bits into descriptor */ @@ -2452,7 +2588,7 @@ ixv_tx_ctx_setup(struct tx_ring *txr, st txr->next_avail_desc = ctxd; --txr->tx_avail; - return (offload); + return olinfo; } /********************************************************************** @@ -2461,10 +2597,12 @@ ixv_tx_ctx_setup(struct tx_ring *txr, st * adapters using advanced tx descriptors * **********************************************************************/ -static boolean_t +static bool ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen) { + struct m_tag *mtag; struct adapter *adapter = txr->adapter; + struct ethercom *ec = &adapter->osdep.ec; struct ixgbe_adv_tx_context_desc *TXD; struct ixv_tx_buf *tx_buffer; u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; @@ -2499,8 +2637,9 @@ ixv_tso_setup(struct tx_ring *txr, struc return FALSE; /* 0 */ ip->ip_sum = 0; ip_hlen = ip->ip_hl << 2; - th = (struct tcphdr *)((caddr_t)ip + ip_hlen); - th->th_sum = in_pseudo(ip->ip_src.s_addr, + th = (struct tcphdr *)((char *)ip + ip_hlen); + /* XXX Educated guess: FreeBSD's in_pseudo == NetBSD's in_cksum_phdr */ + th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); tcp_hlen = th->th_off << 2; hdrlen = ehdrlen + ip_hlen + tcp_hlen; @@ -2509,8 +2648,8 @@ ixv_tso_setup(struct tx_ring *txr, struc *paylen = mp->m_pkthdr.len - hdrlen; /* VLAN MACLEN IPLEN */ - if (mp->m_flags & M_VLANTAG) { - vtag = htole16(mp->m_pkthdr.ether_vtag); + if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) { + vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); } @@ -2526,7 +2665,7 @@ ixv_tso_setup(struct tx_ring *txr, struc /* MSS L4LEN IDX */ - mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (mp->m_pkthdr.segsz << IXGBE_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); TXD->mss_l4len_idx = htole32(mss_l4len_idx); @@ -2550,7 +2689,7 @@ ixv_tso_setup(struct tx_ring *txr, struc * tx_buffer is put back on the free queue. 
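Two NetBSD idioms recur in the checksum and TSO hunks above: packet headers are pulled out of the mbuf chain with m_copydata() rather than dereferenced through mtod(), which is only safe when the header is contiguous, and offload requests are tested against the M_CSUM_* flags in m_pkthdr.csum_flags. A self-contained sketch (function names are illustrative):

    #include <sys/errno.h>
    #include <sys/mbuf.h>
    #include <netinet/in.h>
    #include <netinet/ip.h>

    static int
    example_ip_proto(struct mbuf *mp, int ehdrlen, uint8_t *protop)
    {
            struct ip ip;

            if (mp->m_pkthdr.len < ehdrlen + (int)sizeof(ip))
                    return EINVAL;
            /* Copies across mbuf boundaries, unlike a mtod() cast. */
            m_copydata(mp, ehdrlen, sizeof(ip), &ip);
            *protop = ip.ip_p;
            return 0;
    }

    static bool
    example_wants_l4_csum(const struct mbuf *mp)
    {
            return (mp->m_pkthdr.csum_flags &
                (M_CSUM_TCPv4 | M_CSUM_TCPv6 |
                M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0;
    }
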
* **********************************************************************/ -static boolean_t +static bool ixv_txeof(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; @@ -2559,10 +2698,10 @@ ixv_txeof(struct tx_ring *txr) struct ixv_tx_buf *tx_buffer; struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc; - mtx_assert(&txr->tx_mtx, MA_OWNED); + KASSERT(mutex_owned(&txr->tx_mtx)); if (txr->tx_avail == adapter->num_tx_desc) - return FALSE; + return false; first = txr->next_to_clean; tx_buffer = &txr->tx_buffers[first]; @@ -2570,7 +2709,7 @@ ixv_txeof(struct tx_ring *txr) tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; last = tx_buffer->eop_index; if (last == -1) - return FALSE; + return false; eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; /* @@ -2582,7 +2721,7 @@ ixv_txeof(struct tx_ring *txr) if (++last == adapter->num_tx_desc) last = 0; done = last; - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); /* ** Only the EOP descriptor of a packet now has the DD @@ -2597,17 +2736,17 @@ ixv_txeof(struct tx_ring *txr) ++txr->tx_avail; if (tx_buffer->m_head) { - bus_dmamap_sync(txr->txtag, + bus_dmamap_sync(txr->txtag->dt_dmat, tx_buffer->map, + 0, tx_buffer->m_head->m_pkthdr.len, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(txr->txtag, - tx_buffer->map); + ixgbe_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; tx_buffer->map = NULL; } tx_buffer->eop_index = -1; - txr->watchdog_time = ticks; + getmicrotime(&txr->watchdog_time); if (++first == adapter->num_tx_desc) first = 0; @@ -2628,26 +2767,26 @@ ixv_txeof(struct tx_ring *txr) } else break; } - bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + ixgbe_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* - * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that + * If we have enough room, clear IFF_OACTIVE to tell the stack that * it is OK to send packets. If there are no pending descriptors, * clear the timeout. Otherwise, if some descriptors have been freed, * restart the timeout. 
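In ixv_txeof() above, the watchdog timestamp changes from FreeBSD's integer ticks to a struct timeval filled by getmicrotime(); watchdog_time in struct tx_ring is retyped accordingly in ixv.h below. A staleness test on such a timestamp could look like the helper here, which is an assumption for illustration, not code from the patch:

    #include <sys/time.h>

    static bool
    example_watchdog_expired(const struct timeval *last, time_t seconds)
    {
            struct timeval now, delta;

            getmicrotime(&now);
            timersub(&now, last, &delta);
            return delta.tv_sec >= seconds;
    }
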
*/ if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) { - ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + ifp->if_flags &= ~IFF_OACTIVE; if (txr->tx_avail == adapter->num_tx_desc) { txr->watchdog_check = FALSE; - return FALSE; + return false; } } - return TRUE; + return true; } /********************************************************************* @@ -2663,15 +2802,16 @@ static void ixv_refresh_mbufs(struct rx_ring *rxr, int limit) { struct adapter *adapter = rxr->adapter; - bus_dma_segment_t hseg[1]; - bus_dma_segment_t pseg[1]; struct ixv_rx_buf *rxbuf; struct mbuf *mh, *mp; - int i, nsegs, error, cleaned; + int i, j, error; + bool refreshed = false; - i = rxr->next_to_refresh; - cleaned = -1; /* Signify no completions */ - while (i != limit) { + i = j = rxr->next_to_refresh; + /* Control the loop with one beyond */ + if (++j == adapter->num_rx_desc) + j = 0; + while (j != limit) { rxbuf = &rxr->rx_buffers[i]; if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) { mh = m_gethdr(M_DONTWAIT, MT_DATA); @@ -2682,8 +2822,8 @@ ixv_refresh_mbufs(struct rx_ring *rxr, i mh->m_flags |= M_PKTHDR; m_adj(mh, ETHER_ALIGN); /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->htag, - rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); + error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat, + rxbuf->hmap, mh, BUS_DMA_NOWAIT); if (error != 0) { printf("GET BUF: dmamap load" " failure - %d\n", error); @@ -2691,21 +2831,23 @@ ixv_refresh_mbufs(struct rx_ring *rxr, i goto update; } rxbuf->m_head = mh; - bus_dmamap_sync(rxr->htag, rxbuf->hmap, + ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.hdr_addr = - htole64(hseg[0].ds_addr); + htole64(rxbuf->hmap->dm_segs[0].ds_addr); } if (rxbuf->m_pack == NULL) { - mp = m_getjcl(M_DONTWAIT, MT_DATA, - M_PKTHDR, adapter->rx_mbuf_sz); - if (mp == NULL) + mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT, + MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); + if (mp == NULL) { + rxr->no_jmbuf.ev_count++; goto update; + } mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); + error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, + rxbuf->pmap, mp, BUS_DMA_NOWAIT); if (error != 0) { printf("GET BUF: dmamap load" " failure - %d\n", error); @@ -2713,23 +2855,22 @@ ixv_refresh_mbufs(struct rx_ring *rxr, i goto update; } rxbuf->m_pack = mp; - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, - BUS_DMASYNC_PREREAD); + bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, + 0, mp->m_pkthdr.len, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.pkt_addr = - htole64(pseg[0].ds_addr); + htole64(rxbuf->pmap->dm_segs[0].ds_addr); } - cleaned = i; + refreshed = true; + rxr->next_to_refresh = i = j; /* Calculate next index */ - if (++i == adapter->num_rx_desc) - i = 0; - /* This is the work marker for refresh */ - rxr->next_to_refresh = i; + if (++j == adapter->num_rx_desc) + j = 0; } update: - if (cleaned != -1) /* If we refreshed some, bump tail */ + if (refreshed) /* If we refreshed some, bump tail */ IXGBE_WRITE_REG(&adapter->hw, - IXGBE_VFRDT(rxr->me), cleaned); + IXGBE_VFRDT(rxr->me), rxr->next_to_refresh); return; } @@ -2753,55 +2894,45 @@ ixv_allocate_receive_buffers(struct rx_r if (!(rxr->rx_buffers = (struct ixv_rx_buf *) malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate rx_buffer memory\n"); + aprint_error_dev(dev, "Unable to allocate rx_buffer memory\n"); error = ENOMEM; goto fail; } - if ((error = 
bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */ 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ MSIZE, /* maxsize */ 1, /* nsegments */ MSIZE, /* maxsegsize */ 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ &rxr->htag))) { - device_printf(dev, "Unable to create RX DMA tag\n"); + aprint_error_dev(dev, "Unable to create RX DMA tag\n"); goto fail; } - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + if ((error = ixgbe_dma_tag_create(adapter->osdep.dmat, /* parent */ 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ MJUMPAGESIZE, /* maxsize */ 1, /* nsegments */ MJUMPAGESIZE, /* maxsegsize */ 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ &rxr->ptag))) { - device_printf(dev, "Unable to create RX DMA tag\n"); + aprint_error_dev(dev, "Unable to create RX DMA tag\n"); goto fail; } for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) { rxbuf = &rxr->rx_buffers[i]; - error = bus_dmamap_create(rxr->htag, + error = ixgbe_dmamap_create(rxr->htag, BUS_DMA_NOWAIT, &rxbuf->hmap); if (error) { - device_printf(dev, "Unable to create RX head map\n"); + aprint_error_dev(dev, "Unable to create RX head map\n"); goto fail; } - error = bus_dmamap_create(rxr->ptag, + error = ixgbe_dmamap_create(rxr->ptag, BUS_DMA_NOWAIT, &rxbuf->pmap); if (error) { - device_printf(dev, "Unable to create RX pkt map\n"); + aprint_error_dev(dev, "Unable to create RX pkt map\n"); goto fail; } } @@ -2825,16 +2956,18 @@ ixv_free_receive_ring(struct rx_ring *rx for (i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { - bus_dmamap_sync(rxr->htag, rxbuf->hmap, + ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->htag, rxbuf->hmap); + ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap); rxbuf->m_head->m_flags |= M_PKTHDR; m_freem(rxbuf->m_head); } if (rxbuf->m_pack != NULL) { - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + /* XXX not ixgbe_ ? */ + bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, + 0, rxbuf->m_pack->m_pkthdr.len, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap); rxbuf->m_pack->m_flags |= M_PKTHDR; m_freem(rxbuf->m_pack); } @@ -2853,16 +2986,17 @@ static int ixv_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter; - struct ifnet *ifp; - device_t dev; struct ixv_rx_buf *rxbuf; - bus_dma_segment_t pseg[1], hseg[1]; +#ifdef LRO + struct ifnet *ifp; struct lro_ctrl *lro = &rxr->lro; - int rsize, nsegs, error = 0; +#endif /* LRO */ + int rsize, error = 0; adapter = rxr->adapter; +#ifdef LRO ifp = adapter->ifp; - dev = adapter->dev; +#endif /* LRO */ /* Clear the ring contents */ IXV_RX_LOCK(rxr); @@ -2873,6 +3007,16 @@ ixv_setup_receive_ring(struct rx_ring *r /* Free current RX buffer structs and their mbufs */ ixv_free_receive_ring(rxr); + IXV_RX_UNLOCK(rxr); + + /* Now reinitialize our supply of jumbo mbufs. The number + * or size of jumbo mbufs may have changed. + */ + ixgbe_jcl_reinit(&adapter->jcl_head, rxr->ptag->dt_dmat, + 2 * adapter->num_rx_desc, adapter->rx_mbuf_sz); + + IXV_RX_LOCK(rxr); + /* Configure header split? 
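NetBSD has no m_getjcl(), so the port replaces it with a driver-private jumbo cluster pool (ixgbe_getjcl()/ixgbe_jcl_reinit() from the ixgbe_netbsd glue); the pool is re-primed in ixv_setup_receive_ring() above because the number or size of the clusters may have changed. A sketch of the allocation side, inferring the helpers' semantics from the calls visible in the patch:

    /*
     * Hypothetical wrapper; jcl_head, no_jmbuf and rx_mbuf_sz are the
     * fields added by this commit.
     */
    static struct mbuf *
    example_get_payload_cluster(struct adapter *adapter,
        struct rx_ring *rxr)
    {
            struct mbuf *mp;

            mp = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT, MT_DATA,
                M_PKTHDR, adapter->rx_mbuf_sz);
            if (mp == NULL)
                    rxr->no_jmbuf.ev_count++;   /* evcnt, not a bare u64 */
            return mp;
    }
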
*/ if (ixv_header_split) rxr->hdr_split = TRUE; @@ -2890,7 +3034,7 @@ ixv_setup_receive_ring(struct rx_ring *r goto skip_head; /* First the header */ - rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA); + rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA); if (rxbuf->m_head == NULL) { error = ENOBUFS; goto fail; @@ -2900,20 +3044,20 @@ ixv_setup_receive_ring(struct rx_ring *r mh->m_len = mh->m_pkthdr.len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->htag, - rxbuf->hmap, rxbuf->m_head, hseg, - &nsegs, BUS_DMA_NOWAIT); + error = bus_dmamap_load_mbuf(rxr->htag->dt_dmat, + rxbuf->hmap, rxbuf->m_head, BUS_DMA_NOWAIT); if (error != 0) /* Nothing elegant to do here */ goto fail; - bus_dmamap_sync(rxr->htag, - rxbuf->hmap, BUS_DMASYNC_PREREAD); + bus_dmamap_sync(rxr->htag->dt_dmat, rxbuf->hmap, + 0, mh->m_pkthdr.len, BUS_DMASYNC_PREREAD); /* Update descriptor */ - rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr); + rxr->rx_base[j].read.hdr_addr = + htole64(rxbuf->hmap->dm_segs[0].ds_addr); skip_head: /* Now the payload cluster */ - rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, - M_PKTHDR, adapter->rx_mbuf_sz); + rxbuf->m_pack = ixgbe_getjcl(&adapter->jcl_head, M_DONTWAIT, + MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (rxbuf->m_pack == NULL) { error = ENOBUFS; goto fail; @@ -2921,15 +3065,15 @@ skip_head: mp = rxbuf->m_pack; mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - rxbuf->pmap, mp, pseg, - &nsegs, BUS_DMA_NOWAIT); + error = bus_dmamap_load_mbuf(rxr->ptag->dt_dmat, + rxbuf->pmap, mp, BUS_DMA_NOWAIT); if (error != 0) goto fail; - bus_dmamap_sync(rxr->ptag, - rxbuf->pmap, BUS_DMASYNC_PREREAD); + bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, + 0, adapter->rx_mbuf_sz, BUS_DMASYNC_PREREAD); /* Update descriptor */ - rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr); + rxr->rx_base[j].read.pkt_addr = + htole64(rxbuf->pmap->dm_segs[0].ds_addr); } @@ -2937,16 +3081,18 @@ skip_head: rxr->next_to_check = 0; rxr->next_to_refresh = 0; rxr->lro_enabled = FALSE; - rxr->rx_split_packets = 0; - rxr->rx_bytes = 0; + rxr->rx_split_packets.ev_count = 0; + rxr->rx_bytes.ev_count = 0; - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); +#ifdef LRO /* ** Now set up the LRO interface: */ if (ifp->if_capenable & IFCAP_LRO) { + device_t dev = adapter->dev; int err = tcp_lro_init(lro); if (err) { device_printf(dev, "LRO Initialization failed!\n"); @@ -2956,6 +3102,7 @@ skip_head: rxr->lro_enabled = TRUE; lro->ifp = adapter->ifp; } +#endif /* LRO */ IXV_RX_UNLOCK(rxr); return (0); @@ -3006,6 +3153,7 @@ fail: static void ixv_initialize_receive_units(struct adapter *adapter) { + int i; struct rx_ring *rxr = adapter->rx_rings; struct ixgbe_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; @@ -3030,7 +3178,7 @@ ixv_initialize_receive_units(struct adap } IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); - for (int i = 0; i < adapter->num_queues; i++, rxr++) { + for (i = 0; i < adapter->num_queues; i++, rxr++) { u64 rdba = rxr->rxdma.dma_paddr; u32 reg, rxdctl; @@ -3100,12 +3248,17 @@ ixv_free_receive_structures(struct adapt struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_queues; i++, rxr++) { +#ifdef LRO struct lro_ctrl *lro = &rxr->lro; +#endif /* LRO */ ixv_free_receive_buffers(rxr); +#ifdef LRO /* Free LRO memory */ tcp_lro_free(lro); +#endif /* LRO 
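The mapping hunks above also show the bus_dma(9) differences: NetBSD's bus_dmamap_load_mbuf() takes no output segment array because the segments are recorded in the map itself (dm_segs/dm_nsegs), and bus_dmamap_sync() takes an explicit offset and length. A sketch of loading one RX buffer (illustrative helper; a single-segment tag is assumed, as in the patch):

    #include <sys/bus.h>
    #include <sys/endian.h>
    #include <sys/mbuf.h>

    static int
    example_load_rx_buf(bus_dma_tag_t dmat, bus_dmamap_t map,
        struct mbuf *mp, volatile uint64_t *descp)
    {
            int error;

            error = bus_dmamap_load_mbuf(dmat, map, mp, BUS_DMA_NOWAIT);
            if (error != 0)
                    return error;
            bus_dmamap_sync(dmat, map, 0, mp->m_pkthdr.len,
                BUS_DMASYNC_PREREAD);
            /* The tag was created with nsegments == 1. */
            *descp = htole64(map->dm_segs[0].ds_addr);
            return 0;
    }
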
*/ /* Free the ring memory as well */ ixv_dma_free(adapter, &rxr->rxdma); + IXV_RX_LOCK_DESTROY(rxr); } free(adapter->rx_rings, M_DEVBUF); @@ -3130,27 +3283,29 @@ ixv_free_receive_buffers(struct rx_ring for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { - bus_dmamap_sync(rxr->htag, rxbuf->hmap, + ixgbe_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->htag, rxbuf->hmap); + ixgbe_dmamap_unload(rxr->htag, rxbuf->hmap); rxbuf->m_head->m_flags |= M_PKTHDR; m_freem(rxbuf->m_head); } if (rxbuf->m_pack != NULL) { - bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + /* XXX not ixgbe_* ? */ + bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap, + 0, rxbuf->m_pack->m_pkthdr.len, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + ixgbe_dmamap_unload(rxr->ptag, rxbuf->pmap); rxbuf->m_pack->m_flags |= M_PKTHDR; m_freem(rxbuf->m_pack); } rxbuf->m_head = NULL; rxbuf->m_pack = NULL; if (rxbuf->hmap != NULL) { - bus_dmamap_destroy(rxr->htag, rxbuf->hmap); + ixgbe_dmamap_destroy(rxr->htag, rxbuf->hmap); rxbuf->hmap = NULL; } if (rxbuf->pmap != NULL) { - bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); + ixgbe_dmamap_destroy(rxr->ptag, rxbuf->pmap); rxbuf->pmap = NULL; } } @@ -3161,11 +3316,11 @@ ixv_free_receive_buffers(struct rx_ring } if (rxr->htag != NULL) { - bus_dma_tag_destroy(rxr->htag); + ixgbe_dma_tag_destroy(rxr->htag); rxr->htag = NULL; } if (rxr->ptag != NULL) { - bus_dma_tag_destroy(rxr->ptag); + ixgbe_dma_tag_destroy(rxr->ptag); rxr->ptag = NULL; } @@ -3175,14 +3330,19 @@ ixv_free_receive_buffers(struct rx_ring static __inline void ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) { + int s; +#ifdef LRO + struct adapter *adapter = ifp->if_softc; + struct ethercom *ec = &adapter->osdep.ec; + /* * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet * should be computed by hardware. Also it should not have VLAN tag in * ethernet header. */ if (rxr->lro_enabled && - (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && + (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) != 0 && (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) && @@ -3198,7 +3358,17 @@ ixv_rx_input(struct rx_ring *rxr, struct if (tcp_lro_rx(&rxr->lro, m, 0) == 0) return; } +#endif /* LRO */ + + IXV_RX_UNLOCK(rxr); + + s = splnet(); + /* Pass this up to any BPF listeners. */ + bpf_mtap(ifp, m); (*ifp->if_input)(ifp, m); + splx(s); + + IXV_RX_LOCK(rxr); } static __inline void @@ -3244,8 +3414,10 @@ ixv_rxeof(struct ix_queue *que, int coun struct adapter *adapter = que->adapter; struct rx_ring *rxr = que->rxr; struct ifnet *ifp = adapter->ifp; +#ifdef LRO struct lro_ctrl *lro = &rxr->lro; struct lro_entry *queued; +#endif /* LRO */ int i, nextp, processed = 0; u32 staterr = 0; union ixgbe_adv_rx_desc *cur; @@ -3255,12 +3427,12 @@ ixv_rxeof(struct ix_queue *que, int coun for (i = rxr->next_to_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; - u32 rsc, ptype; + u32 ptype; u16 hlen, plen, hdr, vtag; bool eop; /* Sync the ring. 
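The new unlock/splnet bracket in ixv_rx_input() above deserves a note: if_input() may recurse into the driver (for example through if_start on a forwarded packet), so the ring lock cannot be held across it, while the legacy NetBSD stack itself is still protected by splnet() rather than by the driver's mutexes; bpf_mtap() hands a copy to BPF listeners before the upcall. Condensed into a sketch using the driver's own macros:

    #include <net/bpf.h>

    static void
    example_rx_input(struct rx_ring *rxr, struct ifnet *ifp,
        struct mbuf *m)
    {
            int s;

            IXV_RX_UNLOCK(rxr);
            s = splnet();
            bpf_mtap(ifp, m);           /* tap before handing the packet up */
            (*ifp->if_input)(ifp, m);
            splx(s);
            IXV_RX_LOCK(rxr);
    }
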
*/ - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cur = &rxr->rx_base[i]; @@ -3268,13 +3440,12 @@ ixv_rxeof(struct ix_queue *que, int coun if ((staterr & IXGBE_RXD_STAT_DD) == 0) break; - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + if ((ifp->if_flags & IFF_RUNNING) == 0) break; count--; sendmp = NULL; nbuf = NULL; - rsc = 0; cur->wb.upper.status_error = 0; rbuf = &rxr->rx_buffers[i]; mh = rbuf->m_head; @@ -3291,7 +3462,7 @@ ixv_rxeof(struct ix_queue *que, int coun if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) || (rxr->discard)) { ifp->if_ierrors++; - rxr->rx_discarded++; + rxr->rx_discarded.ev_count++; if (!eop) rxr->discard = TRUE; else @@ -3343,7 +3514,7 @@ ixv_rxeof(struct ix_queue *que, int coun mh->m_pkthdr.len += mp->m_len; /* Null buf pointer so it is refreshed */ rbuf->m_pack = NULL; - rxr->rx_split_packets++; + rxr->rx_split_packets.ev_count++; } /* ** Now create the forward @@ -3361,9 +3532,11 @@ ixv_rxeof(struct ix_queue *que, int coun } else { /* Singlet, prepare to send */ sendmp = mh; - if (staterr & IXGBE_RXD_STAT_VP) { - sendmp->m_pkthdr.ether_vtag = vtag; - sendmp->m_flags |= M_VLANTAG; + if (VLAN_ATTACHED(&adapter->osdep.ec) && + (staterr & IXGBE_RXD_STAT_VP)) { + VLAN_INPUT_TAG(ifp, sendmp, vtag, + printf("%s: could not apply VLAN " + "tag", __func__)); } } } else { @@ -3388,8 +3561,12 @@ ixv_rxeof(struct ix_queue *que, int coun sendmp->m_flags |= M_PKTHDR; sendmp->m_pkthdr.len = mp->m_len; if (staterr & IXGBE_RXD_STAT_VP) { - sendmp->m_pkthdr.ether_vtag = vtag; - sendmp->m_flags |= M_VLANTAG; + /* XXX Do something reasonable on + * error. + */ + VLAN_INPUT_TAG(ifp, sendmp, vtag, + printf("%s: could not apply VLAN " + "tag", __func__)); } } /* Pass the head pointer on */ @@ -3404,19 +3581,21 @@ ixv_rxeof(struct ix_queue *que, int coun if (eop) { sendmp->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; - rxr->rx_packets++; + rxr->rx_packets.ev_count++; /* capture data for AIM */ rxr->bytes += sendmp->m_pkthdr.len; - rxr->rx_bytes += sendmp->m_pkthdr.len; - if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) - ixv_rx_checksum(staterr, sendmp, ptype); + rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len; + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { + ixv_rx_checksum(staterr, sendmp, ptype, + &adapter->stats); + } #if __FreeBSD_version >= 800000 sendmp->m_pkthdr.flowid = que->msix; sendmp->m_flags |= M_FLOWID; #endif } next_desc: - bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + ixgbe_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Advance our pointers to the next descriptor. */ @@ -3442,6 +3621,7 @@ next_desc: rxr->next_to_check = i; +#ifdef LRO /* * Flush any outstanding LRO work */ @@ -3449,6 +3629,7 @@ next_desc: SLIST_REMOVE_HEAD(&lro->lro_active, next); tcp_lro_flush(lro, queued); } +#endif /* LRO */ IXV_RX_UNLOCK(rxr); @@ -3457,11 +3638,11 @@ next_desc: ** Schedule another interrupt if so. 
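VLAN handling differs between the two stacks: FreeBSD stores the tag in m_pkthdr.ether_vtag with M_VLANTAG set, while NetBSD attaches it as an m_tag through the if_vlanvar.h macros, as in the RX hunks above and the earlier ixv_tx_ctx_setup() change. Both directions in one fragment-style sketch, variables named as in the patch:

    #include <net/if_vlanvar.h>

    /* TX: pick up a tag that vlan(4) attached to the outgoing packet. */
    if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL)
            vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);

    /*
     * RX: attach the tag stripped by the NIC before if_input(); the
     * last argument is the statement run if m_tag allocation fails.
     */
    if (VLAN_ATTACHED(ec) && (staterr & IXGBE_RXD_STAT_VP))
            VLAN_INPUT_TAG(ifp, sendmp, vtag,
                printf("could not apply VLAN tag\n"));
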
*/ if ((staterr & IXGBE_RXD_STAT_DD) != 0) { - ixv_rearm_queues(adapter, (u64)(1 << que->msix)); - return (TRUE); + ixv_rearm_queues(adapter, (u64)(1ULL << que->msix)); + return true; } - return (FALSE); + return false; } @@ -3473,35 +3654,36 @@ next_desc: * *********************************************************************/ static void -ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype) +ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype, + struct ixgbevf_hw_stats *stats) { u16 status = (u16) staterr; u8 errors = (u8) (staterr >> 24); +#if 0 bool sctp = FALSE; - if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) sctp = TRUE; - +#endif if (status & IXGBE_RXD_STAT_IPCS) { + stats->ipcs.ev_count++; if (!(errors & IXGBE_RXD_ERR_IPE)) { /* IP Checksum Good */ - mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; - mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; + mp->m_pkthdr.csum_flags |= M_CSUM_IPv4; - } else - mp->m_pkthdr.csum_flags = 0; + } else { + stats->ipcs_bad.ev_count++; + mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD; + } } if (status & IXGBE_RXD_STAT_L4CS) { - u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); -#if __FreeBSD_version >= 800000 - if (sctp) - type = CSUM_SCTP_VALID; -#endif + stats->l4cs.ev_count++; + u16 type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6; if (!(errors & IXGBE_RXD_ERR_TCPE)) { mp->m_pkthdr.csum_flags |= type; - if (!sctp) - mp->m_pkthdr.csum_data = htons(0xffff); + } else { + stats->l4cs_bad.ev_count++; + mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD; } } return; @@ -3557,6 +3739,7 @@ ixv_setup_vlan_support(struct adapter *a } } +#if 0 /* XXX Badly need to overhaul vlan(4) on NetBSD. */ /* ** This routine is run via an vlan config EVENT, ** it enables us to use the HW Filter table since @@ -3579,7 +3762,6 @@ ixv_register_vlan(void *arg, struct ifne index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; ixv_shadow_vfta[index] |= (1 << bit); - ++adapter->num_vlans; /* Re-init to load the changes */ ixv_init(adapter); } @@ -3604,10 +3786,10 @@ ixv_unregister_vlan(void *arg, struct if index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; ixv_shadow_vfta[index] &= ~(1 << bit); - --adapter->num_vlans; /* Re-init to load the changes */ ixv_init(adapter); } +#endif static void ixv_enable_intr(struct adapter *adapter) @@ -3804,18 +3986,18 @@ ixv_print_hw_stats(struct adapter * adap device_t dev = adapter->dev; device_printf(dev,"Std Mbuf Failed = %lu\n", - adapter->mbuf_defrag_failed); + adapter->mbuf_defrag_failed.ev_count); device_printf(dev,"Driver dropped packets = %lu\n", - adapter->dropped_pkts); + adapter->dropped_pkts.ev_count); device_printf(dev, "watchdog timeouts = %ld\n", - adapter->watchdog_events); + adapter->watchdog_events.ev_count); device_printf(dev,"Good Packets Rcvd = %llu\n", (long long)adapter->stats.vfgprc); device_printf(dev,"Good Packets Xmtd = %llu\n", (long long)adapter->stats.vfgptc); device_printf(dev,"TSO Transmissions = %lu\n", - adapter->tso_tx); + adapter->tso_tx.ev_count); } @@ -3834,7 +4016,9 @@ ixv_print_debug_info(struct adapter *ada struct ix_queue *que = adapter->queues; struct rx_ring *rxr; struct tx_ring *txr; +#ifdef LRO struct lro_ctrl *lro; +#endif /* LRO */ device_printf(dev,"Error Byte Count = %u \n", IXGBE_READ_REG(hw, IXGBE_ERRBC)); @@ -3842,67 +4026,74 @@ ixv_print_debug_info(struct adapter *ada for (int i = 0; i < adapter->num_queues; i++, que++) { txr = que->txr; rxr = que->rxr; +#ifdef LRO lro = &rxr->lro; +#endif /* LRO */ device_printf(dev,"QUE(%d) IRQs Handled: 
%lu\n", que->msix, (long)que->irqs); device_printf(dev,"RX(%d) Packets Received: %lld\n", - rxr->me, (long long)rxr->rx_packets); + rxr->me, (long long)rxr->rx_packets.ev_count); device_printf(dev,"RX(%d) Split RX Packets: %lld\n", - rxr->me, (long long)rxr->rx_split_packets); + rxr->me, (long long)rxr->rx_split_packets.ev_count); device_printf(dev,"RX(%d) Bytes Received: %lu\n", - rxr->me, (long)rxr->rx_bytes); + rxr->me, (long)rxr->rx_bytes.ev_count); +#ifdef LRO device_printf(dev,"RX(%d) LRO Queued= %d\n", rxr->me, lro->lro_queued); device_printf(dev,"RX(%d) LRO Flushed= %d\n", rxr->me, lro->lro_flushed); +#endif /* LRO */ device_printf(dev,"TX(%d) Packets Sent: %lu\n", - txr->me, (long)txr->total_packets); + txr->me, (long)txr->total_packets.ev_count); device_printf(dev,"TX(%d) NO Desc Avail: %lu\n", - txr->me, (long)txr->no_desc_avail); + txr->me, (long)txr->no_desc_avail.ev_count); } device_printf(dev,"MBX IRQ Handled: %lu\n", - (long)adapter->mbx_irq); + (long)adapter->mbx_irq.ev_count); return; } static int -ixv_sysctl_stats(SYSCTL_HANDLER_ARGS) +ixv_sysctl_stats(SYSCTLFN_ARGS) { + struct sysctlnode node; int error; - int result; + int result; struct adapter *adapter; - result = -1; - error = sysctl_handle_int(oidp, &result, 0, req); + node = *rnode; + adapter = (struct adapter *)node.sysctl_data; + node.sysctl_data = &result; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); + if (error != 0) + return error; - if (error || !req->newptr) - return (error); - - if (result == 1) { - adapter = (struct adapter *) arg1; + if (result == 1) ixv_print_hw_stats(adapter); - } - return error; + + return 0; } static int -ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) +ixv_sysctl_debug(SYSCTLFN_ARGS) { + struct sysctlnode node; int error, result; struct adapter *adapter; - result = -1; - error = sysctl_handle_int(oidp, &result, 0, req); + node = *rnode; + adapter = (struct adapter *)node.sysctl_data; + node.sysctl_data = &result; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); - if (error || !req->newptr) - return (error); + if (error) + return error; - if (result == 1) { - adapter = (struct adapter *) arg1; + if (result == 1) ixv_print_debug_info(adapter); - } - return error; + + return 0; } /* @@ -3914,17 +4105,20 @@ ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) ** 3 - full */ static int -ixv_set_flowcntl(SYSCTL_HANDLER_ARGS) +ixv_set_flowcntl(SYSCTLFN_ARGS) { + struct sysctlnode node; int error; struct adapter *adapter; - error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req); + node = *rnode; + adapter = (struct adapter *)node.sysctl_data; + node.sysctl_data = &ixv_flow_control; + error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error) return (error); - adapter = (struct adapter *) arg1; switch (ixv_flow_control) { case ixgbe_fc_rx_pause: case ixgbe_fc_tx_pause: @@ -3940,13 +4134,49 @@ ixv_set_flowcntl(SYSCTL_HANDLER_ARGS) return error; } +const struct sysctlnode * +ixv_sysctl_instance(struct adapter *adapter) +{ + const char *dvname; + struct sysctllog **log; + int rc; + const struct sysctlnode *rnode; + + log = &adapter->sysctllog; + dvname = device_xname(adapter->dev); + + if ((rc = sysctl_createv(log, 0, NULL, &rnode, + 0, CTLTYPE_NODE, dvname, + SYSCTL_DESCR("ixv information and settings"), + NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) + goto err; + + return rnode; +err: + printf("%s: sysctl_createv failed, rc = %d\n", __func__, rc); + return NULL; +} + static void ixv_add_rx_process_limit(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { + const 
struct sysctlnode *rnode, *cnode; + struct sysctllog **log = &adapter->sysctllog; + *limit = value; - SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), - OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); + + if ((rnode = ixv_sysctl_instance(adapter)) == NULL) + aprint_error_dev(adapter->dev, + "could not create sysctl root\n"); + else if (sysctl_createv(log, 0, &rnode, &cnode, + CTLFLAG_READWRITE, + CTLTYPE_INT, + name, SYSCTL_DESCR(description), + NULL, 0, limit, 0, + CTL_CREATE, CTL_EOL) != 0) { + aprint_error_dev(adapter->dev, "%s: could not create sysctl", + __func__); + } } Index: src/sys/dev/pci/ixgbe/ixv.h diff -u src/sys/dev/pci/ixgbe/ixv.h:1.2 src/sys/dev/pci/ixgbe/ixv.h:1.3 --- src/sys/dev/pci/ixgbe/ixv.h:1.2 Sat Oct 27 17:18:36 2012 +++ src/sys/dev/pci/ixgbe/ixv.h Tue Mar 10 09:26:49 2015 @@ -31,7 +31,7 @@ ******************************************************************************/ /*$FreeBSD: src/sys/dev/ixgbe/ixv.h,v 1.3 2011/01/07 23:39:41 jfv Exp $*/ -/*$NetBSD: ixv.h,v 1.2 2012/10/27 17:18:36 chs Exp $*/ +/*$NetBSD: ixv.h,v 1.3 2015/03/10 09:26:49 msaitoh Exp $*/ #ifndef _IXV_H_ @@ -39,6 +39,7 @@ #include <sys/param.h> +#include <sys/reboot.h> #include <sys/systm.h> #include <sys/mbuf.h> #include <sys/protosw.h> @@ -57,6 +58,7 @@ #include <net/bpf.h> #include <net/if_types.h> +#include <net/if_vlanvar.h> #include <netinet/in_systm.h> #include <netinet/in.h> @@ -72,6 +74,7 @@ #include <sys/sysctl.h> #include <sys/endian.h> +#include "ixgbe_netbsd.h" #include "ixgbe_api.h" #include "ixgbe_vf.h" @@ -161,11 +164,8 @@ #define VFTA_SIZE 128 /* Offload bits in mbuf flag */ -#if __FreeBSD_version >= 800000 -#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) -#else -#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) -#endif +#define M_CSUM_OFFLOAD \ + (M_CSUM_IPv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_UDPv6|M_CSUM_TCPv6) /* ***************************************************************************** @@ -205,7 +205,7 @@ struct ixv_rx_buf { struct ixv_dma_alloc { bus_addr_t dma_paddr; void *dma_vaddr; - bus_dma_tag_t dma_tag; + ixgbe_dma_tag_t *dma_tag; /* XXX s/ixgbe/ixv/ --msaitoh */ bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; bus_size_t dma_size; @@ -226,10 +226,7 @@ struct ix_queue { void *tag; struct tx_ring *txr; struct rx_ring *rxr; -#if 0 - struct task que_task; - struct taskqueue *tq; -#endif + void *que_si; u64 irqs; }; @@ -241,7 +238,7 @@ struct tx_ring { kmutex_t tx_mtx; u32 me; bool watchdog_check; - int watchdog_time; + struct timeval watchdog_time; union ixgbe_adv_tx_desc *tx_base; struct ixv_dma_alloc txdma; u32 next_avail_desc; @@ -249,14 +246,14 @@ struct tx_ring { struct ixv_tx_buf *tx_buffers; volatile u16 tx_avail; u32 txd_cmd; - bus_dma_tag_t txtag; + ixgbe_dma_tag_t *txtag; char mtx_name[16]; struct buf_ring *br; /* Soft Stats */ u32 bytes; u32 packets; - u64 no_desc_avail; - u64 total_packets; + struct evcnt no_desc_avail; + struct evcnt total_packets; }; @@ -269,7 +266,9 @@ struct rx_ring { u32 me; union ixgbe_adv_rx_desc *rx_base; struct ixv_dma_alloc rxdma; +#ifdef LRO struct lro_ctrl lro; +#endif /* LRO */ bool lro_enabled; bool hdr_split; bool discard; @@ -277,18 +276,19 @@ struct rx_ring { u32 next_to_check; char mtx_name[16]; struct ixv_rx_buf *rx_buffers; - bus_dma_tag_t htag; - bus_dma_tag_t ptag; + ixgbe_dma_tag_t *htag; + ixgbe_dma_tag_t *ptag; u32 bytes; /* Used for AIM calc */ u32 packets; /* Soft stats */ - u64 rx_irq; - u64 rx_split_packets; - u64 rx_packets; - 
u64 rx_bytes; - u64 rx_discarded; + struct evcnt rx_irq; + struct evcnt rx_split_packets; + struct evcnt rx_packets; + struct evcnt rx_bytes; + struct evcnt rx_discarded; + struct evcnt no_jmbuf; }; /* Our adapter structure */ @@ -336,10 +336,7 @@ struct adapter { u32 rx_mbuf_sz; /* Support for pluggable optics */ -#if 0 - struct task mbx_task; /* Mailbox tasklet */ - struct taskqueue *tq; -#endif + void *mbx_si; /* Mailbox tasklet */ /* ** Queues: @@ -366,34 +363,46 @@ struct adapter { u32 rx_process_limit; /* Misc stats maintained by the driver */ - unsigned long dropped_pkts; - unsigned long mbuf_defrag_failed; - unsigned long mbuf_header_failed; - unsigned long mbuf_packet_failed; - unsigned long no_tx_map_avail; - unsigned long no_tx_dma_setup; - unsigned long watchdog_events; - unsigned long tso_tx; - unsigned long mbx_irq; + struct evcnt dropped_pkts; + struct evcnt mbuf_defrag_failed; + struct evcnt mbuf_header_failed; + struct evcnt mbuf_packet_failed; + struct evcnt no_tx_map_avail; + struct evcnt no_tx_dma_setup; + + struct evcnt efbig_tx_dma_setup; + struct evcnt efbig2_tx_dma_setup; + struct evcnt m_defrag_failed; + struct evcnt einval_tx_dma_setup; + struct evcnt other_tx_dma_setup; + struct evcnt eagain_tx_dma_setup; + struct evcnt enomem_tx_dma_setup; + struct evcnt watchdog_events; + struct evcnt tso_err; + struct evcnt tso_tx; + struct evcnt mbx_irq; + struct evcnt req; struct ixgbevf_hw_stats stats; + struct sysctllog *sysctllog; + ixgbe_extmem_head_t jcl_head; }; #define IXV_CORE_LOCK_INIT(_sc, _name) \ - mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF) -#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) -#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) -#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) -#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) -#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) -#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) -#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) -#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) -#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) -#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) -#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) -#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) + mutex_init(&(_sc)->core_mtx, MUTEX_DEFAULT, IPL_SOFTNET) +#define IXV_CORE_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->core_mtx) +#define IXV_TX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->tx_mtx) +#define IXV_RX_LOCK_DESTROY(_sc) mutex_destroy(&(_sc)->rx_mtx) +#define IXV_CORE_LOCK(_sc) mutex_enter(&(_sc)->core_mtx) +#define IXV_TX_LOCK(_sc) mutex_enter(&(_sc)->tx_mtx) +#define IXV_TX_TRYLOCK(_sc) mutex_tryenter(&(_sc)->tx_mtx) +#define IXV_RX_LOCK(_sc) mutex_enter(&(_sc)->rx_mtx) +#define IXV_CORE_UNLOCK(_sc) mutex_exit(&(_sc)->core_mtx) +#define IXV_TX_UNLOCK(_sc) mutex_exit(&(_sc)->tx_mtx) +#define IXV_RX_UNLOCK(_sc) mutex_exit(&(_sc)->rx_mtx) +#define IXV_CORE_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->core_mtx)) +#define IXV_TX_LOCK_ASSERT(_sc) KASSERT(mutex_owned(&(_sc)->tx_mtx)) /* Workaround to make 8.0 buildable */ #if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
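For reference, the sysctl conversion above (SYSCTL_HANDLER_ARGS to SYSCTLFN_ARGS) follows the standard NetBSD pattern: copy the node, point sysctl_data at a local, call sysctl_lookup(), then act on the written value. Registration goes through sysctl_createv() as in ixv_sysctl_instance()/ixv_add_rx_process_limit() above; wiring a handler to a node would look roughly like this (the node name "stats" is hypothetical):

    #include <sys/sysctl.h>

    static int
    example_sysctl_stats(SYSCTLFN_ARGS)
    {
            struct sysctlnode node = *rnode;
            struct adapter *adapter = node.sysctl_data;
            int error, result = 0;

            node.sysctl_data = &result;
            error = sysctl_lookup(SYSCTLFN_CALL(&node));
            if (error != 0)
                    return error;
            if (result == 1)
                    ixv_print_hw_stats(adapter);
            return 0;
    }

    /* ...and, under the device's root node rnode: */
    sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
        CTLTYPE_INT, "stats", SYSCTL_DESCR("write 1 to print stats"),
        example_sysctl_stats, 0, (void *)adapter, 0,
        CTL_CREATE, CTL_EOL);

Similarly, the u64 soft statistics throughout ixv.h become struct evcnt, so they show up in vmstat -e once attached. The attach calls are not part of this section; the usual idiom, stated here as an assumption, is:

    #include <sys/device.h>
    #include <sys/evcnt.h>

    /* Once at setup (undo with evcnt_detach() on teardown): */
    evcnt_attach_dynamic(&rxr->rx_packets, EVCNT_TYPE_MISC, NULL,
        device_xname(adapter->dev), "rx_packets");

    /* In the data path, replacing the old u64 increment: */
    rxr->rx_packets.ev_count++;
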