Module Name:	src
Committed By:	bouyer
Date:		Sun Dec 2 17:02:04 UTC 2018
Modified Files:
	src/sys/dev/pci: if_bge.c if_bgereg.h

Log Message:
Don't destroy the DMA maps if we're not disabling the adapter; this avoids
a KASSERT() when bus_dmamap_destroy() is called from interrupt context via
bge_watchdog().
Set IFF_OACTIVE only when bge_encap() fails because of an adapter resource
shortage. Otherwise we may set IFF_OACTIVE while no transmit is in progress,
and nothing will ever clear it.
If bus_dmamap_load_mbuf() fails with EFBIG, m_defrag() the chain and retry.
Refine the check for the 4GB boundary workaround (a fragment should also not
cross the boundary), and do it only for TSO.
If bge_encap() fails and didn't set IFF_OACTIVE, drop the packet.
Bring in more hardware bug workarounds from FreeBSD.
With these changes a BCM5720 A0 seems to survive a few hours of Internet load
with TSO4 enabled.


To generate a diff of this commit:
cvs rdiff -u -r1.319 -r1.320 src/sys/dev/pci/if_bge.c
cvs rdiff -u -r1.93 -r1.94 src/sys/dev/pci/if_bgereg.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
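As a reading aid before the diff: the EFBIG handling added to bge_encap()
follows the pattern sketched below. This is a simplified, illustrative sketch
only, not the committed code; the helper name load_tx_mbuf() is hypothetical
and does not exist in the driver, and the real function additionally switches
to a 32-bit DMA tag for TSO and manages IFF_OACTIVE as described in the log
message above.

	/*
	 * Simplified sketch (not the committed code) of the EFBIG retry in
	 * bge_encap(): if bus_dmamap_load_mbuf() reports too many segments,
	 * defragment the chain once with m_defrag() and try the load again.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/mbuf.h>
	#include <sys/bus.h>

	static int
	load_tx_mbuf(bus_dma_tag_t dmatag, bus_dmamap_t dmamap, struct mbuf *m_head)
	{
		bool remap = true;	/* allow exactly one m_defrag() retry */
		int error;

	load_again:
		error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head, BUS_DMA_NOWAIT);
		if (__predict_false(error)) {
			if (error == EFBIG && remap) {
				struct mbuf *m;

				remap = false;
				m = m_defrag(m_head, M_NOWAIT);	/* compacts in place */
				if (m != NULL) {
					KASSERT(m == m_head);
					goto load_again;
				}
			}
			/* caller decides: drop the packet or wait for the ring */
			return error;
		}
		return 0;
	}

The single-retry flag matters because m_defrag() compacts the chain in place;
if the defragmented chain still produces too many segments, retrying would
loop forever, so the error is returned and bge_start() drops the packet
instead of stalling the transmit queue.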
Modified files:

Index: src/sys/dev/pci/if_bge.c
diff -u src/sys/dev/pci/if_bge.c:1.319 src/sys/dev/pci/if_bge.c:1.320
--- src/sys/dev/pci/if_bge.c:1.319	Fri Nov 30 17:53:08 2018
+++ src/sys/dev/pci/if_bge.c	Sun Dec 2 17:02:04 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_bge.c,v 1.319 2018/11/30 17:53:08 jdolecek Exp $	*/
+/*	$NetBSD: if_bge.c,v 1.320 2018/12/02 17:02:04 bouyer Exp $	*/
 
 /*
  * Copyright (c) 2001 Wind River Systems
@@ -79,7 +79,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.319 2018/11/30 17:53:08 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.320 2018/12/02 17:02:04 bouyer Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -233,10 +233,10 @@ static int bge_newbuf_std(struct bge_sof
 		bus_dmamap_t);
 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
 static int bge_init_rx_ring_std(struct bge_softc *);
-static void bge_free_rx_ring_std(struct bge_softc *);
+static void bge_free_rx_ring_std(struct bge_softc *m, bool);
 static int bge_init_rx_ring_jumbo(struct bge_softc *);
 static void bge_free_rx_ring_jumbo(struct bge_softc *);
-static void bge_free_tx_ring(struct bge_softc *);
+static void bge_free_tx_ring(struct bge_softc *m, bool);
 static int bge_init_tx_ring(struct bge_softc *);
 
 static int bge_chipinit(struct bge_softc *);
@@ -1713,6 +1713,9 @@ bge_newbuf_std(struct bge_softc *sc, int
 	struct bge_rx_bd *r;
 	int error;
 
+	if (dmamap == NULL)
+		dmamap = sc->bge_cdata.bge_rx_std_map[i];
+
 	if (dmamap == NULL) {
 		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
@@ -1852,7 +1855,7 @@ bge_init_rx_ring_std(struct bge_softc *s
 }
 
 static void
-bge_free_rx_ring_std(struct bge_softc *sc)
+bge_free_rx_ring_std(struct bge_softc *sc, bool disable)
 {
 	int i;
 
@@ -1863,8 +1866,11 @@ bge_free_rx_ring_std(struct bge_softc *s
 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
-			bus_dmamap_destroy(sc->bge_dmatag,
-			    sc->bge_cdata.bge_rx_std_map[i]);
+			if (disable) {
+				bus_dmamap_destroy(sc->bge_dmatag,
+				    sc->bge_cdata.bge_rx_std_map[i]);
+				sc->bge_cdata.bge_rx_std_map[i] = NULL;
+			}
 		}
 		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
 		    sizeof(struct bge_rx_bd));
@@ -1920,7 +1926,7 @@ bge_free_rx_ring_jumbo(struct bge_softc 
 }
 
 static void
-bge_free_tx_ring(struct bge_softc *sc)
+bge_free_tx_ring(struct bge_softc *sc, bool disable)
 {
 	int i;
 	struct txdmamap_pool_entry *dma;
@@ -1940,12 +1946,17 @@ bge_free_tx_ring(struct bge_softc *sc)
 		    sizeof(struct bge_tx_bd));
 	}
 
-	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
-		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
-		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
-		if (sc->bge_dma64)
-			bus_dmamap_destroy(sc->bge_dmatag32, dma->dmamap32);
-		free(dma, M_DEVBUF);
+	if (disable) {
+		while ((dma = SLIST_FIRST(&sc->txdma_list))) {
+			SLIST_REMOVE_HEAD(&sc->txdma_list, link);
+			bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
+			if (sc->bge_dma64) {
+				bus_dmamap_destroy(sc->bge_dmatag32,
+				    dma->dmamap32);
+			}
+			free(dma, M_DEVBUF);
+		}
+		SLIST_INIT(&sc->txdma_list);
 	}
 
 	sc->bge_flags &= ~BGEF_TXRING_VALID;
@@ -1988,7 +1999,9 @@ bge_init_tx_ring(struct bge_softc *sc)
 	else
 		maxsegsz = ETHER_MAX_LEN_JUMBO;
 
-	SLIST_INIT(&sc->txdma_list);
+	if (SLIST_FIRST(&sc->txdma_list) != NULL)
+		goto alloc_done;
+
 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
 		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
 		    BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
@@ -2021,7 +2034,7 @@ bge_init_tx_ring(struct bge_softc *sc)
 		dma->dmamap32 = dmamap32;
 		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
 	}
-
+alloc_done:
 	sc->bge_flags |= BGEF_TXRING_VALID;
 
 	return 0;
@@ -3141,12 +3154,30 @@ bge_blockinit(struct bge_softc *sc)
 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
 		    BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
 	}
-
 	/* Turn on read DMA state machine */
 	CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val);
 	/* 5718 step 52 */
 	delay(40);
 
+	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
+	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
+		for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
+			val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
+			if ((val & 0xFFFF) > BGE_FRAMELEN)
+				break;
+			if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
+				break;
+		}
+		if (i != BGE_NUM_RDMA_CHANNELS / 2) {
+			val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
+			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
+				val |= BGE_RDMA_TX_LENGTH_WA_5719;
+			else
+				val |= BGE_RDMA_TX_LENGTH_WA_5720;
+			CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
+		}
+	}
+
 	/* 5718 step 56, 57XX step 84 */
 	/* Turn on RX data completion state machine */
 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
@@ -3913,6 +3944,7 @@ bge_attach(device_t parent, device_t sel
 			}
 		}
 	}
+	SLIST_INIT(&sc->txdma_list);
 	DPRINTFN(5, ("bus_dmamem_alloc\n"));
 	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
 	    PAGE_SIZE, 0, &sc->bge_ring_seg, 1,
@@ -4592,7 +4624,7 @@ bge_rxeof(struct bge_softc *sc)
 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
 			stdcnt++;
 			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
-			sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
+			sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
 			if (dmamap == NULL) {
 				ifp->if_ierrors++;
 				bge_newbuf_std(sc, sc->bge_std, m, dmamap);
@@ -4940,7 +4972,17 @@ bge_stats_update_regs(struct bge_softc *
 	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
 	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
 
-	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+	/*
+	 * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
+	 * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
+	 * (silicon bug). There's no reliable workaround so just
+	 * ignore the counter
+	 */
+	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
+	    BGE_ASICREV(sc->bge_chipid) != BGE_CHIPID_BCM5719_A0 &&
+	    BGE_ASICREV(sc->bge_chipid) != BGE_CHIPID_BCM5720_A0) {
+		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+	}
 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
 	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
 }
@@ -5159,6 +5201,7 @@ bge_compact_dma_runt(struct mbuf *pkt)
 static int
 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
 {
+	struct ifnet *ifp = &sc->ethercom.ec_if;
 	struct bge_tx_bd *f, *prev_f;
 	uint32_t frag, cur;
 	uint16_t csum_flags = 0;
@@ -5170,6 +5213,7 @@ bge_encap(struct bge_softc *sc, struct m
 	int use_tso, maxsegsize, error;
 	bool have_vtag;
 	uint16_t vtag;
+	bool remap;
 
 	if (m_head->m_pkthdr.csum_flags) {
 		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
@@ -5193,7 +5237,7 @@ bge_encap(struct bge_softc *sc, struct m
 			goto check_dma_bug;
 
 		if (bge_cksum_pad(m_head) != 0)
-		    return ENOBUFS;
+			return ENOBUFS;
 
 check_dma_bug:
 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
@@ -5209,8 +5253,10 @@ check_dma_bug:
 
 doit:
 	dma = SLIST_FIRST(&sc->txdma_list);
-	if (dma == NULL)
+	if (dma == NULL) {
+		ifp->if_flags |= IFF_OACTIVE;
 		return ENOBUFS;
+	}
 	dmamap = dma->dmamap;
 	dmatag = sc->bge_dmatag;
 	dma->is_dma32 = false;
@@ -5366,11 +5412,22 @@ doit:
 	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
 	 */
+	remap = true;
load_again:
 	error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head,
 	    BUS_DMA_NOWAIT);
-	if (error)
-		return ENOBUFS;
+	if (__predict_false(error)) {
+		if (error == EFBIG && remap) {
+			struct mbuf *m;
+			remap = false;
+			m = m_defrag(m_head, M_NOWAIT);
+			if (m != NULL) {
+				KASSERT(m == m_head);
+				goto load_again;
+			}
+		}
+		return error;
+	}
 	/*
 	 * Sanity check: avoid coming within 16 descriptors
 	 * of the end of the ring.
@@ -5393,8 +5450,12 @@ load_again:
 
 		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
 		f->bge_len = dmamap->dm_segs[i].ds_len;
-		if (dma->is_dma32 == false && prev_f != NULL &&
-		    prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi) {
+		if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && (
+		    (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) !=
+		    ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) ||
+		    (prev_f != NULL &&
+		     prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi))
+		    ) {
 			/*
 			 * watchdog timeout issue was observed with TSO,
 			 * limiting DMA address space to 32bits seems to
@@ -5404,6 +5465,7 @@ load_again:
 			dmatag = sc->bge_dmatag32;
 			dmamap = dma->dmamap32;
 			dma->is_dma32 = true;
+			remap = true;
 			goto load_again;
 		}
 
@@ -5466,6 +5528,7 @@ load_again:
 
 fail_unload:
 	bus_dmamap_unload(dmatag, dmamap);
+	ifp->if_flags |= IFF_OACTIVE;
 
 	return ENOBUFS;
 }
@@ -5479,8 +5542,10 @@ bge_start(struct ifnet *ifp)
 {
 	struct bge_softc *sc;
 	struct mbuf *m_head = NULL;
+	struct mbuf *m;
 	uint32_t prodidx;
 	int pkts = 0;
+	int error;
 
 	sc = ifp->if_softc;
 
@@ -5518,13 +5583,21 @@ bge_start(struct ifnet *ifp)
 		 * don't have room, set the OACTIVE flag and wait
 		 * for the NIC to drain the ring.
 		 */
-		if (bge_encap(sc, m_head, &prodidx)) {
-			ifp->if_flags |= IFF_OACTIVE;
-			break;
+		error = bge_encap(sc, m_head, &prodidx);
+		if (__predict_false(error)) {
+			if (ifp->if_flags & IFF_OACTIVE) {
+				/* just wait for the transmit ring to drain */
+				break;
+			}
+			IFQ_DEQUEUE(&ifp->if_snd, m);
+			KASSERT(m == m_head);
+			m_freem(m_head);
+			continue;
 		}
-
+
 		/* now we are committed to transmit the packet */
-		IFQ_DEQUEUE(&ifp->if_snd, m_head);
+		IFQ_DEQUEUE(&ifp->if_snd, m);
+		KASSERT(m == m_head);
 		pkts++;
 
 		/*
@@ -5963,9 +6036,37 @@ static void
 bge_watchdog(struct ifnet *ifp)
 {
 	struct bge_softc *sc;
+	uint32_t status;
 
 	sc = ifp->if_softc;
 
+	/* If pause frames are active then don't reset the hardware. */
+	if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
+		status = CSR_READ_4(sc, BGE_RX_STS);
+		if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
+			/*
+			 * If link partner has us in XOFF state then wait for
+			 * the condition to clear.
+			 */
+			CSR_WRITE_4(sc, BGE_RX_STS, status);
+			ifp->if_timer = 5;
+			return;
+		} else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
+		    (status & BGE_RXSTAT_RCVD_XON) != 0) {
+			/*
+			 * If link partner has us in XOFF state then wait for
+			 * the condition to clear.
+			 */
+			CSR_WRITE_4(sc, BGE_RX_STS, status);
+			ifp->if_timer = 5;
+			return;
+		}
+		/*
+		 * Any other condition is unexpected and the controller
+		 * should be reset.
		 */
+	}
+
 	aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
 
 	ifp->if_flags &= ~IFF_RUNNING;
@@ -6083,14 +6184,14 @@ bge_stop(struct ifnet *ifp, int disable)
 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
 
 	/* Free the RX lists. */
-	bge_free_rx_ring_std(sc);
+	bge_free_rx_ring_std(sc, disable);
 
 	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
 		bge_free_rx_ring_jumbo(sc);
 
 	/* Free TX buffers. */
-	bge_free_tx_ring(sc);
+	bge_free_tx_ring(sc, disable);
 
 	/*
 	 * Isolate/power down the PHY.

Index: src/sys/dev/pci/if_bgereg.h
diff -u src/sys/dev/pci/if_bgereg.h:1.93 src/sys/dev/pci/if_bgereg.h:1.94
--- src/sys/dev/pci/if_bgereg.h:1.93	Thu Apr 13 04:27:46 2017
+++ src/sys/dev/pci/if_bgereg.h	Sun Dec 2 17:02:04 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_bgereg.h,v 1.93 2017/04/13 04:27:46 msaitoh Exp $	*/
+/*	$NetBSD: if_bgereg.h,v 1.94 2018/12/02 17:02:04 bouyer Exp $	*/
 /*
  * Copyright (c) 2001 Wind River Systems
  * Copyright (c) 1997, 1998, 1999, 2001
@@ -1547,6 +1547,11 @@
 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512	0x00020000
 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K	0x00030000
 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K	0x000C0000
+#define BGE_RDMA_TX_LENGTH_WA_5719		0x02000000
+#define BGE_RDMA_TX_LENGTH_WA_5720		0x00200000
+
+#define BGE_RDMA_LENGTH				0x4BE0
+#define BGE_NUM_RDMA_CHANNELS			4
 
 /*
  * Write DMA control registers