sk(4) uses a queue for managing Tx DMA maps. Because the queue would require synchronization if the driver were unlocked, it is best to refactor the code a bit. This patch removes the queue by making the Tx and Rx DMA maps part of struct sk_chain.
OK? Index: dev/pci/if_sk.c =================================================================== RCS file: src/sys/dev/pci/if_sk.c,v retrieving revision 1.185 diff -u -p -r1.185 if_sk.c --- dev/pci/if_sk.c 8 Jan 2017 18:08:14 -0000 1.185 +++ dev/pci/if_sk.c 15 Jan 2017 13:56:06 -0000 @@ -97,7 +97,6 @@ #include <sys/socket.h> #include <sys/timeout.h> #include <sys/device.h> -#include <sys/queue.h> #include <net/if.h> @@ -569,14 +568,11 @@ sk_init_tx_ring(struct sk_if_softc *sc_i struct sk_softc *sc = sc_if->sk_softc; struct sk_chain_data *cd = &sc_if->sk_cdata; struct sk_ring_data *rd = sc_if->sk_rdata; - bus_dmamap_t dmamap; - struct sk_txmap_entry *entry; int i, nexti; bzero(sc_if->sk_rdata->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); - SIMPLEQ_INIT(&sc_if->sk_txmap_head); for (i = 0; i < SK_TX_RING_CNT; i++) { cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; if (i == (SK_TX_RING_CNT - 1)) @@ -584,19 +580,12 @@ sk_init_tx_ring(struct sk_if_softc *sc_i else nexti = i + 1; cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti]; - rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, nexti)); + rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, + nexti)); if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG, - SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap)) - return (ENOBUFS); - - entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT); - if (!entry) { - bus_dmamap_destroy(sc->sc_dmatag, dmamap); + SK_JLEN, 0, BUS_DMA_NOWAIT, &cd->sk_tx_chain[i].sk_map)) return (ENOBUFS); - } - entry->dmamap = dmamap; - SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link); } sc_if->sk_cdata.sk_tx_prod = 0; @@ -627,7 +616,8 @@ sk_newbuf(struct sk_if_softc *sc_if) m_adj(m, ETHER_ALIGN); prod = sc_if->sk_cdata.sk_rx_prod; - dmamap = sc_if->sk_cdata.sk_rx_map[prod]; + c = &sc_if->sk_cdata.sk_rx_chain[prod]; + dmamap = c->sk_map; error = bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, dmamap, m, BUS_DMA_READ|BUS_DMA_NOWAIT); @@ -639,7 +629,6 @@ sk_newbuf(struct sk_if_softc 
*sc_if) bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); - c = &sc_if->sk_cdata.sk_rx_chain[prod]; c->sk_mbuf = m; r = c->sk_desc; @@ -979,7 +968,7 @@ sk_attach(struct device *parent, struct for (i = 0; i < SK_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->sc_dmatag, SK_JLEN, 1, - SK_JLEN, 0, 0, &sc_if->sk_cdata.sk_rx_map[i]); + SK_JLEN, 0, 0, &sc_if->sk_cdata.sk_rx_chain[i].sk_map); if (error != 0) { printf(": unable to create rx DMA map %d, " "error = %d\n", i, error); @@ -1051,10 +1040,11 @@ sk_attach(struct device *parent, struct return; fail_4: for (i = 0; i < SK_RX_RING_CNT; i++) { - if (sc_if->sk_cdata.sk_rx_map[i] == NULL) + if (sc_if->sk_cdata.sk_rx_chain[i].sk_map == NULL) continue; - bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_map[i]); + bus_dmamap_destroy(sc->sc_dmatag, + sc_if->sk_cdata.sk_rx_chain[i].sk_map); } fail_3: bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct sk_ring_data)); @@ -1391,19 +1381,12 @@ sk_encap(struct sk_if_softc *sc_if, stru struct sk_tx_desc *f = NULL; u_int32_t frag, cur, sk_ctl; int i; - struct sk_txmap_entry *entry; bus_dmamap_t txmap; DPRINTFN(2, ("sk_encap\n")); - entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head); - if (entry == NULL) { - DPRINTFN(2, ("sk_encap: no txmap available\n")); - return (ENOBUFS); - } - txmap = entry->dmamap; - cur = frag = *txidx; + txmap = sc_if->sk_cdata.sk_tx_chain[*txidx].sk_map; #ifdef SK_DEBUG if (skdebug >= 2) @@ -1446,10 +1429,12 @@ sk_encap(struct sk_if_softc *sc_if, stru SK_INC(frag, SK_TX_RING_CNT); } + sc_if->sk_cdata.sk_tx_chain[*txidx].sk_map = + sc_if->sk_cdata.sk_tx_chain[cur].sk_map; + sc_if->sk_cdata.sk_tx_chain[cur].sk_map = txmap; + sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; - SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link); - sc_if->sk_cdata.sk_tx_map[cur] = entry; sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR); @@ -1606,7 +1591,7 @@ sk_rxeof(struct sk_if_softc *sc_if) 
break; cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur]; - dmamap = sc_if->sk_cdata.sk_rx_map[cur]; + dmamap = sc_if->sk_cdata.sk_rx_chain[cur].sk_map; bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); @@ -1648,8 +1633,8 @@ sk_txeof(struct sk_if_softc *sc_if) struct sk_softc *sc = sc_if->sk_softc; struct sk_tx_desc *cur_tx; struct ifnet *ifp = &sc_if->arpcom.ac_if; + bus_dmamap_t dmamap; u_int32_t idx, sk_ctl; - struct sk_txmap_entry *entry; DPRINTFN(2, ("sk_txeof\n")); @@ -1675,18 +1660,15 @@ sk_txeof(struct sk_if_softc *sc_if) if (sk_ctl & SK_TXCTL_LASTFRAG) ifp->if_opackets++; if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { - entry = sc_if->sk_cdata.sk_tx_map[idx]; + dmamap = sc_if->sk_cdata.sk_tx_chain[idx].sk_map; m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; - bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0, - entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); + bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, + dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->sc_dmatag, entry->dmamap); - SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry, - link); - sc_if->sk_cdata.sk_tx_map[idx] = NULL; + bus_dmamap_unload(sc->sc_dmatag, dmamap); } sc_if->sk_cdata.sk_tx_cnt--; SK_INC(idx, SK_TX_RING_CNT); @@ -2409,7 +2391,6 @@ sk_stop(struct sk_if_softc *sc_if, int s struct sk_softc *sc = sc_if->sk_softc; struct ifnet *ifp = &sc_if->arpcom.ac_if; bus_dmamap_t dmamap; - struct sk_txmap_entry *dma; int i; u_int32_t val; @@ -2502,7 +2483,7 @@ sk_stop(struct sk_if_softc *sc_if, int s /* Free RX and TX mbufs still in the queues. 
*/ for (i = 0; i < SK_RX_RING_CNT; i++) { if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { - dmamap = sc_if->sk_cdata.sk_rx_map[i]; + dmamap = sc_if->sk_cdata.sk_rx_chain[i].sk_map; bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, dmamap); @@ -2515,16 +2496,12 @@ sk_stop(struct sk_if_softc *sc_if, int s if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; - SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, - sc_if->sk_cdata.sk_tx_map[i], link); - sc_if->sk_cdata.sk_tx_map[i] = 0; } - } - - while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) { - SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link); - bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap); - free(dma, M_DEVBUF, 0); + if (sc_if->sk_cdata.sk_tx_chain[i].sk_map != NULL) { + bus_dmamap_destroy(sc->sc_dmatag, + sc_if->sk_cdata.sk_tx_chain[i].sk_map); + sc_if->sk_cdata.sk_tx_chain[i].sk_map = NULL; + } } } Index: dev/pci/if_skvar.h =================================================================== RCS file: src/sys/dev/pci/if_skvar.h,v retrieving revision 1.10 diff -u -p -r1.10 if_skvar.h --- dev/pci/if_skvar.h 8 Jan 2017 18:08:14 -0000 1.10 +++ dev/pci/if_skvar.h 15 Jan 2017 13:56:06 -0000 @@ -83,6 +83,7 @@ struct sk_chain { void *sk_desc; + bus_dmamap_t sk_map; struct mbuf *sk_mbuf; struct sk_chain *sk_next; }; @@ -96,16 +97,9 @@ struct sk_chain { */ #define SK_NTXSEG 30 -struct sk_txmap_entry { - bus_dmamap_t dmamap; - SIMPLEQ_ENTRY(sk_txmap_entry) link; -}; - struct sk_chain_data { struct sk_chain sk_tx_chain[SK_TX_RING_CNT]; struct sk_chain sk_rx_chain[SK_RX_RING_CNT]; - struct sk_txmap_entry *sk_tx_map[SK_TX_RING_CNT]; - bus_dmamap_t sk_rx_map[SK_RX_RING_CNT]; int sk_tx_prod; int sk_tx_cons; int sk_tx_cnt; @@ -213,7 +207,6 @@ struct sk_if_softc { int sk_ring_nseg; struct sk_softc *sk_softc; /* parent controller */ int 
sk_tx_bmu; /* TX BMU register */ - SIMPLEQ_HEAD(__sk_txmaphead, sk_txmap_entry) sk_txmap_head; }; struct skc_attach_args {