Here is a diff to fix a few issues with jme_encap(). I noticed some issues with
jme_encap() as it was ported from DragonFly. I had fixed similar issues with
age(4) and alc(4), also originating from DragonFly. Perhaps age/alc took their
inspiration from jme when they were ported from DragonFly?

- Remove the maximum DMA segments handling bits, as they are unused in both
  the DragonFly and OpenBSD drivers.
- Fix error handling for bus_dmamap_load_mbuf() so as not to try unloading
  a DMA map that was never loaded in the first place.
- Clean up the DMA chain defragmenting path to remove unwanted printfs and
  simplify things a bit.
- Have jme_encap() check the number of mapped DMA segments against the TX
  ring to see if it'll fit, as most drivers do nowadays.
- Remove the KASSERTs that shouldn't be there.
- Simplify the dummy descriptor handling to be closer to the FreeBSD
  driver: unlike the DragonFly driver this originated from, our
  driver always uses the 64-bit dummy descriptor.
- If the ring was full make sure to IF_PREPEND() the packet back on
  the queue since it wasn't transmitted.

Tested by com...@daknet.org and vigdis+o...@chown.me and myself.

OK?


Index: if_jme.c
===================================================================
RCS file: /home/cvs/src/sys/dev/pci/if_jme.c,v
retrieving revision 1.32
diff -u -p -r1.32 if_jme.c
--- if_jme.c    3 Nov 2013 23:27:33 -0000       1.32
+++ if_jme.c    28 Nov 2013 19:38:12 -0000
@@ -1044,48 +1044,36 @@ jme_encap(struct jme_softc *sc, struct m
        struct jme_txdesc *txd;
        struct jme_desc *desc;
        struct mbuf *m;
-       int maxsegs;
        int error, i, prod;
        uint32_t cflags;
 
        prod = sc->jme_cdata.jme_tx_prod;
        txd = &sc->jme_cdata.jme_txdesc[prod];
 
-       maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
-                 (JME_TXD_RSVD + 1);
-       if (maxsegs > JME_MAXTXSEGS)
-               maxsegs = JME_MAXTXSEGS;
-       if (maxsegs < (sc->jme_txd_spare - 1))
-               panic("%s: not enough segments %d", sc->sc_dev.dv_xname,
-                   maxsegs);
-
        error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
                                     *m_head, BUS_DMA_NOWAIT);
+       if (error != 0 && error != EFBIG)
+               goto drop;
        if (error != 0) {
-               bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
-               error = EFBIG;
-       }
-       if (error == EFBIG) {
                if (m_defrag(*m_head, M_DONTWAIT)) {
-                       printf("%s: can't defrag TX mbuf\n",
-                           sc->sc_dev.dv_xname);
-                       m_freem(*m_head);
-                       *m_head = NULL;
-                       return (ENOBUFS);
-               }
-               error = bus_dmamap_load_mbuf(sc->sc_dmat,
-                                            txd->tx_dmamap, *m_head,
-                                            BUS_DMA_NOWAIT);
-               if (error != 0) {
-                       printf("%s: could not load defragged TX mbuf\n",
-                           sc->sc_dev.dv_xname);
-                       m_freem(*m_head);
-                       *m_head = NULL;
-                       return (error);
-               }
-       } else if (error) {
-               printf("%s: could not load TX mbuf\n", sc->sc_dev.dv_xname);
-               return (error);
+                       error = ENOBUFS;
+                       goto drop;
+               }
+               error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
+                                            *m_head, BUS_DMA_NOWAIT);
+               if (error != 0)
+                       goto drop;
+       }
+
+       /*
+        * Check descriptor overrun. Leave one free descriptor.
+        * Since we always use 64bit address mode for transmitting,
+        * each Tx request requires one more dummy descriptor.
+        */
+       if (sc->jme_cdata.jme_tx_cnt + txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD >
+           JME_TX_RING_CNT - JME_TXD_RSVD) {
+               bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
+               return (ENOBUFS);
        }
 
        m = *m_head;
@@ -1113,7 +1101,6 @@ jme_encap(struct jme_softc *sc, struct m
        desc->addr_hi = htole32(m->m_pkthdr.len);
        desc->addr_lo = 0;
        sc->jme_cdata.jme_tx_cnt++;
-       KASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
        JME_DESC_INC(prod, JME_TX_RING_CNT);
        for (i = 0; i < txd->tx_dmamap->dm_nsegs; i++) {
                desc = &sc->jme_rdata.jme_tx_ring[prod];
@@ -1123,10 +1110,7 @@ jme_encap(struct jme_softc *sc, struct m
                    htole32(JME_ADDR_HI(txd->tx_dmamap->dm_segs[i].ds_addr));
                desc->addr_lo =
                    htole32(JME_ADDR_LO(txd->tx_dmamap->dm_segs[i].ds_addr));
-
                sc->jme_cdata.jme_tx_cnt++;
-               KASSERT(sc->jme_cdata.jme_tx_cnt <=
-                        JME_TX_RING_CNT - JME_TXD_RSVD);
                JME_DESC_INC(prod, JME_TX_RING_CNT);
        }
 
@@ -1140,7 +1124,7 @@ jme_encap(struct jme_softc *sc, struct m
        desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
 
        txd->tx_m = m;
-       txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + 1;
+       txd->tx_ndesc = txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD;
 
        /* Sync descriptors. */
        bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
@@ -1149,6 +1133,11 @@ jme_encap(struct jme_softc *sc, struct m
             sc->jme_cdata.jme_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
 
        return (0);
+
+  drop:
+       m_freem(*m_head);
+       *m_head = NULL;
+       return (error);
 }
 
 void
@@ -1174,7 +1163,7 @@ jme_start(struct ifnet *ifp)
                 * Check number of available TX descs, always
                 * leave JME_TXD_RSVD free TX descs.
                 */
-               if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
+               if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD >
                    JME_TX_RING_CNT - JME_TXD_RSVD) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
@@ -1190,13 +1179,15 @@ jme_start(struct ifnet *ifp)
                 * for the NIC to drain the ring.
                 */
                if (jme_encap(sc, &m_head)) {
-                       if (m_head == NULL) {
+                       if (m_head == NULL)
                                ifp->if_oerrors++;
-                               break;
+                       else {
+                               IF_PREPEND(&ifp->if_snd, m_head);
+                               ifp->if_flags |= IFF_OACTIVE;
                        }
-                       ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
+
                enq++;
 
 #if NBPFILTER > 0
@@ -1528,7 +1519,7 @@ jme_txeof(struct jme_softc *sc)
        if (sc->jme_cdata.jme_tx_cnt == 0)
                ifp->if_timer = 0;
 
-       if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
+       if (sc->jme_cdata.jme_tx_cnt + JME_TXD_RSVD <=
            JME_TX_RING_CNT - JME_TXD_RSVD)
                ifp->if_flags &= ~IFF_OACTIVE;
 
@@ -1775,14 +1766,6 @@ jme_init(struct ifnet *ifp)
         * Reset the chip to a known state.
         */
        jme_reset(sc);
-
-       /*
-        * Since we always use 64bit address mode for transmitting,
-        * each Tx request requires one more dummy descriptor.
-        */
-       sc->jme_txd_spare =
-       howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES) + 1;
-       KASSERT(sc->jme_txd_spare >= 2);
 
        /* Init descriptors. */
        error = jme_init_rx_ring(sc);
Index: if_jmevar.h
===================================================================
RCS file: /home/cvs/src/sys/dev/pci/if_jmevar.h,v
retrieving revision 1.5
diff -u -p -r1.5 if_jmevar.h
--- if_jmevar.h 10 Jan 2009 15:33:05 -0000      1.5
+++ if_jmevar.h 3 Nov 2013 23:34:57 -0000
@@ -209,8 +209,6 @@ struct jme_softc {
        uint32_t                jme_txcsr;
        uint32_t                jme_rxcsr;
 
-       int                     jme_txd_spare;
-
        /*
         * Sysctl variables
         */

-- 
This message has been scanned for viruses and
dangerous content by MailScanner, and is
believed to be clean.

Reply via email to