On Wed, Nov 04, 2015 at 08:18:48AM +0100, Martin Pieuchot wrote:
> On 04/11/15(Wed) 10:39, David Gwynne wrote:
> > im working on making the interface send queue mpsafe.
> > 
> > part of that involved deprecating the IFQ_POLL api because it allows the
> > caller to get a reference to an mbuf that is still on the send queue. this is
> > dangerous if another cpu tries to manipulate the send queue. instead code 
> > should call IFQ_DEQUEUE, which takes it off the queue for the driver to use.
> > 
> > however, blindly changing code from IFQ_POLL to IFQ_DEQUEUE will
> > cause unwanted packet loss when encapsulation fails in some cases,
> > such as when the tx ring is already full. to cope, the easiest
> > solution is to requeue the packet so the next call to the start
> > routine can try fitting it on the ring again.
> > 
> > this introduces IFQ_PREPEND (cause we currently have IF_PREPEND)
> > and works on top of both hfsc and priq because i added hfsc_requeue
> > a while back.
> > 
> > this also converts uses of IF_PREPEND in drivers to IFQ_PREPEND.
> > this improves the situation a bit if people have decided to use
> > hfsc on these interfaces.

ok, so after talking to kenjiro cho about the problems with IFQ_PREPEND
and arbitrary queuing disciplines, im taking a step back for a bit to
think about how to approach the send queue work. however, deprecating
IF_PREPEND is still necessary.

this tweaks the relevant drivers so they no longer need IF_PREPEND.
note that these are non-trivial changes, so i would like some review
and ideally some actual testing, especially on vr, since im sure it
has a lot of users.
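
for reviewers, the common shape of the new start routines is roughly
the following. this is only a sketch: sc_tx_cnt, MAX_TX_SEGS,
TX_RING_CNT, RESERVED and xx_encap() stand in for each driver's own
counters, limits and encap function.

        for (;;) {
                /* stop before dequeueing if a maximally fragmented
                 * packet might not fit on the ring */
                if (sc->sc_tx_cnt + MAX_TX_SEGS >= TX_RING_CNT - RESERVED) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                /* only dequeue once we know the ring has room for it */
                IFQ_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;

                /* remaining encap failures are real errors, so drop */
                if (xx_encap(sc, m) != 0) {
                        ifp->if_oerrors++;
                        continue;
                }
                enq++;
        }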

most of the changes are just shuffling conditionals around, but vr
also includes a change to use m_defrag. im not sure that is enough
to satisfy the alignment requirements the code talks about, so vr
needs testing, along with at least one of age, alc, or ale.
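
for reference, the age/alc/ale encap paths all end up with the same
load/defrag dance, paraphrased below from the hunks. vr is the same
idea, except the VR_Q_NEEDALIGN case now skips the first load entirely
and relies on m_defrag to do the copy, which is the part im unsure about.

        error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
        if (error != 0 && error != EFBIG)
                goto drop;
        if (error != 0) {
                /* too many segments: compact the chain and retry once */
                if (m_defrag(m, M_DONTWAIT)) {
                        error = ENOBUFS;
                        goto drop;
                }
                error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
                    BUS_DMA_NOWAIT);
                if (error != 0)
                        goto drop;
        }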

ok?

Index: if_age.c
===================================================================
RCS file: /cvs/src/sys/dev/pci/if_age.c,v
retrieving revision 1.29
diff -u -p -r1.29 if_age.c
--- if_age.c    25 Oct 2015 13:04:28 -0000      1.29
+++ if_age.c    6 Nov 2015 11:27:04 -0000
@@ -89,7 +89,7 @@ void  age_dma_free(struct age_softc *);
 void   age_get_macaddr(struct age_softc *);
 void   age_phy_reset(struct age_softc *);
 
-int    age_encap(struct age_softc *, struct mbuf **);
+int    age_encap(struct age_softc *, struct mbuf *);
 void   age_init_tx_ring(struct age_softc *);
 int    age_init_rx_ring(struct age_softc *);
 void   age_init_rr_ring(struct age_softc *);
@@ -957,7 +957,7 @@ void
 age_start(struct ifnet *ifp)
 {
         struct age_softc *sc = ifp->if_softc;
-        struct mbuf *m_head;
+        struct mbuf *m;
        int enq;
 
        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
@@ -969,8 +969,14 @@ age_start(struct ifnet *ifp)
 
        enq = 0;
        for (;;) {
-               IFQ_DEQUEUE(&ifp->if_snd, m_head);
-               if (m_head == NULL)
+               if (sc->age_cdata.age_tx_cnt + AGE_MAXTXSEGS >=
+                   AGE_TX_RING_CNT - 2) {
+                       ifp->if_flags |= IFF_OACTIVE;
+                       break;
+               }
+
+               IFQ_DEQUEUE(&ifp->if_snd, m);
+               if (m == NULL)
                        break;
 
                /*
@@ -978,14 +984,9 @@ age_start(struct ifnet *ifp)
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
-               if (age_encap(sc, &m_head)) {
-                       if (m_head == NULL)
-                               ifp->if_oerrors++;
-                       else {
-                               IF_PREPEND(&ifp->if_snd, m_head);
-                               ifp->if_flags |= IFF_OACTIVE;
-                       }
-                       break;
+               if (age_encap(sc, m) != 0) {
+                       ifp->if_oerrors++;
+                       continue;
                }
                enq = 1;
 
@@ -995,7 +996,7 @@ age_start(struct ifnet *ifp)
                 * to him.
                 */
                if (ifp->if_bpf != NULL)
-                       bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
+                       bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
        }
 
@@ -1115,16 +1116,14 @@ age_mac_config(struct age_softc *sc)
 }
 
 int
-age_encap(struct age_softc *sc, struct mbuf **m_head)
+age_encap(struct age_softc *sc, struct mbuf *m)
 {
        struct age_txdesc *txd, *txd_last;
        struct tx_desc *desc;
-       struct mbuf *m;
        bus_dmamap_t map;
        uint32_t cflags, poff, vtag;
        int error, i, prod;
 
-       m = *m_head;
        cflags = vtag = 0;
        poff = 0;
 
@@ -1133,27 +1132,20 @@ age_encap(struct age_softc *sc, struct m
        txd_last = txd;
        map = txd->tx_dmamap;
 
-       error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
+       error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
        if (error != 0 && error != EFBIG)
                goto drop;
        if (error != 0) {
-               if (m_defrag(*m_head, M_DONTWAIT)) {
+               if (m_defrag(m, M_DONTWAIT)) {
                        error = ENOBUFS;
                        goto drop;
                }
-               error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
+               error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
                    BUS_DMA_NOWAIT);
                if (error != 0)
                        goto drop;
        }
 
-       /* Check descriptor overrun. */
-       if (sc->age_cdata.age_tx_cnt + map->dm_nsegs >= AGE_TX_RING_CNT - 2) {
-               bus_dmamap_unload(sc->sc_dmat, map);
-               return (ENOBUFS);
-       }
-
-       m = *m_head;
        /* Configure Tx IP/TCP/UDP checksum offload. */
        if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
                cflags |= AGE_TD_CSUM;
@@ -1210,8 +1202,7 @@ age_encap(struct age_softc *sc, struct m
        return (0);
 
  drop:
-       m_freem(*m_head);
-       *m_head = NULL;
+       m_freem(m);
        return (error);
 }
 
Index: if_alc.c
===================================================================
RCS file: /cvs/src/sys/dev/pci/if_alc.c,v
retrieving revision 1.35
diff -u -p -r1.35 if_alc.c
--- if_alc.c    25 Oct 2015 13:04:28 -0000      1.35
+++ if_alc.c    6 Nov 2015 11:27:04 -0000
@@ -83,7 +83,7 @@ void  alc_aspm(struct alc_softc *, uint64
 void   alc_disable_l0s_l1(struct alc_softc *);
 int    alc_dma_alloc(struct alc_softc *);
 void   alc_dma_free(struct alc_softc *);
-int    alc_encap(struct alc_softc *, struct mbuf **);
+int    alc_encap(struct alc_softc *, struct mbuf *);
 void   alc_get_macaddr(struct alc_softc *);
 void   alc_init_cmb(struct alc_softc *);
 void   alc_init_rr_ring(struct alc_softc *);
@@ -1265,16 +1265,14 @@ alc_dma_free(struct alc_softc *sc)
 }
 
 int
-alc_encap(struct alc_softc *sc, struct mbuf **m_head)
+alc_encap(struct alc_softc *sc, struct mbuf *m)
 {
        struct alc_txdesc *txd, *txd_last;
        struct tx_desc *desc;
-       struct mbuf *m;
        bus_dmamap_t map;
        uint32_t cflags, poff, vtag;
        int error, idx, prod;
 
-       m = *m_head;
        cflags = vtag = 0;
        poff = 0;
 
@@ -1283,30 +1281,23 @@ alc_encap(struct alc_softc *sc, struct m
        txd_last = txd;
        map = txd->tx_dmamap;
 
-       error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
+       error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
        if (error != 0 && error != EFBIG)
                goto drop;
        if (error != 0) {
-               if (m_defrag(*m_head, M_DONTWAIT)) {
+               if (m_defrag(m, M_DONTWAIT)) {
                        error = ENOBUFS;
                        goto drop;
                }
-               error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
+               error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
                    BUS_DMA_NOWAIT);
                if (error != 0)
                        goto drop;
        }
 
-       /* Check descriptor overrun. */
-       if (sc->alc_cdata.alc_tx_cnt + map->dm_nsegs >= ALC_TX_RING_CNT - 3) {
-               bus_dmamap_unload(sc->sc_dmat, map);
-               return (ENOBUFS);
-       }
-
        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_PREWRITE);
 
-       m = *m_head;
        desc = NULL;
        idx = 0;
 #if NVLAN > 0
@@ -1353,8 +1344,7 @@ alc_encap(struct alc_softc *sc, struct m
        return (0);
 
  drop:
-       m_freem(*m_head);
-       *m_head = NULL;
+       m_freem(m);
        return (error);
 }
 
@@ -1362,7 +1352,7 @@ void
 alc_start(struct ifnet *ifp)
 {
        struct alc_softc *sc = ifp->if_softc;
-       struct mbuf *m_head;
+       struct mbuf *m;
        int enq = 0;
 
        /* Reclaim transmitted frames. */
@@ -1377,23 +1367,19 @@ alc_start(struct ifnet *ifp)
                return;
 
        for (;;) {
-               IFQ_DEQUEUE(&ifp->if_snd, m_head);
-               if (m_head == NULL)
+               if (sc->alc_cdata.alc_tx_cnt + ALC_MAXTXSEGS >=
+                   ALC_TX_RING_CNT - 3) {
+                       ifp->if_flags |= IFF_OACTIVE;
                        break;
+               }
 
-               /*
-                * Pack the data into the transmit ring. If we
-                * don't have room, set the OACTIVE flag and wait
-                * for the NIC to drain the ring.
-                */
-               if (alc_encap(sc, &m_head)) {
-                       if (m_head == NULL)
-                               ifp->if_oerrors++;
-                       else {
-                               IF_PREPEND(&ifp->if_snd, m_head);
-                               ifp->if_flags |= IFF_OACTIVE;
-                       }
+               IFQ_DEQUEUE(&ifp->if_snd, m);
+               if (m == NULL)
                        break;
+
+               if (alc_encap(sc, m) != 0) {
+                       ifp->if_oerrors++;
+                       continue;
                }
                enq++;
                
@@ -1403,7 +1389,7 @@ alc_start(struct ifnet *ifp)
                 * to him.
                 */
                if (ifp->if_bpf != NULL)
-                       bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
+                       bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
        }
 
Index: if_ale.c
===================================================================
RCS file: /cvs/src/sys/dev/pci/if_ale.c,v
retrieving revision 1.40
diff -u -p -r1.40 if_ale.c
--- if_ale.c    25 Oct 2015 13:04:28 -0000      1.40
+++ if_ale.c    6 Nov 2015 11:27:04 -0000
@@ -96,7 +96,7 @@ void  ale_txeof(struct ale_softc *);
 
 int    ale_dma_alloc(struct ale_softc *);
 void   ale_dma_free(struct ale_softc *);
-int    ale_encap(struct ale_softc *, struct mbuf **);
+int    ale_encap(struct ale_softc *, struct mbuf *);
 void   ale_init_rx_pages(struct ale_softc *);
 void   ale_init_tx_ring(struct ale_softc *);
 
@@ -866,16 +866,14 @@ ale_dma_free(struct ale_softc *sc)
 }
 
 int
-ale_encap(struct ale_softc *sc, struct mbuf **m_head)
+ale_encap(struct ale_softc *sc, struct mbuf *m)
 {
        struct ale_txdesc *txd, *txd_last;
        struct tx_desc *desc;
-       struct mbuf *m;
        bus_dmamap_t map;
        uint32_t cflags, poff, vtag;
        int error, i, prod;
 
-       m = *m_head;
        cflags = vtag = 0;
        poff = 0;
 
@@ -884,30 +882,23 @@ ale_encap(struct ale_softc *sc, struct m
        txd_last = txd;
        map = txd->tx_dmamap;
 
-       error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
+       error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
        if (error != 0 && error != EFBIG)
                goto drop;
        if (error != 0) {
-               if (m_defrag(*m_head, M_DONTWAIT)) {
+               if (m_defrag(m, M_DONTWAIT)) {
                        error = ENOBUFS;
                        goto drop;
                }
-               error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
+               error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
                    BUS_DMA_NOWAIT);
                if (error != 0)
                        goto drop;
        }
 
-       /* Check descriptor overrun. */
-       if (sc->ale_cdata.ale_tx_cnt + map->dm_nsegs >= ALE_TX_RING_CNT - 2) {
-               bus_dmamap_unload(sc->sc_dmat, map);
-               return (ENOBUFS);
-       }
-
        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_PREWRITE);
 
-       m = *m_head;
        /* Configure Tx checksum offload. */
        if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
                /*
@@ -980,8 +971,7 @@ ale_encap(struct ale_softc *sc, struct m
        return (0);
 
  drop:
-       m_freem(*m_head);
-       *m_head = NULL;
+       m_freem(m);
        return (error);
 }
 
@@ -989,7 +979,7 @@ void
 ale_start(struct ifnet *ifp)
 {
         struct ale_softc *sc = ifp->if_softc;
-       struct mbuf *m_head;
+       struct mbuf *m;
        int enq;
 
        /* Reclaim transmitted frames. */
@@ -1005,8 +995,15 @@ ale_start(struct ifnet *ifp)
 
        enq = 0;
        for (;;) {
-               IFQ_DEQUEUE(&ifp->if_snd, m_head);
-               if (m_head == NULL)
+               /* Check descriptor overrun. */
+               if (sc->ale_cdata.ale_tx_cnt + ALE_MAXTXSEGS >=
+                   ALE_TX_RING_CNT - 2) {
+                       ifp->if_flags |= IFF_OACTIVE;
+                       break;
+               }
+
+               IFQ_DEQUEUE(&ifp->if_snd, m);
+               if (m == NULL)
                        break;
 
                /*
@@ -1014,14 +1011,9 @@ ale_start(struct ifnet *ifp)
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
-               if (ale_encap(sc, &m_head)) {
-                       if (m_head == NULL)
-                               ifp->if_oerrors++;
-                       else {
-                               IF_PREPEND(&ifp->if_snd, m_head);
-                               ifp->if_flags |= IFF_OACTIVE;
-                       }
-                       break;
+               if (ale_encap(sc, m) != 0) {
+                       ifp->if_oerrors++;
+                       continue;
                }
 
                enq = 1;
@@ -1032,7 +1024,7 @@ ale_start(struct ifnet *ifp)
                 * to him.
                 */
                if (ifp->if_bpf != NULL)
-                       bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
+                       bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
        }
 
Index: if_jme.c
===================================================================
RCS file: /cvs/src/sys/dev/pci/if_jme.c,v
retrieving revision 1.42
diff -u -p -r1.42 if_jme.c
--- if_jme.c    25 Oct 2015 13:04:28 -0000      1.42
+++ if_jme.c    6 Nov 2015 11:27:04 -0000
@@ -100,7 +100,7 @@ int jme_init_rx_ring(struct jme_softc *)
 void   jme_init_tx_ring(struct jme_softc *);
 void   jme_init_ssb(struct jme_softc *);
 int    jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
-int    jme_encap(struct jme_softc *, struct mbuf **);
+int    jme_encap(struct jme_softc *, struct mbuf *);
 void   jme_rxpkt(struct jme_softc *);
 
 void   jme_tick(void *);
@@ -1108,11 +1108,10 @@ jme_setwol(struct jme_softc *sc)
 #endif
 
 int
-jme_encap(struct jme_softc *sc, struct mbuf **m_head)
+jme_encap(struct jme_softc *sc, struct mbuf *m)
 {
        struct jme_txdesc *txd;
        struct jme_desc *desc;
-       struct mbuf *m;
        int error, i, prod;
        uint32_t cflags;
 
@@ -1120,32 +1119,20 @@ jme_encap(struct jme_softc *sc, struct m
        txd = &sc->jme_cdata.jme_txdesc[prod];
 
        error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
-                                    *m_head, BUS_DMA_NOWAIT);
+           m, BUS_DMA_NOWAIT);
        if (error != 0 && error != EFBIG)
                goto drop;
        if (error != 0) {
-               if (m_defrag(*m_head, M_DONTWAIT)) {
+               if (m_defrag(m, M_DONTWAIT)) {
                        error = ENOBUFS;
                        goto drop;
                }
                error = bus_dmamap_load_mbuf(sc->sc_dmat, txd->tx_dmamap,
-                                            *m_head, BUS_DMA_NOWAIT);
+                                            m, BUS_DMA_NOWAIT);
                if (error != 0)
                        goto drop;
        }
 
-       /*
-        * Check descriptor overrun. Leave one free descriptor.
-        * Since we always use 64bit address mode for transmitting,
-        * each Tx request requires one more dummy descriptor.
-        */
-       if (sc->jme_cdata.jme_tx_cnt + txd->tx_dmamap->dm_nsegs + JME_TXD_RSVD >
-           JME_TX_RING_CNT - JME_TXD_RSVD) {
-               bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
-               return (ENOBUFS);
-       }
-
-       m = *m_head;
        cflags = 0;
 
        /* Configure checksum offload. */
@@ -1204,8 +1191,7 @@ jme_encap(struct jme_softc *sc, struct m
        return (0);
 
   drop:
-       m_freem(*m_head);
-       *m_head = NULL;
+       m_freem(m);
        return (error);
 }
 
@@ -1213,7 +1199,7 @@ void
 jme_start(struct ifnet *ifp)
 {
        struct jme_softc *sc = ifp->if_softc;
-       struct mbuf *m_head;
+       struct mbuf *m;
        int enq = 0;
 
        /* Reclaim transmitted frames. */
@@ -1238,8 +1224,8 @@ jme_start(struct ifnet *ifp)
                        break;
                }
 
-               IFQ_DEQUEUE(&ifp->if_snd, m_head);
-               if (m_head == NULL)
+               IFQ_DEQUEUE(&ifp->if_snd, m);
+               if (m == NULL)
                        break;
 
                /*
@@ -1247,14 +1233,9 @@ jme_start(struct ifnet *ifp)
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
-               if (jme_encap(sc, &m_head)) {
-                       if (m_head == NULL)
-                               ifp->if_oerrors++;
-                       else {
-                               IF_PREPEND(&ifp->if_snd, m_head);
-                               ifp->if_flags |= IFF_OACTIVE;
-                       }
-                       break;
+               if (jme_encap(sc, m) != 0) {
+                       ifp->if_oerrors++;
+                       continue;
                }
 
                enq++;
@@ -1265,7 +1246,7 @@ jme_start(struct ifnet *ifp)
                 * to him.
                 */
                if (ifp->if_bpf != NULL)
-                       bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
+                       bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
        }
 
Index: if_vr.c
===================================================================
RCS file: /cvs/src/sys/dev/pci/if_vr.c,v
retrieving revision 1.144
diff -u -p -r1.144 if_vr.c
--- if_vr.c     25 Oct 2015 13:04:28 -0000      1.144
+++ if_vr.c     6 Nov 2015 11:27:05 -0000
@@ -1185,62 +1185,51 @@ vr_intr(void *arg)
  * pointers to the fragment pointers.
  */
 int
-vr_encap(struct vr_softc *sc, struct vr_chain **cp, struct mbuf *m_head)
+vr_encap(struct vr_softc *sc, struct vr_chain **cp, struct mbuf *m)
 {
        struct vr_chain         *c = *cp;
        struct vr_desc          *f = NULL;
-       struct mbuf             *m_new = NULL;
        u_int32_t               vr_ctl = 0, vr_status = 0, intdisable = 0;
        bus_dmamap_t            txmap;
        int                     i, runt = 0;
+       int                     error;
 
        if (sc->vr_quirks & VR_Q_CSUM) {
-               if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
+               if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
                        vr_ctl |= VR_TXCTL_IPCSUM;
-               if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
+               if (m->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
                        vr_ctl |= VR_TXCTL_TCPCSUM;
-               if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
+               if (m->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
                        vr_ctl |= VR_TXCTL_UDPCSUM;
        }
 
-       /* Deep copy for chips that need alignment, or too many segments */
-       if (sc->vr_quirks & VR_Q_NEEDALIGN ||
-           bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_head,
-                                BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
-               MGETHDR(m_new, M_DONTWAIT, MT_DATA);
-               if (m_new == NULL)
-                       return (1);
-               if (m_head->m_pkthdr.len > MHLEN) {
-                       MCLGET(m_new, M_DONTWAIT);
-                       if (!(m_new->m_flags & M_EXT)) {
-                               m_freem(m_new);
-                               return (1);
-                       }
-               }
-               m_copydata(m_head, 0, m_head->m_pkthdr.len,
-                   mtod(m_new, caddr_t));
-               m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
-
-               if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
-                   BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
-                       m_freem(m_new);
-                       return(1);
-               }
-       }
+       if (sc->vr_quirks & VR_Q_NEEDALIGN) {
+               /* Deep copy for chips that need alignment */
+               error = EFBIG;
+       } else {
+               error = bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m,
+                   BUS_DMA_NOWAIT | BUS_DMA_WRITE);
+       }
+
+       switch (error) {
+       case 0:
+               break;
+       case EFBIG:
+               if (m_defrag(m, M_DONTWAIT) == 0 &&
+                   bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m,
+                   BUS_DMA_NOWAIT) == 0)
+                       break;
+
+               /* FALLTHROUGH */
+       default:
+               return (ENOBUFS);
+       }
 
        bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
            BUS_DMASYNC_PREWRITE);
        if (c->vr_map->dm_mapsize < VR_MIN_FRAMELEN)
                runt = 1;
 
-       /* Check number of available descriptors */
-       if (sc->vr_cdata.vr_tx_cnt + c->vr_map->dm_nsegs + runt >=
-           (VR_TX_LIST_CNT - 1)) {
-               if (m_new)
-                       m_freem(m_new);
-               return(1);
-       }
-
 #if NVLAN > 0
        /*
         * Tell chip to insert VLAN tag if needed.
@@ -1248,8 +1237,8 @@ vr_encap(struct vr_softc *sc, struct vr_
         * in only 15 bits without the gap at 0x1000 (reserved for DEI).
         * Therefore we need to de- / re-construct the VLAN header.
         */
-       if (m_head->m_flags & M_VLANTAG) {
-               u_int32_t vtag = m_head->m_pkthdr.ether_vtag;
+       if (m->m_flags & M_VLANTAG) {
+               u_int32_t vtag = m->m_pkthdr.ether_vtag;
                vtag = EVL_VLANOFTAG(vtag) | EVL_PRIOFTAG(vtag) << 12;
                vr_status |= vtag << VR_TXSTAT_PQSHIFT;
                vr_ctl |= htole32(VR_TXCTL_INSERTTAG);
@@ -1268,12 +1257,7 @@ vr_encap(struct vr_softc *sc, struct vr_
            sc->vr_quirks & VR_Q_INTDISABLE)
                intdisable = VR_TXNEXT_INTDISABLE;
 
-       if (m_new != NULL) {
-               m_freem(m_head);
-
-               c->vr_mbuf = m_new;
-       } else
-               c->vr_mbuf = m_head;
+       c->vr_mbuf = m;
        txmap = c->vr_map;
        for (i = 0; i < txmap->dm_nsegs; i++) {
                if (i != 0)
@@ -1321,7 +1305,7 @@ void
 vr_start(struct ifnet *ifp)
 {
        struct vr_softc         *sc;
-       struct mbuf             *m_head;
+       struct mbuf             *m;
        struct vr_chain         *cur_tx, *head_tx;
        unsigned int             queued = 0;
 
@@ -1334,17 +1318,22 @@ vr_start(struct ifnet *ifp)
                return;
 
        cur_tx = sc->vr_cdata.vr_tx_prod;
-       while (cur_tx->vr_mbuf == NULL) {
-               IFQ_DEQUEUE(&ifp->if_snd, m_head);
-               if (m_head == NULL)
+       for (;;) {
+               if (sc->vr_cdata.vr_tx_cnt + VR_MAXFRAGS >=
+                   VR_TX_LIST_CNT - 1) {
+                       ifp->if_flags |= IFF_OACTIVE;
+                       break;
+               }
+
+               IFQ_DEQUEUE(&ifp->if_snd, m);
+               if (m == NULL)
                        break;
 
                /* Pack the data into the descriptor. */
                head_tx = cur_tx;
-               if (vr_encap(sc, &cur_tx, m_head)) {
-                       /* Rollback, send what we were able to encap. */
-                       IF_PREPEND(&ifp->if_snd, m_head);
-                       break;
+               if (vr_encap(sc, &cur_tx, m)) {
+                       ifp->if_oerrors++;
+                       continue;
                }
                queued++;
 
@@ -1357,8 +1346,7 @@ vr_start(struct ifnet *ifp)
                 * to him.
                 */
                if (ifp->if_bpf)
-                       bpf_mtap_ether(ifp->if_bpf, head_tx->vr_mbuf,
-                       BPF_DIRECTION_OUT);
+                       bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
                cur_tx = cur_tx->vr_nextdesc;
        }
@@ -1374,9 +1362,6 @@ vr_start(struct ifnet *ifp)
 
                /* Set a timeout in case the chip goes out to lunch. */
                ifp->if_timer = 5;
-
-               if (cur_tx->vr_mbuf != NULL)
-                       ifp->if_flags |= IFF_OACTIVE;
        }
 }
 
