Module Name:    src
Committed By:   jakllsch
Date:           Thu Apr 30 01:52:09 UTC 2020

Modified Files:
        src/sys/dev/pci: if_msk.c

Log Message:
msk(4): rework rx descriptor loading to support multiple segments

This paves the way to replace the driver-internal jumbo frame rx buffer
with other receive buffers (for example MCLGET/MEXTMALLOC) in the future.
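
As a rough illustration of where this is headed, a cluster-backed
replacement for the jumbo-buffer allocation in msk_newbuf() might start
out like the sketch below.  This is only a sketch against the standard
mbuf(9) macros, not code from this commit; the bus_dmamap_load_mbuf()
path added here would then take over unchanged.

	/*
	 * Hypothetical sketch: back the rx mbuf with a regular
	 * cluster via MCLGET instead of the driver-internal jumbo
	 * buffer pool.  Variable names mirror msk_newbuf().
	 */
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return ENOBUFS;
	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return ENOBUFS;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	m_adj(m_new, ETHER_ALIGN);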


To generate a diff of this commit:
cvs rdiff -u -r1.105 -r1.106 src/sys/dev/pci/if_msk.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/pci/if_msk.c
diff -u src/sys/dev/pci/if_msk.c:1.105 src/sys/dev/pci/if_msk.c:1.106
--- src/sys/dev/pci/if_msk.c:1.105	Wed Apr 29 20:03:52 2020
+++ src/sys/dev/pci/if_msk.c	Thu Apr 30 01:52:08 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: if_msk.c,v 1.105 2020/04/29 20:03:52 jakllsch Exp $ */
+/* $NetBSD: if_msk.c,v 1.106 2020/04/30 01:52:08 jakllsch Exp $ */
 /*	$OpenBSD: if_msk.c,v 1.79 2009/10/15 17:54:56 deraadt Exp $	*/
 
 /*
@@ -52,7 +52,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_msk.c,v 1.105 2020/04/29 20:03:52 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_msk.c,v 1.106 2020/04/30 01:52:08 jakllsch Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -113,7 +113,7 @@ static int msk_init(struct ifnet *);
 static void msk_init_yukon(struct sk_if_softc *);
 static void msk_stop(struct ifnet *, int);
 static void msk_watchdog(struct ifnet *);
-static int msk_newbuf(struct sk_if_softc *, bus_dmamap_t);
+static int msk_newbuf(struct sk_if_softc *);
 static int msk_alloc_jumbo_mem(struct sk_if_softc *);
 static void *msk_jalloc(struct sk_if_softc *);
 static void msk_jfree(struct mbuf *, void *, size_t, void *);
@@ -472,13 +472,18 @@ msk_init_tx_ring(struct sk_if_softc *sc_
 }
 
 static int
-msk_newbuf(struct sk_if_softc *sc_if, bus_dmamap_t dmamap)
+msk_newbuf(struct sk_if_softc *sc_if)
 {
+	struct sk_softc		*sc = sc_if->sk_softc;
 	struct mbuf		*m_new = NULL;
 	struct sk_chain		*c;
 	struct msk_rx_desc	*r;
 	void			*buf = NULL;
 	bus_addr_t		addr;
+	bus_dmamap_t		rxmap;
+	size_t			i;
+	uint32_t		rxidx, frag, cur, hiaddr, old_hiaddr, total;
+	uint32_t		entries = 0;
 
 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
 	if (m_new == NULL)
@@ -499,44 +504,99 @@ msk_newbuf(struct sk_if_softc *sc_if, bu
 
 	m_adj(m_new, ETHER_ALIGN);
 
-	addr = dmamap->dm_segs[0].ds_addr +
-		  ((vaddr_t)m_new->m_data -
-		   (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf);
-
-	if (sc_if->sk_cdata.sk_rx_hiaddr != MSK_ADDR_HI(addr)) {
-		c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
-		r = &sc_if->sk_rdata->sk_rx_ring[sc_if->sk_cdata.sk_rx_prod];
-		c->sk_mbuf = NULL;
-		r->sk_addr = htole32(MSK_ADDR_HI(addr));
-		r->sk_len = 0;
-		r->sk_ctl = 0;
-		r->sk_opcode = SK_Y2_BMUOPC_ADDR64 | SK_Y2_RXOPC_OWN;
-		sc_if->sk_cdata.sk_rx_hiaddr = MSK_ADDR_HI(addr);
+	rxidx = frag = cur = sc_if->sk_cdata.sk_rx_prod;
+	rxmap = sc_if->sk_cdata.sk_rx_chain[rxidx].sk_dmamap;
 
-		MSK_CDRXSYNC(sc_if, sc_if->sk_cdata.sk_rx_prod,
-		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+	if (bus_dmamap_load_mbuf(sc->sc_dmatag, rxmap, m_new, BUS_DMA_NOWAIT)) {
+		DPRINTFN(2, ("msk_newbuf: dmamap_load failed\n"));
+		m_freem(m_new);
+		return ENOBUFS;
+	}
 
-		SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
-		sc_if->sk_cdata.sk_rx_cnt++;
+	/* Count how many rx descriptors needed. */
+	hiaddr = sc_if->sk_cdata.sk_rx_hiaddr;
+	for (total = i = 0; i < rxmap->dm_nsegs; i++) {
+		if (hiaddr != MSK_ADDR_HI(rxmap->dm_segs[i].ds_addr)) {
+			hiaddr = MSK_ADDR_HI(rxmap->dm_segs[i].ds_addr);
+			total++;
+		}
+		total++;
+	}
 
-		DPRINTFN(10, ("%s: rx ADDR64: %#x\n",
-		    sc_if->sk_ethercom.ec_if.if_xname,
-			(unsigned)MSK_ADDR_HI(addr)));
+	if (total > MSK_RX_RING_CNT - sc_if->sk_cdata.sk_rx_cnt - 1) {
+		DPRINTFN(2, ("msk_newbuf: too few descriptors free\n"));
+		bus_dmamap_unload(sc->sc_dmatag, rxmap);
+		m_freem(m_new);
+		return ENOBUFS;
 	}
 
-	c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
-	r = &sc_if->sk_rdata->sk_rx_ring[sc_if->sk_cdata.sk_rx_prod];
-	c->sk_mbuf = m_new;
-	r->sk_addr = htole32(MSK_ADDR_LO(addr));
-	r->sk_len = htole16(SK_JLEN);
-	r->sk_ctl = 0;
-	r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;
+	DPRINTFN(2, ("msk_newbuf: dm_nsegs=%d total desc=%u\n",
+	    rxmap->dm_nsegs, total));
 
-	MSK_CDRXSYNC(sc_if, sc_if->sk_cdata.sk_rx_prod,
+	/* Sync the DMA map. */
+	bus_dmamap_sync(sc->sc_dmatag, rxmap, 0, rxmap->dm_mapsize,
+	    BUS_DMASYNC_PREREAD);
+
+	old_hiaddr = sc_if->sk_cdata.sk_rx_hiaddr;
+	for (i = 0; i < rxmap->dm_nsegs; i++) {
+		addr = rxmap->dm_segs[i].ds_addr;
+		DPRINTFN(2, ("msk_newbuf: addr %llx\n",
+		    (unsigned long long)addr));
+		hiaddr = MSK_ADDR_HI(addr);
+
+		if (sc_if->sk_cdata.sk_rx_hiaddr != hiaddr) {
+			c = &sc_if->sk_cdata.sk_rx_chain[frag];
+			c->sk_mbuf = NULL;
+			r = &sc_if->sk_rdata->sk_rx_ring[frag];
+			r->sk_addr = htole32(hiaddr);
+			r->sk_len = 0;
+			r->sk_ctl = 0;
+			if (i == 0)
+				r->sk_opcode = SK_Y2_BMUOPC_ADDR64;
+			else
+				r->sk_opcode = SK_Y2_BMUOPC_ADDR64 |
+				    SK_Y2_RXOPC_OWN;
+			sc_if->sk_cdata.sk_rx_hiaddr = hiaddr;
+			MSK_CDRXSYNC(sc_if, frag,
+			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+			SK_INC(frag, MSK_RX_RING_CNT);
+			entries++;
+			DPRINTFN(10, ("%s: rx ADDR64: %#x\n",
+			    sc_if->sk_ethercom.ec_if.if_xname, hiaddr));
+		}
+
+		c = &sc_if->sk_cdata.sk_rx_chain[frag];
+		r = &sc_if->sk_rdata->sk_rx_ring[frag];
+		r->sk_addr = htole32(MSK_ADDR_LO(addr));
+		r->sk_len = htole16(rxmap->dm_segs[i].ds_len);
+		r->sk_ctl = 0;
+		if (i == 0) {
+			if (hiaddr != old_hiaddr)
+				r->sk_opcode = SK_Y2_RXOPC_PACKET |
+				    SK_Y2_RXOPC_OWN;
+			else
+				r->sk_opcode = SK_Y2_RXOPC_PACKET;
+		} else
+			r->sk_opcode = SK_Y2_RXOPC_BUFFER | SK_Y2_RXOPC_OWN;
+		MSK_CDRXSYNC(sc_if, frag,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+		cur = frag;
+		SK_INC(frag, MSK_RX_RING_CNT);
+		entries++;
+	}
+	KASSERTMSG(entries == total, "entries %u total %u", entries, total);
+
+	sc_if->sk_cdata.sk_rx_chain[rxidx].sk_dmamap =
+	    sc_if->sk_cdata.sk_rx_chain[cur].sk_dmamap;
+	sc_if->sk_cdata.sk_rx_chain[cur].sk_mbuf = m_new;
+	sc_if->sk_cdata.sk_rx_chain[cur].sk_dmamap = rxmap;
+
+	sc_if->sk_rdata->sk_rx_ring[rxidx].sk_opcode |= SK_Y2_RXOPC_OWN;
+	MSK_CDRXSYNC(sc_if, rxidx,
 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
 
-	SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
-	sc_if->sk_cdata.sk_rx_cnt++;
+	sc_if->sk_cdata.sk_rx_cnt += entries;
+	sc_if->sk_cdata.sk_rx_prod = frag;
 
 	return 0;
 }
@@ -1189,6 +1249,20 @@ msk_attach(device_t parent, device_t sel
 		sc_if->sk_cdata.sk_tx_chain[i].sk_dmamap = dmamap;
 	}
 
+	for (i = 0; i < MSK_RX_RING_CNT; i++) {
+		sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
+
+		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN,
+		    howmany(SK_JLEN + 1, NBPG),
+		    SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap)) {
+			aprint_error_dev(sc_if->sk_dev,
+			    "Can't create RX dmamap\n");
+			goto fail_3;
+		}
+
+		sc_if->sk_cdata.sk_rx_chain[i].sk_dmamap = dmamap;
+	}
+
 	sc_if->sk_rdata = (struct msk_ring_data *)kva;
 	memset(sc_if->sk_rdata, 0, sizeof(struct msk_ring_data));
 
@@ -1303,6 +1377,11 @@ msk_detach(device_t self, int flags)
 		    sc_if->sk_cdata.sk_tx_chain[i].sk_dmamap);
 	}
 
+	for (i = 0; i < MSK_RX_RING_CNT; i++) {
+		bus_dmamap_destroy(sc->sc_dmatag,
+		    sc_if->sk_cdata.sk_rx_chain[i].sk_dmamap);
+	}
+
 	if (--sc->rnd_attached == 0)
 		rnd_detach_source(&sc->rnd_source);
 
@@ -2027,13 +2106,13 @@ msk_rxeof(struct sk_if_softc *sc_if, uin
 	cur = sc_if->sk_cdata.sk_rx_cons;
 	prod = sc_if->sk_cdata.sk_rx_prod;
 
-	/* Sync the descriptor */
-	MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
 	DPRINTFN(2, ("msk_rxeof: cur %u prod %u rx_cnt %u\n", cur, prod,
 		sc_if->sk_cdata.sk_rx_cnt));
 
 	while (prod != cur) {
+		MSK_CDRXSYNC(sc_if, cur,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
 		tail = cur;
 		SK_INC(cur, MSK_RX_RING_CNT);
 
@@ -2050,10 +2129,11 @@ msk_rxeof(struct sk_if_softc *sc_if, uin
 	if (m == NULL)
 		return;
 
-	dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;
+	dmamap = sc_if->sk_cdata.sk_rx_chain[tail].sk_dmamap;
 
 	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
-	    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+	    uimin(dmamap->dm_mapsize, total_len), BUS_DMASYNC_POSTREAD);
+	bus_dmamap_unload(sc->sc_dmatag, dmamap);
 
 	if (total_len < SK_MIN_FRAMELEN ||
 	    total_len > ETHER_MAX_LEN_JUMBO ||
@@ -2132,8 +2212,7 @@ msk_fill_rx_ring(struct sk_if_softc *sc_
 {
 	/* Make sure to not completely wrap around */
 	while (sc_if->sk_cdata.sk_rx_cnt < (MSK_RX_RING_CNT - 1)) {
-		if (msk_newbuf(sc_if,
-		    sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
+		if (msk_newbuf(sc_if) == ENOBUFS) {
 			goto schedretry;
 		}
 	}
@@ -2640,6 +2719,13 @@ msk_stop(struct ifnet *ifp, int disable)
 	/* Free RX and TX mbufs still in the queues. */
 	for (i = 0; i < MSK_RX_RING_CNT; i++) {
 		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
+			dmamap = sc_if->sk_cdata.sk_rx_chain[i].sk_dmamap;
+
+			bus_dmamap_sync(sc->sc_dmatag, dmamap, 0,
+			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
+
+			bus_dmamap_unload(sc->sc_dmatag, dmamap);
+
 			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
 			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
 		}
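
The descriptor-loading loop above uses a deferred-ownership handoff:
every entry of a multi-segment chain is written with SK_Y2_RXOPC_OWN
already set except the first ring slot (rxidx), whose OWN bit is flipped
only after the rest of the chain has been synced, so the hardware never
sees a partially built chain.  In outline (write_descriptor() and sync()
are illustrative stand-ins, not driver functions):

	/* Build the chain; withhold OWN from the first slot only. */
	for (i = 0; i < nsegs; i++) {
		write_descriptor(&ring[frag], &segs[i]);
		if (i != 0)
			ring[frag].sk_opcode |= SK_Y2_RXOPC_OWN;
		sync(frag);
		SK_INC(frag, MSK_RX_RING_CNT);
	}
	/* Publish the whole chain to the NIC in one step. */
	ring[first].sk_opcode |= SK_Y2_RXOPC_OWN;
	sync(first);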
