Hello,
last year, a patch regarding bnx(4) jumbos was provided and refined by
dlg@, kettenis@ and brad@.
I've tested the diff for if_bnx.c against current and setting MTUs >
1500 works in principle.
However, with this diff applied to current, packet loss sets in quickly
and keeps getting worse regardless of packet size (i.e. even when pinging a
directly connected host), and the interface soon becomes unusable.
Are these problems likely specific to the BCM5709 (I believe Brad tested
with a BCM5708), or are there more general problems with this patch?
# dmesg|grep -i bcm
bnx0 at pci7 dev 0 function 0 "Broadcom BCM5709" rev 0x20: apic 0 int 6
bnx1 at pci7 dev 0 function 1 "Broadcom BCM5709" rev 0x20: apic 0 int 13
bnx2 at pci8 dev 0 function 0 "Broadcom BCM5709" rev 0x20: apic 0 int 7
bnx3 at pci8 dev 0 function 1 "Broadcom BCM5709" rev 0x20: apic 0 int 15
brgphy0 at bnx0 phy 1: BCM5709 10/100/1000baseT PHY, rev. 8
brgphy1 at bnx1 phy 1: BCM5709 10/100/1000baseT PHY, rev. 8
brgphy2 at bnx2 phy 1: BCM5709 10/100/1000baseT PHY, rev. 8
brgphy3 at bnx3 phy 1: BCM5709 10/100/1000baseT PHY, rev. 8
Best Regards
Andreas
Index: sys/dev/pci/if_bnx.c
===================================================================
RCS file: /cvs/src/sys/dev/pci/if_bnx.c,v
retrieving revision 1.100
diff -u -r1.100 if_bnx.c
--- sys/dev/pci/if_bnx.c 13 Jan 2013 05:45:10 -0000 1.100
+++ sys/dev/pci/if_bnx.c 2 Mar 2013 10:06:46 -0000
@@ -848,6 +848,8 @@
sc->bnx_rx_ticks = 18;
#endif
+ sc->mbuf_alloc_size = BNX_MAX_JUMBO_MRU;
+
/* Update statistics once every second. */
sc->bnx_stats_ticks = 1000000 & 0xffff00;
@@ -878,9 +880,10 @@
ifp->if_ioctl = bnx_ioctl;
ifp->if_start = bnx_start;
ifp->if_watchdog = bnx_watchdog;
+ ifp->if_hardmtu = BNX_MAX_JUMBO_MTU;
IFQ_SET_MAXLEN(&ifp->if_snd, USABLE_TX_BD - 1);
IFQ_SET_READY(&ifp->if_snd);
- m_clsetwms(ifp, MCLBYTES, 2, USABLE_RX_BD);
+ m_clsetwms(ifp, sc->mbuf_alloc_size, 2, USABLE_RX_BD);
bcopy(sc->eaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
bcopy(sc->bnx_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
@@ -894,8 +897,6 @@
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
- sc->mbuf_alloc_size = BNX_MAX_MRU;
-
printf("%s: address %s\n", sc->bnx_dev.dv_xname,
ether_sprintf(sc->arpcom.ac_enaddr));
@@ -2664,8 +2665,8 @@
* Create DMA maps for the Rx buffer mbufs.
*/
for (i = 0; i < TOTAL_RX_BD; i++) {
- if (bus_dmamap_create(sc->bnx_dmatag, BNX_MAX_MRU,
- BNX_MAX_SEGMENTS, BNX_MAX_MRU, 0, BUS_DMA_NOWAIT,
+ if (bus_dmamap_create(sc->bnx_dmatag, sc->mbuf_alloc_size,
+ 1, sc->mbuf_alloc_size, 0, BUS_DMA_NOWAIT,
&sc->rx_mbuf_map[i])) {
printf(": Could not create Rx mbuf %d DMA map!\n", i);
rc = ENOMEM;
@@ -3680,10 +3681,10 @@
*prod_bseq);
/* This is a new mbuf allocation. */
- m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, MCLBYTES);
+ m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, sc->mbuf_alloc_size);
if (!m)
return (ENOBUFS);
- m->m_len = m->m_pkthdr.len = MCLBYTES;
+ m->m_len = m->m_pkthdr.len = sc->mbuf_alloc_size;
/* the chip aligns the ip header for us, no need to m_adj */
/* Map the mbuf cluster into device memory. */
@@ -4013,6 +4014,16 @@
REG_WR(sc, BNX_MQ_MAP_L2_5, val | BNX_MQ_MAP_L2_5_ARM);
}
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_RX_PG_BUF_SIZE, 0);
+
+ /* Configure the rx_bd and page chain mbuf cluster size. */
+ val = (sc->mbuf_alloc_size << 16);
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_RX_PG_BUF_SIZE, val);
+
+ /* Configure the context reserved for jumbo support. */
+ CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_RX_RBDC_KEY,
+ BNX_L2CTX_RX_RBDC_JUMBO_KEY);
+
/* Point the hardware to the first page in the chain. */
val = (u_int32_t)((u_int64_t)sc->rx_bd_chain_paddr[0] >> 32);
CTX_WR(sc, GET_CID_ADDR(RX_CID), BNX_L2CTX_NX_BDHADDR_HI, val);
@@ -4787,7 +4798,7 @@
bnx_set_mac_addr(sc);
/* Calculate and program the Ethernet MRU size. */
- ether_mtu = BNX_MAX_STD_ETHER_MTU_VLAN;
+ ether_mtu = BNX_MAX_JUMBO_ETHER_MTU_VLAN;
DBPRINT(sc, BNX_INFO, "%s(): setting MRU = %d\n",
__FUNCTION__, ether_mtu);