Please consider this version instead; I made a mistake when importing
the msk diff (both versions have been tested and are known to work,
but the first one is missing some of the upstream fixes).

Thanks

-- 
Robert Millan
Index: debian/changelog
===================================================================
--- debian/changelog    (revision 3277)
+++ debian/changelog    (revision 3571)
@@ -1,3 +1,15 @@
+kfreebsd-8 (8.1+dfsg-9) UNRELEASED; urgency=low
+
+  * Fix net802.11 stack kernel memory disclosure (CVE-2011-2480).
+    (Closes: #631160)
+    - 000_net80211_disclosure.diff
+  * Merge backported if_msk driver from 8-STABLE.  (Closes: #628954)
+    - 000_msk_backport.diff
+  * Disable buggy 009_disable_duped_modules.diff.  It was disabling many
+    more modules than were built into the kernel (e.g. all USB modules).
+
+ -- Robert Millan <[email protected]>  Mon, 11 Jul 2011 17:32:30 +0200
+
 kfreebsd-8 (8.1+dfsg-8) stable-proposed-updates; urgency=low
 
   [ Petr Salinger ]
Index: debian/patches/series
===================================================================
--- debian/patches/series       (revision 3277)
+++ debian/patches/series       (revision 3571)
@@ -3,12 +3,14 @@
 000_coda.diff
 000_ufs_lookup.diff  
 000_tcp_usrreq.diff
+000_net80211_disclosure.diff
+000_msk_backport.diff
 001_misc.diff
 003_glibc_dev_aicasm.diff
 004_xargs.diff
 007_clone_signals.diff
 008_config.diff
-009_disable_duped_modules.diff
+#009_disable_duped_modules.diff
 013_ip_packed.diff
 020_linker.diff 
 021_superpages_i386.diff
Index: debian/patches/000_net80211_disclosure.diff
===================================================================
--- debian/patches/000_net80211_disclosure.diff (revision 0)
+++ debian/patches/000_net80211_disclosure.diff (revision 3571)
@@ -0,0 +1,79 @@
+--- a/sys/net80211/ieee80211_acl.c
++++ b/sys/net80211/ieee80211_acl.c
+@@ -77,7 +77,7 @@
+ struct aclstate {
+       acl_lock_t              as_lock;
+       int                     as_policy;
+-      int                     as_nacls;
++      uint32_t                as_nacls;
+       TAILQ_HEAD(, acl)       as_list;        /* list of all ACL's */
+       LIST_HEAD(, acl)        as_hash[ACL_HASHSIZE];
+       struct ieee80211vap     *as_vap;
+@@ -289,7 +289,8 @@
+       struct aclstate *as = vap->iv_as;
+       struct acl *acl;
+       struct ieee80211req_maclist *ap;
+-      int error, space, i;
++      int error;
++      uint32_t i, space;
+ 
+       switch (ireq->i_val) {
+       case IEEE80211_MACCMD_POLICY:
+--- a/sys/net80211/ieee80211_ioctl.c
++++ b/sys/net80211/ieee80211_ioctl.c
+@@ -141,7 +141,7 @@
+ ieee80211_ioctl_getchaninfo(struct ieee80211vap *vap, struct ieee80211req *ireq)
+ {
+       struct ieee80211com *ic = vap->iv_ic;
+-      int space;
++      uint32_t space;
+ 
+       space = __offsetof(struct ieee80211req_chaninfo,
+                       ic_chans[ic->ic_nchans]);
+@@ -205,7 +205,7 @@
+ {
+       struct ieee80211_node *ni;
+       uint8_t macaddr[IEEE80211_ADDR_LEN];
+-      const int off = __offsetof(struct ieee80211req_sta_stats, is_stats);
++      const size_t off = __offsetof(struct ieee80211req_sta_stats, is_stats);
+       int error;
+ 
+       if (ireq->i_len < off)
+@@ -321,7 +321,7 @@
+       if (req.space > ireq->i_len)
+               req.space = ireq->i_len;
+       if (req.space > 0) {
+-              size_t space;
++              uint32_t space;
+               void *p;
+ 
+               space = req.space;
+@@ -456,7 +456,7 @@
+ 
+ static __noinline int
+ getstainfo_common(struct ieee80211vap *vap, struct ieee80211req *ireq,
+-      struct ieee80211_node *ni, int off)
++      struct ieee80211_node *ni, size_t off)
+ {
+       struct ieee80211com *ic = vap->iv_ic;
+       struct stainforeq req;
+@@ -501,7 +501,7 @@
+ ieee80211_ioctl_getstainfo(struct ieee80211vap *vap, struct ieee80211req *ireq)
+ {
+       uint8_t macaddr[IEEE80211_ADDR_LEN];
+-      const int off = __offsetof(struct ieee80211req_sta_req, info);
++      const size_t off = __offsetof(struct ieee80211req_sta_req, info);
+       struct ieee80211_node *ni;
+       int error;
+ 
+--- a/sys/net80211/ieee80211_ioctl.h
++++ b/sys/net80211/ieee80211_ioctl.h
+@@ -578,7 +578,7 @@
+       char            i_name[IFNAMSIZ];       /* if_name, e.g. "wi0" */
+       uint16_t        i_type;                 /* req type */
+       int16_t         i_val;                  /* Index or simple value */
+-      int16_t         i_len;                  /* Index or simple value */
++      uint16_t        i_len;                  /* Index or simple value */
+       void            *i_data;                /* Extra data */
+ };
+ #define       SIOCS80211               _IOW('i', 234, struct ieee80211req)
Index: debian/patches/000_msk_backport.diff
===================================================================
--- debian/patches/000_msk_backport.diff        (revision 0)
+++ debian/patches/000_msk_backport.diff        (revision 3571)
@@ -0,0 +1,890 @@
+
+See http://www.freebsd.org/cgi/query-pr.cgi?pr=154591
+
+Patch obtained from 8-STABLE using:
+
+  svn diff http://svn.freebsd.org/base/release/8.1.0/sys/dev/msk \
+    http://svn.freebsd.org/base/stable/8/sys/dev/msk
+
+--- a/sys/dev/msk/if_mskreg.h
++++ b/sys/dev/msk/if_mskreg.h
+@@ -144,6 +144,8 @@
+ #define DEVICEID_MRVL_436A    0x436A
+ #define DEVICEID_MRVL_436B    0x436B
+ #define DEVICEID_MRVL_436C    0x436C
++#define DEVICEID_MRVL_436D    0x436D
++#define DEVICEID_MRVL_4370    0x4370
+ #define DEVICEID_MRVL_4380    0x4380
+ #define DEVICEID_MRVL_4381    0x4381
+ 
+@@ -321,6 +323,9 @@
+ #define PCI_OS_SPD_X100               2       /* PCI-X 100MHz Bus */
+ #define PCI_OS_SPD_X133               3       /* PCI-X 133MHz Bus */
+ 
++/* PCI_OUR_REG_3      32 bit  Our Register 3 (Yukon-ECU only) */
++#define       PCI_CLK_MACSEC_DIS      BIT_17  /* Disable Clock MACSec. */
++
+ /* PCI_OUR_REG_4      32 bit  Our Register 4 (Yukon-ECU only) */
+ #define       PCI_TIMER_VALUE_MSK     (0xff<<16)      /* Bit 23..16:  Timer Value Mask */
+ #define       PCI_FORCE_ASPM_REQUEST  BIT_15  /* Force ASPM Request (A1 only) */
+@@ -677,6 +682,7 @@
+ /* ASF Subsystem Registers (Yukon-2 only) */
+ #define B28_Y2_SMB_CONFIG     0x0e40  /* 32 bit ASF SMBus Config Register */
+ #define B28_Y2_SMB_CSD_REG    0x0e44  /* 32 bit ASF SMB Control/Status/Data */
++#define B28_Y2_CPU_WDOG               0x0e48  /* 32 bit Watchdog Register */
+ #define B28_Y2_ASF_IRQ_V_BASE 0x0e60  /* 32 bit ASF IRQ Vector Base */
+ #define B28_Y2_ASF_STAT_CMD   0x0e68  /* 32 bit ASF Status and Command Reg */
+ #define B28_Y2_ASF_HCU_CCSR   0x0e68  /* 32 bit ASF HCU CCSR (Yukon EX) */
+@@ -918,6 +924,10 @@
+ #define       CHIP_REV_YU_EX_A0       1 /* Chip Rev. for Yukon-2 EX A0 */
+ #define       CHIP_REV_YU_EX_B0       2 /* Chip Rev. for Yukon-2 EX B0 */
+ 
++#define       CHIP_REV_YU_SU_A0       0 /* Chip Rev. for Yukon-2 SUPR A0 */
++#define       CHIP_REV_YU_SU_B0       1 /* Chip Rev. for Yukon-2 SUPR B0 */
++#define       CHIP_REV_YU_SU_B1       3 /* Chip Rev. for Yukon-2 SUPR B1 */
++
+ /*    B2_Y2_CLK_GATE   8 bit  Clock Gating (Yukon-2 only) */
+ #define Y2_STATUS_LNK2_INAC   BIT_7   /* Status Link 2 inactiv (0 = activ) */
+ #define Y2_CLK_GAT_LNK2_DIS   BIT_6   /* Disable clock gating Link 2 */
+@@ -2555,6 +2565,7 @@
+       struct msk_hw_stats     msk_stats;
+       int                     msk_if_flags;
+       uint16_t                msk_vtag;       /* VLAN tag id. */
++      uint32_t                msk_csum;
+ };
+ 
+ #define MSK_TIMEOUT   1000
+--- a/sys/dev/msk/if_msk.c
++++ b/sys/dev/msk/if_msk.c
+@@ -221,6 +221,10 @@
+           "Marvell Yukon 88E8071 Gigabit Ethernet" },
+       { VENDORID_MARVELL, DEVICEID_MRVL_436C,
+           "Marvell Yukon 88E8072 Gigabit Ethernet" },
++      { VENDORID_MARVELL, DEVICEID_MRVL_436D,
++          "Marvell Yukon 88E8055 Gigabit Ethernet" },
++      { VENDORID_MARVELL, DEVICEID_MRVL_4370,
++          "Marvell Yukon 88E8075 Gigabit Ethernet" },
+       { VENDORID_MARVELL, DEVICEID_MRVL_4380,
+           "Marvell Yukon 88E8057 Gigabit Ethernet" },
+       { VENDORID_MARVELL, DEVICEID_MRVL_4381,
+@@ -270,6 +274,7 @@
+ #ifndef __NO_STRICT_ALIGNMENT
+ static __inline void msk_fixup_rx(struct mbuf *);
+ #endif
++static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *);
+ static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
+ static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int);
+ static void msk_txeof(struct msk_if_softc *, int);
+@@ -294,6 +299,7 @@
+ static int msk_rx_dma_jalloc(struct msk_if_softc *);
+ static void msk_txrx_dma_free(struct msk_if_softc *);
+ static void msk_rx_dma_jfree(struct msk_if_softc *);
++static int msk_rx_fill(struct msk_if_softc *, int);
+ static int msk_init_rx_ring(struct msk_if_softc *);
+ static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
+ static void msk_init_tx_ring(struct msk_if_softc *);
+@@ -399,9 +405,6 @@
+ {
+       struct msk_if_softc *sc_if;
+ 
+-      if (phy != PHY_ADDR_MARV)
+-              return (0);
+-
+       sc_if = device_get_softc(dev);
+ 
+       return (msk_phy_readreg(sc_if, phy, reg));
+@@ -440,9 +443,6 @@
+ {
+       struct msk_if_softc *sc_if;
+ 
+-      if (phy != PHY_ADDR_MARV)
+-              return (0);
+-
+       sc_if = device_get_softc(dev);
+ 
+       return (msk_phy_writereg(sc_if, phy, reg, val));
+@@ -565,7 +566,7 @@
+               msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
+               /* Disable Rx/Tx MAC. */
+               gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
+-              if ((GM_GPCR_RX_ENA | GM_GPCR_TX_ENA) != 0) {
++              if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
+                       gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+                       GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
+                       /* Read again to ensure writing. */
+@@ -647,6 +648,54 @@
+ }
+ 
+ static int
++msk_rx_fill(struct msk_if_softc *sc_if, int jumbo)
++{
++      uint16_t idx;
++      int i;
++
++      if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
++          (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
++              /* Wait until controller executes OP_TCPSTART command. */
++              for (i = 10; i > 0; i--) {
++                      DELAY(10);
++                      idx = CSR_READ_2(sc_if->msk_softc,
++                          Y2_PREF_Q_ADDR(sc_if->msk_rxq,
++                          PREF_UNIT_GET_IDX_REG));
++                      if (idx != 0)
++                              break;
++              }
++              if (i == 0) {
++                      device_printf(sc_if->msk_if_dev,
++                          "prefetch unit stuck?\n");
++                      return (ETIMEDOUT);
++              }
++              /*
++               * Fill consumed LE with free buffer. This can be done
++               * in Rx handler but we don't want to add special code
++               * in fast handler.
++               */
++              if (jumbo > 0) {
++                      if (msk_jumbo_newbuf(sc_if, 0) != 0)
++                              return (ENOBUFS);
++                      bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
++                          sc_if->msk_cdata.msk_jumbo_rx_ring_map,
++                          BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++              } else {
++                      if (msk_newbuf(sc_if, 0) != 0)
++                              return (ENOBUFS);
++                      bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
++                          sc_if->msk_cdata.msk_rx_ring_map,
++                          BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
++              }
++              sc_if->msk_cdata.msk_rx_prod = 0;
++              CSR_WRITE_2(sc_if->msk_softc,
++                  Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
++                  sc_if->msk_cdata.msk_rx_prod);
++      }
++      return (0);
++}
++
++static int
+ msk_init_rx_ring(struct msk_if_softc *sc_if)
+ {
+       struct msk_ring_data *rd;
+@@ -662,7 +711,21 @@
+       rd = &sc_if->msk_rdata;
+       bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
+       prod = sc_if->msk_cdata.msk_rx_prod;
+-      for (i = 0; i < MSK_RX_RING_CNT; i++) {
++      i = 0;
++      /* Have controller know how to compute Rx checksum. */
++      if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
++          (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
++              rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
++              rxd->rx_m = NULL;
++              rxd->rx_le = &rd->msk_rx_ring[prod];
++              rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
++                  ETHER_HDR_LEN);
++              rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
++              MSK_INC(prod, MSK_RX_RING_CNT);
++              MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
++              i++;
++      }
++      for (; i < MSK_RX_RING_CNT; i++) {
+               rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
+               rxd->rx_m = NULL;
+               rxd->rx_le = &rd->msk_rx_ring[prod];
+@@ -680,7 +743,8 @@
+       CSR_WRITE_2(sc_if->msk_softc,
+           Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
+           sc_if->msk_cdata.msk_rx_prod);
+-
++      if (msk_rx_fill(sc_if, 0) != 0)
++              return (ENOBUFS);
+       return (0);
+ }
+ 
+@@ -701,7 +765,21 @@
+       bzero(rd->msk_jumbo_rx_ring,
+           sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
+       prod = sc_if->msk_cdata.msk_rx_prod;
+-      for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
++      i = 0;
++      /* Have controller know how to compute Rx checksum. */
++      if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
++          (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) {
++              rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
++              rxd->rx_m = NULL;
++              rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
++              rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 |
++                  ETHER_HDR_LEN);
++              rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER);
++              MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
++              MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
++              i++;
++      }
++      for (; i < MSK_JUMBO_RX_RING_CNT; i++) {
+               rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
+               rxd->rx_m = NULL;
+               rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
+@@ -718,7 +796,8 @@
+       CSR_WRITE_2(sc_if->msk_softc,
+           Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
+           sc_if->msk_cdata.msk_rx_prod);
+-
++      if (msk_rx_fill(sc_if, 1) != 0)
++              return (ENOBUFS);
+       return (0);
+ }
+ 
+@@ -927,7 +1006,7 @@
+       struct msk_if_softc *sc_if;
+       struct ifreq *ifr;
+       struct mii_data *mii;
+-      int error, mask;
++      int error, mask, reinit;
+ 
+       sc_if = ifp->if_softc;
+       ifr = (struct ifreq *)data;
+@@ -939,7 +1018,7 @@
+               if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
+                       error = EINVAL;
+               else if (ifp->if_mtu != ifr->ifr_mtu) {
+-                      if (ifr->ifr_mtu > ETHERMTU) {
++                      if (ifr->ifr_mtu > ETHERMTU) {
+                               if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) {
+                                       error = EINVAL;
+                                       MSK_IF_UNLOCK(sc_if);
+@@ -955,7 +1034,10 @@
+                               }
+                       }
+                       ifp->if_mtu = ifr->ifr_mtu;
+-                      msk_init_locked(sc_if);
++                      if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
++                              ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
++                              msk_init_locked(sc_if);
++                      }
+               }
+               MSK_IF_UNLOCK(sc_if);
+               break;
+@@ -986,6 +1068,7 @@
+               error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
+               break;
+       case SIOCSIFCAP:
++              reinit = 0;
+               MSK_IF_LOCK(sc_if);
+               mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+               if ((mask & IFCAP_TXCSUM) != 0 &&
+@@ -997,8 +1080,11 @@
+                               ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
+               }
+               if ((mask & IFCAP_RXCSUM) != 0 &&
+-                  (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
++                  (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
+                       ifp->if_capenable ^= IFCAP_RXCSUM;
++                      if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0)
++                              reinit = 1;
++              }
+               if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
+                   (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
+@@ -1026,8 +1112,11 @@
+                       ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
+                       ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
+               }
+-
+               VLAN_CAPABILITIES(ifp);
++              if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
++                      ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
++                      msk_init_locked(sc_if);
++              }
+               MSK_IF_UNLOCK(sc_if);
+               break;
+       default:
+@@ -1076,7 +1165,7 @@
+       sc->msk_pflags |= MSK_FLAG_RAMBUF;
+       /*
+        * Give receiver 2/3 of memory and round down to the multiple
+-       * of 1024. Tx/Rx RAM buffer size of Yukon II shoud be multiple
++       * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
+        * of 1024.
+        */
+       sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
+@@ -1130,37 +1219,30 @@
+                */
+               CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
+ 
+-              val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
+-              val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
++              our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
++              our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
+               if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
+                       if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
+                               /* Deassert Low Power for 1st PHY. */
+-                              val |= PCI_Y2_PHY1_COMA;
++                              our |= PCI_Y2_PHY1_COMA;
+                               if (sc->msk_num_port > 1)
+-                                      val |= PCI_Y2_PHY2_COMA;
++                                      our |= PCI_Y2_PHY2_COMA;
+                       }
+               }
+-              /* Release PHY from PowerDown/COMA mode. */
+-              CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
+-              switch (sc->msk_hw_id) {
+-              case CHIP_ID_YUKON_EC_U:
+-              case CHIP_ID_YUKON_EX:
+-              case CHIP_ID_YUKON_FE_P:
+-              case CHIP_ID_YUKON_UL_2:
+-              case CHIP_ID_YUKON_OPT:
+-                      CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF);
+-
+-                      /* Enable all clocks. */
+-                      CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
+-                      our = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
+-                      our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
+-                          PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
++              if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
++                  sc->msk_hw_id == CHIP_ID_YUKON_EX ||
++                  sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
++                      val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
++                      val &= (PCI_FORCE_ASPM_REQUEST |
++                          PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
++                          PCI_ASPM_CLKRUN_REQUEST);
+                       /* Set all bits to 0 except bits 15..12. */
+-                      CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, our);
+-                      our = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
+-                      our &= PCI_CTL_TIM_VMAIN_AV_MSK;
+-                      CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, our);
++                      CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
++                      val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
++                      val &= PCI_CTL_TIM_VMAIN_AV_MSK;
++                      CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
+                       CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
++                      CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
+                       /*
+                        * Disable status race, workaround for
+                        * Yukon EC Ultra & Yukon EX.
+@@ -1169,10 +1251,10 @@
+                       val |= GLB_GPIO_STAT_RACE_DIS;
+                       CSR_WRITE_4(sc, B2_GP_IO, val);
+                       CSR_READ_4(sc, B2_GP_IO);
+-                      break;
+-              default:
+-                      break;
+               }
++              /* Release PHY from PowerDown/COMA mode. */
++              CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
++
+               for (i = 0; i < sc->msk_num_port; i++) {
+                       CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
+                           GMLC_RST_SET);
+@@ -1218,28 +1300,33 @@
+       bus_addr_t addr;
+       uint16_t status;
+       uint32_t val;
+-      int i;
+-
+-      CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
++      int i, initram;
+ 
+       /* Disable ASF. */
+-      if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
+-              status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
+-              /* Clear AHB bridge & microcontroller reset. */
+-              status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
+-                  Y2_ASF_HCU_CCSR_CPU_RST_MODE);
+-              /* Clear ASF microcontroller state. */
+-              status &= ~ Y2_ASF_HCU_CCSR_UC_STATE_MSK;
+-              CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
+-      } else
+-              CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+-      CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
+-
+-      /*
+-       * Since we disabled ASF, S/W reset is required for Power Management.
+-       */
+-      CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
+-      CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
++      if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
++          sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
++              if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
++                  sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
++                      CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
++                      status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
++                      /* Clear AHB bridge & microcontroller reset. */
++                      status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
++                          Y2_ASF_HCU_CCSR_CPU_RST_MODE);
++                      /* Clear ASF microcontroller state. */
++                      status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
++                      status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
++                      CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
++                      CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
++              } else
++                      CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
++              CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
++              /*
++               * Since we disabled ASF, S/W reset is required for
++               * Power Management.
++               */
++              CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
++              CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
++      }
+ 
+       /* Clear all error bits in the PCI status register. */
+       status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
+@@ -1280,17 +1367,22 @@
+       /* Reset GPHY/GMAC Control */
+       for (i = 0; i < sc->msk_num_port; i++) {
+               /* GPHY Control reset. */
+-              CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
+-              CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
++              CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
++              CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
+               /* GMAC Control reset. */
+               CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
+               CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
+               CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
+-              if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
++              if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
++                  sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
+                       CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
+                           GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
+                           GMC_BYP_RETR_ON);
+       }
++
++      if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
++          sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
++              CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
+       if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
+               /* Disable PCIe PHY powerdown(reg 0x80, bit7). */
+               CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
+@@ -1314,8 +1406,14 @@
+       CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
+       CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+ 
++      initram = 0;
++      if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
++          sc->msk_hw_id == CHIP_ID_YUKON_EC ||
++          sc->msk_hw_id == CHIP_ID_YUKON_FE)
++              initram++;
++
+       /* Configure timeout values. */
+-      for (i = 0; i < sc->msk_num_port; i++) {
++      for (i = 0; initram > 0 && i < sc->msk_num_port; i++) {
+               CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
+               CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
+               CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
+@@ -1490,23 +1588,14 @@
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_mtu = ETHERMTU;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+-      /*
+-       * IFCAP_RXCSUM capability is intentionally disabled as the hardware
+-       * has serious bug in Rx checksum offload for all Yukon II family
+-       * hardware. It seems there is a workaround to make it work somtimes.
+-       * However, the workaround also have to check OP code sequences to
+-       * verify whether the OP code is correct. Sometimes it should compute
+-       * IP/TCP/UDP checksum in driver in order to verify correctness of
+-       * checksum computed by hardware. If you have to compute checksum
+-       * with software to verify the hardware's checksum why have hardware
+-       * compute the checksum? I think there is no reason to spend time to
+-       * make Rx checksum offload work on Yukon II hardware.
+-       */
+       ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
+       /*
+-       * Enable Rx checksum offloading if controller support new
+-       * descriptor format.
++       * Enable Rx checksum offloading if controller supports
++       * new descriptor format and controller is not Yukon XL.
+        */
++      if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
++          sc->msk_hw_id != CHIP_ID_YUKON_XL)
++              ifp->if_capabilities |= IFCAP_RXCSUM;
+       if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
+           (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
+               ifp->if_capabilities |= IFCAP_RXCSUM;
+@@ -1549,9 +1638,9 @@
+                * this workaround does not work so disable checksum offload
+                * for VLAN interface.
+                */
+-              ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
++              ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
+               /*
+-               * Enable Rx checksum offloading for VLAN taggedd frames
++               * Enable Rx checksum offloading for VLAN tagged frames
+                * if controller support new descriptor format.
+                */
+               if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
+@@ -1634,13 +1724,15 @@
+               }
+       }
+ 
++      /* Enable all clocks before accessing any registers. */
++      CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
++
+       CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
+       sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
+       sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
+       /* Bail out if chip is not recognized. */
+       if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
+           sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
+-          sc->msk_hw_id == CHIP_ID_YUKON_SUPR ||
+           sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
+               device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
+                   sc->msk_hw_id, sc->msk_hw_rev);
+@@ -1674,9 +1766,6 @@
+       resource_int_value(device_get_name(dev), device_get_unit(dev),
+           "int_holdoff", &sc->msk_int_holdoff);
+ 
+-      /* Soft reset. */
+-      CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
+-      CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
+       sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
+       /* Check number of MACs. */
+       sc->msk_num_port = 1;
+@@ -1738,7 +1827,7 @@
+                        * does not rely on status word of received frame
+                        * in msk_rxeof() which in turn disables all
+                        * hardware assistance bits reported by the status
+-                       * word as well as validity of the recevied frame.
++                       * word as well as validity of the received frame.
+                        * Just pass received frames to upper stack with
+                        * minimal test and let upper stack handle them.
+                        */
+@@ -1750,6 +1839,11 @@
+               sc->msk_clock = 156;    /* 156 MHz */
+               sc->msk_pflags |= MSK_FLAG_JUMBO;
+               break;
++      case CHIP_ID_YUKON_SUPR:
++              sc->msk_clock = 125;    /* 125 MHz */
++              sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 |
++                  MSK_FLAG_AUTOTX_CSUM;
++              break;
+       case CHIP_ID_YUKON_UL_2:
+               sc->msk_clock = 125;    /* 125 MHz */
+               sc->msk_pflags |= MSK_FLAG_JUMBO;
+@@ -1826,7 +1923,8 @@
+                       error = ENXIO;
+                       goto fail;
+               }
+-              mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
++              mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
++                  M_ZERO);
+               if (mmd == NULL) {
+                       device_printf(dev, "failed to allocate memory for "
+                           "ivars of PORT_B\n");
+@@ -2068,10 +2168,10 @@
+        * what DMA address is used and chain another descriptor for the
+        * 64bits DMA operation. This also means descriptor ring size is
+        * variable. Limiting DMA address to be in 32bit address space greatly
+-       * simplyfies descriptor handling and possibly would increase
++       * simplifies descriptor handling and possibly would increase
+        * performance a bit due to efficient handling of descriptors.
+        * Apart from harassing checksum offloading mechanisms, it seems
+-       * it's really bad idea to use a seperate descriptor for 64bit
++       * it's really bad idea to use a separate descriptor for 64bit
+        * DMA operation to save small descriptor memory. Anyway, I've
+        * never seen these exotic scheme on ethernet interface hardware.
+        */
+@@ -2568,7 +2668,7 @@
+                        * Short UDP packets appear to be handled correctly by
+                        * Yukon II. Also I assume this bug does not happen on
+                        * controllers that use newer descriptor format or
+-                       * automatic Tx checksum calaulcation.
++                       * automatic Tx checksum calculation.
+                        */
+                       m = m_pullup(m, offset + sizeof(struct tcphdr));
+                       if (m == NULL) {
+@@ -2705,7 +2805,7 @@
+       /* Update producer index. */
+       sc_if->msk_cdata.msk_tx_prod = prod;
+ 
+-      /* Set EOP on the last desciptor. */
++      /* Set EOP on the last descriptor. */
+       prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
+       tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
+       tx_le->msk_control |= htole32(EOP);
+@@ -2885,6 +2985,7 @@
+ 
+       MSK_LOCK(sc);
+ 
++      CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
+       mskc_reset(sc);
+       for (i = 0; i < sc->msk_num_port; i++) {
+               if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
+@@ -2918,6 +3019,96 @@
+ }
+ #endif
+ 
++static __inline void
++msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m)
++{
++      struct ether_header *eh;
++      struct ip *ip;
++      struct udphdr *uh;
++      int32_t hlen, len, pktlen, temp32;
++      uint16_t csum, *opts;
++
++      if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) {
++              if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
++                      m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
++                      if ((control & CSS_IPV4_CSUM_OK) != 0)
++                              m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
++                      if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
++                          (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
++                              m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
++                                  CSUM_PSEUDO_HDR;
++                              m->m_pkthdr.csum_data = 0xffff;
++                      }
++              }
++              return;
++      }
++      /*
++       * Marvell Yukon controllers that support OP_RXCHKS has known
++       * to have various Rx checksum offloading bugs. These
++       * controllers can be configured to compute simple checksum
++       * at two different positions. So we can compute IP and TCP/UDP
++       * checksum at the same time. We intentionally have controller
++       * compute TCP/UDP checksum twice by specifying the same
++       * checksum start position and compare the result. If the value
++       * is different it would indicate the hardware logic was wrong.
++       */
++      if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) {
++              if (bootverbose)
++                      device_printf(sc_if->msk_if_dev,
++                          "Rx checksum value mismatch!\n");
++              return;
++      }
++      pktlen = m->m_pkthdr.len;
++      if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
++              return;
++      eh = mtod(m, struct ether_header *);
++      if (eh->ether_type != htons(ETHERTYPE_IP))
++              return;
++      ip = (struct ip *)(eh + 1);
++      if (ip->ip_v != IPVERSION)
++              return;
++
++      hlen = ip->ip_hl << 2;
++      pktlen -= sizeof(struct ether_header);
++      if (hlen < sizeof(struct ip))
++              return;
++      if (ntohs(ip->ip_len) < hlen)
++              return;
++      if (ntohs(ip->ip_len) != pktlen)
++              return;
++      if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
++              return; /* can't handle fragmented packet. */
++
++      switch (ip->ip_p) {
++      case IPPROTO_TCP:
++              if (pktlen < (hlen + sizeof(struct tcphdr)))
++                      return;
++              break;
++      case IPPROTO_UDP:
++              if (pktlen < (hlen + sizeof(struct udphdr)))
++                      return;
++              uh = (struct udphdr *)((caddr_t)ip + hlen);
++              if (uh->uh_sum == 0)
++                      return; /* no checksum */
++              break;
++      default:
++              return;
++      }
++      csum = bswap16(sc_if->msk_csum & 0xFFFF);
++      /* Checksum fixup for IP options. */
++      len = hlen - sizeof(struct ip);
++      if (len > 0) {
++              opts = (uint16_t *)(ip + 1);
++              for (; len > 0; len -= sizeof(uint16_t), opts++) {
++                      temp32 = csum - *opts;
++                      temp32 = (temp32 >> 16) + (temp32 & 65535);
++                      csum = temp32 & 65535;
++              }
++      }
++      m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
++      m->m_pkthdr.csum_data = csum;
++}
++
+ static void
+ msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control,
+     int len)
+@@ -2972,18 +3163,8 @@
+                       msk_fixup_rx(m);
+ #endif
+               ifp->if_ipackets++;
+-              if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
+-                  (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
+-                      m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+-                      if ((control & CSS_IPV4_CSUM_OK) != 0)
+-                              m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+-                      if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
+-                          (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
+-                              m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
+-                                  CSUM_PSEUDO_HDR;
+-                              m->m_pkthdr.csum_data = 0xffff;
+-                      }
+-              }
++              if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
++                      msk_rxcsum(sc_if, control, m);
+               /* Check for VLAN tagged packets. */
+               if ((status & GMR_FS_VLAN) != 0 &&
+                   (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
+@@ -3042,18 +3223,8 @@
+                       msk_fixup_rx(m);
+ #endif
+               ifp->if_ipackets++;
+-              if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
+-                  (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) {
+-                      m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+-                      if ((control & CSS_IPV4_CSUM_OK) != 0)
+-                              m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+-                      if ((control & (CSS_TCP | CSS_UDP)) != 0 &&
+-                          (control & (CSS_TCPUDP_CSUM_OK)) != 0) {
+-                              m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
+-                                  CSUM_PSEUDO_HDR;
+-                              m->m_pkthdr.csum_data = 0xffff;
+-                      }
+-              }
++              if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
++                      msk_rxcsum(sc_if, control, m);
+               /* Check for VLAN tagged packets. */
+               if ((status & GMR_FS_VLAN) != 0 &&
+                   (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
+@@ -3176,7 +3347,7 @@
+                * XXX
+                * In case of Tx underrun, we may need to flush/reset
+                * Tx MAC but that would also require resynchronization
+-               * with status LEs. Reintializing status LEs would
++               * with status LEs. Reinitializing status LEs would
+                * affect other port in dual MAC configuration so it
+                * should be avoided as possible as we can.
+                * Due to lack of documentation it's all vague guess but
+@@ -3370,6 +3541,9 @@
+                       break;
+               case OP_RXCHKSVLAN:
+                       sc_if->msk_vtag = ntohs(len);
++                      /* FALLTHROUGH */
++              case OP_RXCHKS:
++                      sc_if->msk_csum = status;
+                       break;
+               case OP_RXSTAT:
+                       if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING))
+@@ -3503,37 +3677,24 @@
+ 
+       ifp = sc_if->msk_ifp;
+       sc = sc_if->msk_softc;
+-      switch (sc->msk_hw_id) {
+-      case CHIP_ID_YUKON_EX:
+-              if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0)
+-                      goto yukon_ex_workaround;
+-              if (ifp->if_mtu > ETHERMTU)
+-                      CSR_WRITE_4(sc,
+-                          MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
+-                          TX_JUMBO_ENA | TX_STFW_ENA);
+-              else
+-                      CSR_WRITE_4(sc,
+-                          MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
+-                          TX_JUMBO_DIS | TX_STFW_ENA);
+-              break;
+-      default:
+-yukon_ex_workaround:
++      if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
++          sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
++          sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
++              CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
++                  TX_STFW_ENA);
++      } else {
+               if (ifp->if_mtu > ETHERMTU) {
+                       /* Set Tx GMAC FIFO Almost Empty Threshold. */
+                       CSR_WRITE_4(sc,
+                           MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
+                           MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
+                       /* Disable Store & Forward mode for Tx. */
+-                      CSR_WRITE_4(sc,
+-                          MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
+-                          TX_JUMBO_ENA | TX_STFW_DIS);
++                      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
++                          TX_STFW_DIS);
+               } else {
+-                      /* Enable Store & Forward mode for Tx. */
+-                      CSR_WRITE_4(sc,
+-                          MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
+-                          TX_JUMBO_DIS | TX_STFW_ENA);
++                      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
++                          TX_STFW_ENA);
+               }
+-              break;
+       }
+ }
+ 
+@@ -3582,11 +3743,12 @@
+               ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
+       }
+ 
+-      /* GMAC Control reset. */
+-      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
+-      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
+-      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
+-      if (sc->msk_hw_id == CHIP_ID_YUKON_EX)
++      /* GMAC Control reset. */
++      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
++      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
++      CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
++      if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
++          sc->msk_hw_id == CHIP_ID_YUKON_SUPR)
+               CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
+                   GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
+                   GMC_BYP_RETR_ON);
+@@ -3685,7 +3847,7 @@
+       msk_setvlan(sc_if, ifp);
+ 
+       if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
+-              /* Set Rx Pause threshould. */
++              /* Set Rx Pause threshold. */
+               CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
+                   MSK_ECU_LLPP);
+               CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
+@@ -3694,13 +3856,13 @@
+               msk_set_tx_stfwd(sc_if);
+       }
+ 
+-      if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
+-          sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
+-              /* Disable dynamic watermark - from Linux. */
+-              reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
+-              reg &= ~0x03;
+-              CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
+-      }
++      if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
++          sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
++              /* Disable dynamic watermark - from Linux. */
++              reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
++              reg &= ~0x03;
++              CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
++      }
+ 
+       /*
+        * Disable Force Sync bit and Alloc bit in Tx RAM interface
+@@ -3757,8 +3919,13 @@
+       msk_init_tx_ring(sc_if);
+ 
+       /* Disable Rx checksum offload and RSS hash. */
+-      CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
+-          BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
++      reg = BMU_DIS_RX_RSS_HASH;
++      if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
++          (ifp->if_capenable & IFCAP_RXCSUM) != 0)
++              reg |= BMU_ENA_RX_CHKSUM;
++      else
++              reg |= BMU_DIS_RX_CHKSUM;
++      CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg);
+       if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
+               msk_set_prefetch(sc, sc_if->msk_rxq,
+                   sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
+@@ -3776,7 +3943,8 @@
+               msk_stop(sc_if);
+               return;
+       }
+-      if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
++      if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
++          sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
+               /* Disable flushing of non-ASF packets. */
+               CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
+                   GMF_RX_MACSEC_FLUSH_OFF);

Reply via email to