commit:     63e318c66258ba6be277fb558bc364ef2f2c126f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 26 21:50:07 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 26 21:50:07 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=63e318c6

Linux patch 5.8.12

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1011_linux-5.8.12.patch | 2440 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2444 insertions(+)
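
The Makefile hunk below is the only version change: VERSION/PATCHLEVEL/SUBLEVEL become 5.8.12, and the remaining hunks backport networking driver fixes. As a minimal illustration of how those three Makefile fields combine, here is a standalone userspace sketch (not kernel source) mirroring the include/linux/version.h packing used at the time:

#include <stdio.h>

/* Same packing as include/linux/version.h in this era. */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
        /* VERSION = 5, PATCHLEVEL = 8, SUBLEVEL = 12 after this patch */
        printf("LINUX_VERSION_CODE = %d\n", KERNEL_VERSION(5, 8, 12)); /* 329740 */
        return 0;
}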

diff --git a/0000_README b/0000_README
index d438f0f..51cee27 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-5.8.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.8.11
 
+Patch:  1011_linux-5.8.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.8.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-5.8.12.patch b/1011_linux-5.8.12.patch
new file mode 100644
index 0000000..ac579a3
--- /dev/null
+++ b/1011_linux-5.8.12.patch
@@ -0,0 +1,2440 @@
+diff --git a/Makefile b/Makefile
+index 0b025b3a56401..d0d40c628dc34 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 8
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 7c17b0f705ec3..87db588bcdd6b 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -1269,7 +1269,7 @@ static int ksz8795_switch_init(struct ksz_device *dev)
+       }
+ 
+       /* set the real number of ports */
+-      dev->ds->num_ports = dev->port_cnt;
++      dev->ds->num_ports = dev->port_cnt + 1;
+ 
+       return 0;
+ }
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index 1368816abaed1..99cdb2f18fa2f 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -452,13 +452,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
+                               return ret;
+ 
+                       if (vid == vlanmc.vid) {
+-                              /* clear VLAN member configurations */
+-                              vlanmc.vid = 0;
+-                              vlanmc.priority = 0;
+-                              vlanmc.member = 0;
+-                              vlanmc.untag = 0;
+-                              vlanmc.fid = 0;
+-
++                              /* Remove this port from the VLAN */
++                              vlanmc.member &= ~BIT(port);
++                              vlanmc.untag &= ~BIT(port);
++                              /*
++                               * If no ports are members of this VLAN
++                               * anymore then clear the whole member
++                               * config so it can be reused.
++                               */
++                              if (!vlanmc.member && vlanmc.untag) {
++                                      vlanmc.vid = 0;
++                                      vlanmc.priority = 0;
++                                      vlanmc.fid = 0;
++                              }
+                               ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+                               if (ret) {
+                                       dev_err(smi->dev,
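
The rtl8366_vlan_del() hunk above stops wiping the whole member-config entry when a single port leaves a VLAN: it clears only that port's bit in the member and untag masks, and per the patch's own comment frees the entry for reuse once no member ports remain. A standalone sketch of the bit manipulation (vlan_drop_port is an illustrative helper, not driver code):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Clear one port from a VLAN's member/untag masks, as the hunk above
 * does for struct rtl8366_vlan_mc. */
static void vlan_drop_port(unsigned long *member, unsigned long *untag, int port)
{
        *member &= ~BIT(port);
        *untag &= ~BIT(port);
}

int main(void)
{
        unsigned long member = BIT(0) | BIT(3), untag = BIT(3);

        vlan_drop_port(&member, &untag, 3);
        printf("member=%#lx untag=%#lx\n", member, untag); /* member=0x1 untag=0 */
        return 0;
}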
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index cd5c7a1412c6d..dd07db656a5c3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4198,7 +4198,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
+       u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
+       u16 dst = BNXT_HWRM_CHNL_CHIMP;
+ 
+-      if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
++      if (BNXT_NO_FW_ACCESS(bp))
+               return -EBUSY;
+ 
+       if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
+@@ -5530,7 +5530,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
+       struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 error_code;
+ 
+-      if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
++      if (BNXT_NO_FW_ACCESS(bp))
+               return 0;
+ 
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
+@@ -7502,7 +7502,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+ 
+       if (set_tpa)
+               tpa_flags = bp->flags & BNXT_FLAG_TPA;
+-      else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
++      else if (BNXT_NO_FW_ACCESS(bp))
+               return 0;
+       for (i = 0; i < bp->nr_vnics; i++) {
+               rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+@@ -8993,18 +8993,16 @@ static ssize_t bnxt_show_temp(struct device *dev,
+       struct hwrm_temp_monitor_query_output *resp;
+       struct bnxt *bp = dev_get_drvdata(dev);
+       u32 len = 0;
++      int rc;
+ 
+       resp = bp->hwrm_cmd_resp_addr;
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+       mutex_lock(&bp->hwrm_cmd_lock);
+-      if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
++      rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++      if (!rc)
+              len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
+       mutex_unlock(&bp->hwrm_cmd_lock);
+-
+-      if (len)
+-              return len;
+-
+-      return sprintf(buf, "unknown\n");
++      return rc ?: len;
+ }
+ static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
+ 
+@@ -9024,7 +9022,16 @@ static void bnxt_hwmon_close(struct bnxt *bp)
+ 
+ static void bnxt_hwmon_open(struct bnxt *bp)
+ {
++      struct hwrm_temp_monitor_query_input req = {0};
+       struct pci_dev *pdev = bp->pdev;
++      int rc;
++
++      bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
++      rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++      if (rc == -EACCES || rc == -EOPNOTSUPP) {
++              bnxt_hwmon_close(bp);
++              return;
++      }
+ 
+       if (bp->hwmon_dev)
+               return;
+@@ -11498,6 +11505,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
+       if (BNXT_PF(bp))
+               bnxt_sriov_disable(bp);
+ 
++      clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
++      bnxt_cancel_sp_work(bp);
++      bp->sp_event = 0;
++
+       bnxt_dl_fw_reporters_destroy(bp, true);
+       if (BNXT_PF(bp))
+               devlink_port_type_clear(&bp->dl_port);
+@@ -11505,9 +11516,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
+       unregister_netdev(dev);
+       bnxt_dl_unregister(bp);
+       bnxt_shutdown_tc(bp);
+-      clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+-      bnxt_cancel_sp_work(bp);
+-      bp->sp_event = 0;
+ 
+       bnxt_clear_int_mode(bp);
+       bnxt_hwrm_func_drv_unrgtr(bp);
+@@ -11806,7 +11814,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
+ static void bnxt_vpd_read_info(struct bnxt *bp)
+ {
+       struct pci_dev *pdev = bp->pdev;
+-      int i, len, pos, ro_size;
++      int i, len, pos, ro_size, size;
+       ssize_t vpd_size;
+       u8 *vpd_data;
+ 
+@@ -11841,7 +11849,8 @@ static void bnxt_vpd_read_info(struct bnxt *bp)
+       if (len + pos > vpd_size)
+               goto read_sn;
+ 
+-      strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
++      size = min(len, BNXT_VPD_FLD_LEN - 1);
++      memcpy(bp->board_partno, &vpd_data[pos], size);
+ 
+ read_sn:
+       pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
+@@ -11854,7 +11863,8 @@ read_sn:
+       if (len + pos > vpd_size)
+               goto exit;
+ 
+-      strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
++      size = min(len, BNXT_VPD_FLD_LEN - 1);
++      memcpy(bp->board_serialno, &vpd_data[pos], size);
+ exit:
+       kfree(vpd_data);
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 78e2fd63ac3d5..440b43c8068f1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1673,6 +1673,10 @@ struct bnxt {
+ #define BNXT_STATE_FW_FATAL_COND      6
+ #define BNXT_STATE_DRV_REGISTERED     7
+ 
++#define BNXT_NO_FW_ACCESS(bp)                                 \
++      (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) ||    \
++       pci_channel_offline((bp)->pdev))
++
+       struct bnxt_irq *irq_tbl;
+       int                     total_irqs;
+       u8                      mac_addr[ETH_ALEN];
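
The new BNXT_NO_FW_ACCESS() macro above widens every converted call site from "is the firmware in a fatal state?" to "fatal state, or PCI channel offline?", so HWRM requests are also skipped during PCI error recovery. A compact userspace model of the guard (names are illustrative, not the driver's):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Models BNXT_NO_FW_ACCESS(): either condition blocks firmware access. */
#define NO_FW_ACCESS(fatal, offline) ((fatal) || (offline))

static int send_fw_request(bool fw_fatal, bool pci_offline)
{
        if (NO_FW_ACCESS(fw_fatal, pci_offline))
                return -EBUSY; /* as in bnxt_hwrm_do_send_msg() */
        /* ... issue the HWRM request here ... */
        return 0;
}

int main(void)
{
        printf("%d %d\n", send_fw_request(false, false),
               send_fw_request(false, true)); /* prints "0 -16" */
        return 0;
}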
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index bc2c76fa54cad..f6e236a7bf18d 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1735,9 +1735,12 @@ static int bnxt_set_pauseparam(struct net_device *dev,
+       if (!BNXT_PHY_CFG_ABLE(bp))
+               return -EOPNOTSUPP;
+ 
++      mutex_lock(&bp->link_lock);
+       if (epause->autoneg) {
+-              if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+-                      return -EINVAL;
++              if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
++                      rc = -EINVAL;
++                      goto pause_exit;
++              }
+ 
+               link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+               if (bp->hwrm_spec_code >= 0x10201)
+@@ -1758,11 +1761,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
+       if (epause->tx_pause)
+               link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+ 
+-      if (netif_running(dev)) {
+-              mutex_lock(&bp->link_lock);
++      if (netif_running(dev))
+               rc = bnxt_hwrm_set_pause(bp);
+-              mutex_unlock(&bp->link_lock);
+-      }
++
++pause_exit:
++      mutex_unlock(&bp->link_lock);
+       return rc;
+ }
+ 
+@@ -2499,8 +2502,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+       struct bnxt *bp = netdev_priv(dev);
+       struct ethtool_eee *eee = &bp->eee;
+       struct bnxt_link_info *link_info = &bp->link_info;
+-      u32 advertising =
+-               _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
++      u32 advertising;
+       int rc = 0;
+ 
+       if (!BNXT_PHY_CFG_ABLE(bp))
+@@ -2509,19 +2511,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+       if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+               return -EOPNOTSUPP;
+ 
++      mutex_lock(&bp->link_lock);
++      advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+       if (!edata->eee_enabled)
+               goto eee_ok;
+ 
+       if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+               netdev_warn(dev, "EEE requires autoneg\n");
+-              return -EINVAL;
++              rc = -EINVAL;
++              goto eee_exit;
+       }
+       if (edata->tx_lpi_enabled) {
+               if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
+                                      edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
+                      netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
+                                   bp->lpi_tmr_lo, bp->lpi_tmr_hi);
+-                      return -EINVAL;
++                      rc = -EINVAL;
++                      goto eee_exit;
+               } else if (!bp->lpi_tmr_hi) {
+                       edata->tx_lpi_timer = eee->tx_lpi_timer;
+               }
+@@ -2531,7 +2537,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+       } else if (edata->advertised & ~advertising) {
+              netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
+                           edata->advertised, advertising);
+-              return -EINVAL;
++              rc = -EINVAL;
++              goto eee_exit;
+       }
+ 
+       eee->advertised = edata->advertised;
+@@ -2543,6 +2550,8 @@ eee_ok:
+       if (netif_running(dev))
+               rc = bnxt_hwrm_set_link_setting(bp, false, true);
+ 
++eee_exit:
++      mutex_unlock(&bp->link_lock);
+       return rc;
+ }
+ 
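
Both ethtool hunks above (bnxt_set_pauseparam() and bnxt_set_eee()) move the link_lock acquisition to the top of the function and convert early returns into jumps to a single unlock label, so no error path can leave the lock held or race with link configuration. A generic sketch of that shape (a pthread mutex stands in for the driver's mutex; set_link_param is illustrative):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t link_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock early, fail through one exit label, unlock exactly once. */
static int set_link_param(int autoneg_ok, int range_ok)
{
        int rc = 0;

        pthread_mutex_lock(&link_lock);
        if (!autoneg_ok) {
                rc = -EINVAL;
                goto out;
        }
        if (!range_ok) {
                rc = -EINVAL;
                goto out;
        }
        /* ... program the hardware while still holding the lock ... */
out:
        pthread_mutex_unlock(&link_lock);
        return rc;
}

int main(void)
{
        return set_link_param(1, 1);
}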
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 4b1b5928b1043..55347bcea2285 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -647,8 +647,7 @@ static void macb_mac_link_up(struct phylink_config *config,
+                               ctrl |= GEM_BIT(GBE);
+               }
+ 
+-              /* We do not support MLO_PAUSE_RX yet */
+-              if (tx_pause)
++              if (rx_pause)
+                       ctrl |= MACB_BIT(PAE);
+ 
+               macb_set_tx_clk(bp->tx_clk, speed, ndev);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index d02d346629b36..ff0d82e2535da 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -1906,13 +1906,16 @@ out:
+ static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
+                               struct filter_entry *f)
+ {
+-      if (f->fs.hitcnts)
++      if (f->fs.hitcnts) {
+               set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
+-                            TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
++                            TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
++                            TCB_TIMESTAMP_V(0ULL),
++                            1);
++              set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
+                             TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
+-                            TCB_TIMESTAMP_V(0ULL) |
+                             TCB_RTT_TS_RECENT_AGE_V(0ULL),
+                             1);
++      }
+ 
+       if (f->fs.newdmac)
+               set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+index b1a073eea60b2..a020e84906813 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+@@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap)
+ {
+       struct mps_entries_ref *mps_entry, *tmp;
+ 
+-      if (!list_empty(&adap->mps_ref))
++      if (list_empty(&adap->mps_ref))
+               return;
+ 
+       spin_lock(&adap->mps_ref_lock);
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+index e0f5a81d8620d..7fe39a155b329 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+@@ -45,6 +45,8 @@
+ 
+ #define MGMT_MSG_TIMEOUT                5000
+ 
++#define SET_FUNC_PORT_MBOX_TIMEOUT    30000
++
+ #define SET_FUNC_PORT_MGMT_TIMEOUT    25000
+ 
+ #define mgmt_to_pfhwdev(pf_mgmt)        \
+@@ -358,16 +360,20 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
+               return -EINVAL;
+       }
+ 
+-      if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+-              timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
++      if (HINIC_IS_VF(hwif)) {
++              if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
++                      timeout = SET_FUNC_PORT_MBOX_TIMEOUT;
+ 
+-      if (HINIC_IS_VF(hwif))
+               return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
+-                                      in_size, buf_out, out_size, 0);
+-      else
++                                      in_size, buf_out, out_size, timeout);
++      } else {
++              if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
++                      timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
++
+               return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
+                               buf_out, out_size, MGMT_DIRECT_SEND,
+                               MSG_NOT_RESP, timeout);
++      }
+ }
+ 
+ static void recv_mgmt_msg_work_handler(struct work_struct *work)
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
+index e9e6f4c9309a1..c9d884049fd04 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
+@@ -168,6 +168,24 @@ err_init_txq:
+       return err;
+ }
+ 
++static void enable_txqs_napi(struct hinic_dev *nic_dev)
++{
++      int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
++      int i;
++
++      for (i = 0; i < num_txqs; i++)
++              napi_enable(&nic_dev->txqs[i].napi);
++}
++
++static void disable_txqs_napi(struct hinic_dev *nic_dev)
++{
++      int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
++      int i;
++
++      for (i = 0; i < num_txqs; i++)
++              napi_disable(&nic_dev->txqs[i].napi);
++}
++
+ /**
+  * free_txqs - Free the Logical Tx Queues of specific NIC device
+  * @nic_dev: the specific NIC device
+@@ -394,6 +412,8 @@ int hinic_open(struct net_device *netdev)
+               goto err_create_txqs;
+       }
+ 
++      enable_txqs_napi(nic_dev);
++
+       err = create_rxqs(nic_dev);
+       if (err) {
+               netif_err(nic_dev, drv, netdev,
+@@ -475,6 +495,7 @@ err_port_state:
+       }
+ 
+ err_create_rxqs:
++      disable_txqs_napi(nic_dev);
+       free_txqs(nic_dev);
+ 
+ err_create_txqs:
+@@ -488,6 +509,9 @@ int hinic_close(struct net_device *netdev)
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       unsigned int flags;
+ 
++      /* Disable txq napi first to avoid rewaking txq in free_tx_poll */
++      disable_txqs_napi(nic_dev);
++
+       down(&nic_dev->mgmt_lock);
+ 
+       flags = nic_dev->flags;
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+index 4c66a0bc1b283..789aa278851e3 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+@@ -684,18 +684,6 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
+       return budget;
+ }
+ 
+-static void tx_napi_add(struct hinic_txq *txq, int weight)
+-{
+-      netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
+-      napi_enable(&txq->napi);
+-}
+-
+-static void tx_napi_del(struct hinic_txq *txq)
+-{
+-      napi_disable(&txq->napi);
+-      netif_napi_del(&txq->napi);
+-}
+-
+ static irqreturn_t tx_irq(int irq, void *data)
+ {
+       struct hinic_txq *txq = data;
+@@ -724,7 +712,7 @@ static int tx_request_irq(struct hinic_txq *txq)
+       struct hinic_sq *sq = txq->sq;
+       int err;
+ 
+-      tx_napi_add(txq, nic_dev->tx_weight);
++      netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);
+ 
+       hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
+                            TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
+@@ -734,7 +722,7 @@ static int tx_request_irq(struct hinic_txq *txq)
+       err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to request Tx irq\n");
+-              tx_napi_del(txq);
++              netif_napi_del(&txq->napi);
+               return err;
+       }
+ 
+@@ -746,7 +734,7 @@ static void tx_free_irq(struct hinic_txq *txq)
+       struct hinic_sq *sq = txq->sq;
+ 
+       free_irq(sq->irq, txq);
+-      tx_napi_del(txq);
++      netif_napi_del(&txq->napi);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 5afb3c9c52d20..1b702a43a5d01 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -479,6 +479,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
+       int i, j, rc;
+       u64 *size_array;
+ 
++      if (!adapter->rx_pool)
++              return -1;
++
+       size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+               be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ 
+@@ -649,6 +652,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
+       int tx_scrqs;
+       int i, rc;
+ 
++      if (!adapter->tx_pool)
++              return -1;
++
+       tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+       for (i = 0; i < tx_scrqs; i++) {
+               rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
+@@ -2011,7 +2017,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+                   adapter->req_rx_add_entries_per_subcrq !=
+                   old_num_rx_slots ||
+                   adapter->req_tx_entries_per_subcrq !=
+-                  old_num_tx_slots) {
++                  old_num_tx_slots ||
++                  !adapter->rx_pool ||
++                  !adapter->tso_pool ||
++                  !adapter->tx_pool) {
+                       release_rx_pools(adapter);
+                       release_tx_pools(adapter);
+                       release_napi(adapter);
+@@ -2023,12 +2032,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 
+               } else {
+                       rc = reset_tx_pools(adapter);
+-                      if (rc)
++                      if (rc) {
++                              netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
++                                              rc);
+                               goto out;
++                      }
+ 
+                       rc = reset_rx_pools(adapter);
+-                      if (rc)
++                      if (rc) {
++                              netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
++                                              rc);
+                               goto out;
++                      }
+               }
+               ibmvnic_disable_irqs(adapter);
+       }
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 1645e4e7ebdbb..635ff3a5dcfb3 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
+       }
+ 
+       if (rx < budget) {
+-              napi_complete(&ch->napi);
+-              ltq_dma_enable_irq(&ch->dma);
++              if (napi_complete_done(&ch->napi, rx))
++                      ltq_dma_enable_irq(&ch->dma);
+       }
+ 
+       return rx;
+@@ -268,9 +268,12 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
+       net_dev->stats.tx_bytes += bytes;
+       netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
+ 
++      if (netif_queue_stopped(net_dev))
++              netif_wake_queue(net_dev);
++
+       if (pkts < budget) {
+-              napi_complete(&ch->napi);
+-              ltq_dma_enable_irq(&ch->dma);
++              if (napi_complete_done(&ch->napi, pkts))
++                      ltq_dma_enable_irq(&ch->dma);
+       }
+ 
+       return pkts;
+@@ -342,10 +345,12 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
+ {
+       struct xrx200_chan *ch = ptr;
+ 
+-      ltq_dma_disable_irq(&ch->dma);
+-      ltq_dma_ack_irq(&ch->dma);
++      if (napi_schedule_prep(&ch->napi)) {
++              __napi_schedule(&ch->napi);
++              ltq_dma_disable_irq(&ch->dma);
++      }
+ 
+-      napi_schedule(&ch->napi);
++      ltq_dma_ack_irq(&ch->dma);
+ 
+       return IRQ_HANDLED;
+ }
+@@ -499,7 +504,7 @@ static int xrx200_probe(struct platform_device *pdev)
+ 
+       /* setup NAPI */
+       netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
+-      netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
++      netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+ 
+       platform_set_drvdata(pdev, priv);
+ 
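
The xrx200 hunks above implement the standard NAPI hand-off: the interrupt handler masks device interrupts only if napi_schedule_prep() wins the right to poll, and the pollers re-enable them only when napi_complete_done() confirms polling really finished (it returns false if NAPI stayed scheduled). A small userspace model of that contract (all names illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool scheduled;          /* models NAPI_STATE_SCHED */
static bool irq_enabled = true;

static bool schedule_prep(void) /* models napi_schedule_prep() */
{
        if (scheduled)
                return false;
        scheduled = true;
        return true;
}

static bool complete_done(int done, int budget) /* models napi_complete_done() */
{
        if (done == budget)
                return false;   /* budget exhausted: stay scheduled */
        scheduled = false;
        return true;
}

static void isr(void)
{
        if (schedule_prep())
                irq_enabled = false; /* mask only if we won the poll */
        /* ack the interrupt either way */
}

static void poll(int done, int budget)
{
        if (done < budget && complete_done(done, budget))
                irq_enabled = true;  /* re-arm only on true completion */
}

int main(void)
{
        isr();
        poll(3, 64);
        printf("irq_enabled=%d scheduled=%d\n", irq_enabled, scheduled); /* 1 0 */
        return 0;
}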
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 842db20493df6..76b23ba7a4687 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -604,7 +604,7 @@ struct mlx5e_rq {
+       struct dim         dim; /* Dynamic Interrupt Moderation */
+ 
+       /* XDP */
+-      struct bpf_prog       *xdp_prog;
++      struct bpf_prog __rcu *xdp_prog;
+       struct mlx5e_xdpsq    *xdpsq;
+       DECLARE_BITMAP(flags, 8);
+       struct page_pool      *page_pool;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+index c9d308e919655..75ed820b0ad72 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+@@ -121,7 +121,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
+ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+                     u32 *len, struct xdp_buff *xdp)
+ {
+-      struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
++      struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
+       u32 act;
+       int err;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+index a33a1f762c70d..40db27bf790bb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+@@ -31,7 +31,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
+ {
+       struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
+       u32 cqe_bcnt32 = cqe_bcnt;
+-      bool consumed;
+ 
+       /* Check packet size. Note LRO doesn't use linear SKB */
+       if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+@@ -51,10 +50,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
+       xsk_buff_dma_sync_for_cpu(xdp);
+       prefetch(xdp->data);
+ 
+-      rcu_read_lock();
+-      consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
+-      rcu_read_unlock();
+-
+       /* Possible flows:
+        * - XDP_REDIRECT to XSKMAP:
+        *   The page is owned by the userspace from now.
+@@ -70,7 +65,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
+        * allocated first from the Reuse Ring, so it has enough space.
+        */
+ 
+-      if (likely(consumed)) {
++      if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
+              if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
+                      __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+               return NULL; /* page/packet was consumed by XDP */
+@@ -88,7 +83,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
+                                             u32 cqe_bcnt)
+ {
+       struct xdp_buff *xdp = wi->di->xsk;
+-      bool consumed;
+ 
+       /* wi->offset is not used in this function, because xdp->data and the
+        * DMA address point directly to the necessary place. Furthermore, the
+@@ -107,11 +101,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
+               return NULL;
+       }
+ 
+-      rcu_read_lock();
+-      consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
+-      rcu_read_unlock();
+-
+-      if (likely(consumed))
++      if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
+               return NULL; /* page/packet was consumed by XDP */
+ 
+       /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index 2c80205dc939d..3081cd74d651b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -143,8 +143,7 @@ err_free_cparam:
+ void mlx5e_close_xsk(struct mlx5e_channel *c)
+ {
+       clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+-      napi_synchronize(&c->napi);
+-      synchronize_rcu(); /* Sync with the XSK wakeup. */
++      synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
+ 
+       mlx5e_close_rq(&c->xskrq);
+       mlx5e_close_cq(&c->xskrq.cq);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+index 01468ec274466..b949b9a7538b0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+@@ -35,7 +35,6 @@
+ #include <net/sock.h>
+ 
+ #include "en.h"
+-#include "accel/tls.h"
+ #include "fpga/sdk.h"
+ #include "en_accel/tls.h"
+ 
+@@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
+ 
+ #define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
+ 
++static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
++{
++      return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
++}
++
+ int mlx5e_tls_get_count(struct mlx5e_priv *priv)
+ {
+-      if (!priv->tls)
++      if (!is_tls_atomic_stats(priv))
+               return 0;
+ 
+       return NUM_TLS_SW_COUNTERS;
+@@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+ {
+       unsigned int i, idx = 0;
+ 
+-      if (!priv->tls)
++      if (!is_tls_atomic_stats(priv))
+               return 0;
+ 
+       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+@@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
+ {
+       int i, idx = 0;
+ 
+-      if (!priv->tls)
++      if (!is_tls_atomic_stats(priv))
+               return 0;
+ 
+       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 3b892ec301b4a..cccf65fc116ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -401,7 +401,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+ 
+       if (params->xdp_prog)
+               bpf_prog_inc(params->xdp_prog);
+-      rq->xdp_prog = params->xdp_prog;
++      RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
+ 
+       rq_xdp_ix = rq->ix;
+       if (xsk)
+@@ -410,7 +410,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+       if (err < 0)
+               goto err_rq_wq_destroy;
+ 
+-      rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
++      rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+       rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
+       pool_size = 1 << params->log_rq_mtu_frames;
+ 
+@@ -605,8 +605,8 @@ err_free:
+       }
+ 
+ err_rq_wq_destroy:
+-      if (rq->xdp_prog)
+-              bpf_prog_put(rq->xdp_prog);
++      if (params->xdp_prog)
++              bpf_prog_put(params->xdp_prog);
+       xdp_rxq_info_unreg(&rq->xdp_rxq);
+       page_pool_destroy(rq->page_pool);
+       mlx5_wq_destroy(&rq->wq_ctrl);
+@@ -616,10 +616,16 @@ err_rq_wq_destroy:
+ 
+ static void mlx5e_free_rq(struct mlx5e_rq *rq)
+ {
++      struct mlx5e_channel *c = rq->channel;
++      struct bpf_prog *old_prog = NULL;
+       int i;
+ 
+-      if (rq->xdp_prog)
+-              bpf_prog_put(rq->xdp_prog);
++      /* drop_rq has neither channel nor xdp_prog. */
++      if (c)
++              old_prog = rcu_dereference_protected(rq->xdp_prog,
++                                                   lockdep_is_held(&c->priv->state_lock));
++      if (old_prog)
++              bpf_prog_put(old_prog);
+ 
+       switch (rq->wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+@@ -905,7 +911,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
+ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
+ {
+       clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+-      napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
++      synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+ }
+ 
+ void mlx5e_close_rq(struct mlx5e_rq *rq)
+@@ -1350,12 +1356,10 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq)
+ 
+ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
+ {
+-      struct mlx5e_channel *c = sq->channel;
+       struct mlx5_wq_cyc *wq = &sq->wq;
+ 
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+-      /* prevent netif_tx_wake_queue */
+-      napi_synchronize(&c->napi);
++      synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
+ 
+       mlx5e_tx_disable_queue(sq->txq);
+ 
+@@ -1430,10 +1434,8 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
+ 
+ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+ {
+-      struct mlx5e_channel *c = icosq->channel;
+-
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+-      napi_synchronize(&c->napi);
++      synchronize_rcu(); /* Sync with NAPI. */
+ }
+ 
+ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+@@ -1511,7 +1513,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+       struct mlx5e_channel *c = sq->channel;
+ 
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+-      napi_synchronize(&c->napi);
++      synchronize_rcu(); /* Sync with NAPI. */
+ 
+       mlx5e_destroy_sq(c->mdev, sq->sqn);
+       mlx5e_free_xdpsq_descs(sq);
+@@ -4423,6 +4425,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
+       return 0;
+ }
+ 
+static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
++{
++      struct bpf_prog *old_prog;
++
++      old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
++                                     lockdep_is_held(&rq->channel->priv->state_lock));
++      if (old_prog)
++              bpf_prog_put(old_prog);
++}
++
+ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+ {
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -4481,29 +4493,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+        */
+       for (i = 0; i < priv->channels.num; i++) {
+               struct mlx5e_channel *c = priv->channels.c[i];
+-              bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+-
+-              clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
+-              if (xsk_open)
+-                      clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+-              napi_synchronize(&c->napi);
+-              /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
+-
+-              old_prog = xchg(&c->rq.xdp_prog, prog);
+-              if (old_prog)
+-                      bpf_prog_put(old_prog);
+-
+-              if (xsk_open) {
+-                      old_prog = xchg(&c->xskrq.xdp_prog, prog);
+-                      if (old_prog)
+-                              bpf_prog_put(old_prog);
+-              }
+ 
+-              set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
+-              if (xsk_open)
+-                      set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+-              /* napi_schedule in case we have missed anything */
+-              napi_schedule(&c->napi);
++              mlx5e_rq_replace_xdp_prog(&c->rq, prog);
++              if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
++                      mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
+       }
+ 
+ unlock:
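
The mlx5 hunks above and below convert rq->xdp_prog into a true RCU pointer: NAPI now runs inside rcu_read_lock() (see the en_txrx.c hunk), readers use rcu_dereference(), writers publish with rcu_replace_pointer() under the state lock, and the napi_synchronize() calls become synchronize_rcu(). A userspace model of the publish-and-release step only (atomics stand in for RCU publication; grace periods and read-side sections are elided; prog_get/prog_put are illustrative stand-ins for bpf_prog_inc()/bpf_prog_put()):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct prog { int refcnt; };

static _Atomic(struct prog *) xdp_prog;

static struct prog *prog_get(struct prog *p)
{
        if (p)
                p->refcnt++;
        return p;
}

static void prog_put(struct prog *p)
{
        if (p && --p->refcnt == 0)
                free(p);
}

/* Models mlx5e_rq_replace_xdp_prog(): swap in the new program, then drop
 * the reference that was held on the old one. */
static void replace_prog(struct prog *newp)
{
        struct prog *old = atomic_exchange(&xdp_prog, prog_get(newp));

        prog_put(old);
}

int main(void)
{
        struct prog *p = calloc(1, sizeof(*p));

        if (!p)
                return 1;
        p->refcnt = 1;      /* caller's reference */
        replace_prog(p);    /* the queue takes its own reference */
        replace_prog(NULL); /* detach: the queue's reference is dropped */
        prog_put(p);        /* caller's reference dropped, prog freed */
        printf("replaced and released\n");
        return 0;
}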
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index dbb1c63239672..409fecbcc5d2b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1072,7 +1072,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+       struct xdp_buff xdp;
+       struct sk_buff *skb;
+       void *va, *data;
+-      bool consumed;
+       u32 frag_size;
+ 
+       va             = page_address(di->page) + wi->offset;
+@@ -1084,11 +1083,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+       prefetchw(va); /* xdp_frame data area */
+       prefetch(data);
+ 
+-      rcu_read_lock();
+       mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
+-      consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
+-      rcu_read_unlock();
+-      if (consumed)
++      if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
+               return NULL; /* page/packet was consumed by XDP */
+ 
+       rx_headroom = xdp.data - xdp.data_hard_start;
+@@ -1369,7 +1365,6 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+       struct sk_buff *skb;
+       void *va, *data;
+       u32 frag_size;
+-      bool consumed;
+ 
+       /* Check packet size. Note LRO doesn't use linear SKB */
+       if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+@@ -1386,11 +1381,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+       prefetchw(va); /* xdp_frame data area */
+       prefetch(data);
+ 
+-      rcu_read_lock();
+       mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
+-      consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
+-      rcu_read_unlock();
+-      if (consumed) {
++      if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
+               if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+                      __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
+               return NULL; /* page/packet was consumed by XDP */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index fcedb5bdca9e5..7da1e7462f64e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1399,11 +1399,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+ 
+       mlx5e_put_flow_tunnel_id(flow);
+ 
+-      if (flow_flag_test(flow, NOT_READY)) {
++      if (flow_flag_test(flow, NOT_READY))
+               remove_unready_flow(flow);
+-              kvfree(attr->parse_attr);
+-              return;
+-      }
+ 
+       if (mlx5e_is_offloaded_flow(flow)) {
+               if (flow_flag_test(flow, SLOW))
+@@ -2734,6 +2731,22 @@ static struct mlx5_fields fields[] = {
+       OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
+ };
+ 
++static unsigned long mask_to_le(unsigned long mask, int size)
++{
++      __be32 mask_be32;
++      __be16 mask_be16;
++
++      if (size == 32) {
++              mask_be32 = (__force __be32)(mask);
++              mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
++      } else if (size == 16) {
++              mask_be32 = (__force __be32)(mask);
++              mask_be16 = *(__be16 *)&mask_be32;
++              mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
++      }
++
++      return mask;
++}
+ static int offload_pedit_fields(struct mlx5e_priv *priv,
+                               int namespace,
+                               struct pedit_headers_action *hdrs,
+@@ -2747,9 +2760,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+       u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
+       struct mlx5e_tc_mod_hdr_acts *mod_acts;
+       struct mlx5_fields *f;
+-      unsigned long mask;
+-      __be32 mask_be32;
+-      __be16 mask_be16;
++      unsigned long mask, field_mask;
+       int err;
+       u8 cmd;
+ 
+@@ -2815,14 +2826,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+               if (skip)
+                       continue;
+ 
+-              if (f->field_bsize == 32) {
+-                      mask_be32 = (__force __be32)(mask);
+-                      mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+-              } else if (f->field_bsize == 16) {
+-                      mask_be32 = (__force __be32)(mask);
+-                      mask_be16 = *(__be16 *)&mask_be32;
+-                      mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+-              }
++              mask = mask_to_le(mask, f->field_bsize);
+ 
+               first = find_first_bit(&mask, f->field_bsize);
+               next_z = find_next_zero_bit(&mask, f->field_bsize, first);
+@@ -2853,9 +2857,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
+               if (cmd == MLX5_ACTION_TYPE_SET) {
+                       int start;
+ 
++                      field_mask = mask_to_le(f->field_mask, f->field_bsize);
++
+                      /* if field is bit sized it can start not from first bit */
+-                      start = find_first_bit((unsigned long *)&f->field_mask,
+-                                             f->field_bsize);
++                      start = find_first_bit(&field_mask, f->field_bsize);
+ 
+                       MLX5_SET(set_action_in, action, offset, first - start);
+                      /* length is num of bits to be written, zero means length of 32 */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+index 8480278f2ee20..954a2f0513d67 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+@@ -121,13 +121,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+       struct mlx5e_xdpsq *xsksq = &c->xsksq;
+       struct mlx5e_rq *xskrq = &c->xskrq;
+       struct mlx5e_rq *rq = &c->rq;
+-      bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+       bool aff_change = false;
+       bool busy_xsk = false;
+       bool busy = false;
+       int work_done = 0;
++      bool xsk_open;
+       int i;
+ 
++      rcu_read_lock();
++
++      xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
++
+       ch_stats->poll++;
+ 
+       for (i = 0; i < c->num_tc; i++)
+@@ -167,8 +171,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+       busy |= busy_xsk;
+ 
+       if (busy) {
+-              if (likely(mlx5e_channel_no_affinity_change(c)))
+-                      return budget;
++              if (likely(mlx5e_channel_no_affinity_change(c))) {
++                      work_done = budget;
++                      goto out;
++              }
+               ch_stats->aff_change++;
+               aff_change = true;
+               if (budget && work_done == budget)
+@@ -176,7 +182,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+       }
+ 
+       if (unlikely(!napi_complete_done(napi, work_done)))
+-              return work_done;
++              goto out;
+ 
+       ch_stats->arm++;
+ 
+@@ -203,6 +209,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+               ch_stats->force_irq++;
+       }
+ 
++out:
++      rcu_read_unlock();
++
+       return work_done;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index ed75353c56b85..f16610feab88d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -1219,35 +1219,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+       }
+       esw->fdb_table.offloads.send_to_vport_grp = g;
+ 
+-      /* create peer esw miss group */
+-      memset(flow_group_in, 0, inlen);
++      if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
++              /* create peer esw miss group */
++              memset(flow_group_in, 0, inlen);
+ 
+-      esw_set_flow_group_source_port(esw, flow_group_in);
++              esw_set_flow_group_source_port(esw, flow_group_in);
+ 
+-      if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+-              match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+-                                            flow_group_in,
+-                                            match_criteria);
++              if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
++                      match_criteria = MLX5_ADDR_OF(create_flow_group_in,
++                                                    flow_group_in,
++                                                    match_criteria);
+ 
+-              MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+-                               misc_parameters.source_eswitch_owner_vhca_id);
++                      MLX5_SET_TO_ONES(fte_match_param, match_criteria,
++                                       misc_parameters.source_eswitch_owner_vhca_id);
+ 
+-              MLX5_SET(create_flow_group_in, flow_group_in,
+-                       source_eswitch_owner_vhca_id_valid, 1);
+-      }
++                      MLX5_SET(create_flow_group_in, flow_group_in,
++                               source_eswitch_owner_vhca_id_valid, 1);
++              }
+ 
+-      MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+-      MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+-               ix + esw->total_vports - 1);
+-      ix += esw->total_vports;
++              MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
++              MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
++                       ix + esw->total_vports - 1);
++              ix += esw->total_vports;
+ 
+-      g = mlx5_create_flow_group(fdb, flow_group_in);
+-      if (IS_ERR(g)) {
+-              err = PTR_ERR(g);
+-              esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+-              goto peer_miss_err;
++              g = mlx5_create_flow_group(fdb, flow_group_in);
++              if (IS_ERR(g)) {
++                      err = PTR_ERR(g);
++                      esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
++                      goto peer_miss_err;
++              }
++              esw->fdb_table.offloads.peer_miss_grp = g;
+       }
+-      esw->fdb_table.offloads.peer_miss_grp = g;
+ 
+       /* create miss group */
+       memset(flow_group_in, 0, inlen);
+@@ -1282,7 +1284,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+ miss_rule_err:
+       mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+ miss_err:
+-      mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
++      if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
++              mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+ peer_miss_err:
+       mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ send_vport_err:
+@@ -1306,7 +1309,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
+       mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
+       mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
+       mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+-      mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
++      if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
++              mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+       mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+ 
+       mlx5_esw_chains_destroy(esw);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 2e5f7efb82a88..1f96f9efa3c18 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -655,7 +655,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
+       fte->action = *flow_act;
+       fte->flow_context = spec->flow_context;
+ 
+-      tree_init_node(&fte->node, NULL, del_sw_fte);
++      tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+ 
+       return fte;
+ }
+@@ -1792,7 +1792,6 @@ skip_search:
+               up_write_ref_node(&g->node, false);
+               rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+               up_write_ref_node(&fte->node, false);
+-              tree_put_node(&fte->node, false);
+               return rule;
+       }
+       rule = ERR_PTR(-ENOENT);
+@@ -1891,7 +1890,6 @@ search_again_locked:
+       up_write_ref_node(&g->node, false);
+       rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+       up_write_ref_node(&fte->node, false);
+-      tree_put_node(&fte->node, false);
+       tree_put_node(&g->node, false);
+       return rule;
+ 
+@@ -2001,7 +1999,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
+               up_write_ref_node(&fte->node, false);
+       } else {
+               del_hw_fte(&fte->node);
+-              up_write(&fte->node.lock);
++              /* Avoid double call to del_hw_fte */
++              fte->node.del_hw_func = NULL;
++              up_write_ref_node(&fte->node, false);
+               tree_put_node(&fte->node, false);
+       }
+       kfree(handle);
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+index 6eb9fb9a18145..9c9ae33d84ce9 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+@@ -829,8 +829,8 @@ nfp_port_get_fecparam(struct net_device *netdev,
+       struct nfp_eth_table_port *eth_port;
+       struct nfp_port *port;
+ 
+-      param->active_fec = ETHTOOL_FEC_NONE_BIT;
+-      param->fec = ETHTOOL_FEC_NONE_BIT;
++      param->active_fec = ETHTOOL_FEC_NONE;
++      param->fec = ETHTOOL_FEC_NONE;
+ 
+       port = nfp_port_from_netdev(netdev);
+       eth_port = nfp_port_get_eth_port(port);
+diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
+index 8ed78577cdedf..15672d0a4de69 100644
+--- a/drivers/net/ethernet/ti/cpsw_new.c
++++ b/drivers/net/ethernet/ti/cpsw_new.c
+@@ -17,6 +17,7 @@
+ #include <linux/phy.h>
+ #include <linux/phy/phy.h>
+ #include <linux/delay.h>
++#include <linux/pinctrl/consumer.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/of.h>
+@@ -2070,9 +2071,61 @@ static int cpsw_remove(struct platform_device *pdev)
+       return 0;
+ }
+ 
++static int __maybe_unused cpsw_suspend(struct device *dev)
++{
++      struct cpsw_common *cpsw = dev_get_drvdata(dev);
++      int i;
++
++      rtnl_lock();
++
++      for (i = 0; i < cpsw->data.slaves; i++) {
++              struct net_device *ndev = cpsw->slaves[i].ndev;
++
++              if (!(ndev && netif_running(ndev)))
++                      continue;
++
++              cpsw_ndo_stop(ndev);
++      }
++
++      rtnl_unlock();
++
++      /* Select sleep pin state */
++      pinctrl_pm_select_sleep_state(dev);
++
++      return 0;
++}
++
++static int __maybe_unused cpsw_resume(struct device *dev)
++{
++      struct cpsw_common *cpsw = dev_get_drvdata(dev);
++      int i;
++
++      /* Select default pin state */
++      pinctrl_pm_select_default_state(dev);
++
++      /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
++      rtnl_lock();
++
++      for (i = 0; i < cpsw->data.slaves; i++) {
++              struct net_device *ndev = cpsw->slaves[i].ndev;
++
++              if (!(ndev && netif_running(ndev)))
++                      continue;
++
++              cpsw_ndo_open(ndev);
++      }
++
++      rtnl_unlock();
++
++      return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
++
+ static struct platform_driver cpsw_driver = {
+       .driver = {
+               .name    = "cpsw-switch",
++              .pm      = &cpsw_pm_ops,
+               .of_match_table = cpsw_of_mtable,
+       },
+       .probe = cpsw_probe,
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index dec52b763d508..deede92b17fc7 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -773,7 +773,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
+                                      struct net_device *dev,
+                                      struct geneve_sock *gs4,
+                                      struct flowi4 *fl4,
+-                                     const struct ip_tunnel_info *info)
++                                     const struct ip_tunnel_info *info,
++                                     __be16 dport, __be16 sport)
+ {
+       bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+       struct geneve_dev *geneve = netdev_priv(dev);
+@@ -789,6 +790,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
+       fl4->flowi4_proto = IPPROTO_UDP;
+       fl4->daddr = info->key.u.ipv4.dst;
+       fl4->saddr = info->key.u.ipv4.src;
++      fl4->fl4_dport = dport;
++      fl4->fl4_sport = sport;
+ 
+       tos = info->key.tos;
+       if ((tos == 1) && !geneve->collect_md) {
+@@ -823,7 +826,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
+                                          struct net_device *dev,
+                                          struct geneve_sock *gs6,
+                                          struct flowi6 *fl6,
+-                                         const struct ip_tunnel_info *info)
++                                         const struct ip_tunnel_info *info,
++                                         __be16 dport, __be16 sport)
+ {
+       bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+       struct geneve_dev *geneve = netdev_priv(dev);
+@@ -839,6 +843,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
+       fl6->flowi6_proto = IPPROTO_UDP;
+       fl6->daddr = info->key.u.ipv6.dst;
+       fl6->saddr = info->key.u.ipv6.src;
++      fl6->fl6_dport = dport;
++      fl6->fl6_sport = sport;
++
+       prio = info->key.tos;
+       if ((prio == 1) && !geneve->collect_md) {
+               prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
+@@ -885,14 +892,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+       __be16 sport;
+       int err;
+ 
+-      rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
++      sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
++      rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
++                            geneve->info.key.tp_dst, sport);
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
+ 
+       skb_tunnel_check_pmtu(skb, &rt->dst,
+                             GENEVE_IPV4_HLEN + info->options_len);
+ 
+-      sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       if (geneve->collect_md) {
+               tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+               ttl = key->ttl;
+@@ -947,13 +955,14 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+       __be16 sport;
+       int err;
+ 
+-      dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
++      sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
++      dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
++                              geneve->info.key.tp_dst, sport);
+       if (IS_ERR(dst))
+               return PTR_ERR(dst);
+ 
+       skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
+ 
+-      sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       if (geneve->collect_md) {
+               prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+               ttl = key->ttl;
+@@ -1034,13 +1043,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+ {
+       struct ip_tunnel_info *info = skb_tunnel_info(skb);
+       struct geneve_dev *geneve = netdev_priv(dev);
++      __be16 sport;
+ 
+       if (ip_tunnel_info_af(info) == AF_INET) {
+               struct rtable *rt;
+               struct flowi4 fl4;
++
+               struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
++              sport = udp_flow_src_port(geneve->net, skb,
++                                        1, USHRT_MAX, true);
+ 
+-              rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
++              rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
++                                    geneve->info.key.tp_dst, sport);
+               if (IS_ERR(rt))
+                       return PTR_ERR(rt);
+ 
+@@ -1050,9 +1064,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+       } else if (ip_tunnel_info_af(info) == AF_INET6) {
+               struct dst_entry *dst;
+               struct flowi6 fl6;
++
+               struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
++              sport = udp_flow_src_port(geneve->net, skb,
++                                        1, USHRT_MAX, true);
+ 
+-              dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
++              dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
++                                      geneve->info.key.tp_dst, sport);
+               if (IS_ERR(dst))
+                       return PTR_ERR(dst);
+ 
+@@ -1063,8 +1081,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+               return -EINVAL;
+       }
+ 
+-      info->key.tp_src = udp_flow_src_port(geneve->net, skb,
+-                                           1, USHRT_MAX, true);
++      info->key.tp_src = sport;
+       info->key.tp_dst = geneve->info.key.tp_dst;
+       return 0;
+ }
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 47159b31e6b39..8309194b351a9 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2544,8 +2544,8 @@ static int netvsc_remove(struct hv_device *dev)
+ static int netvsc_suspend(struct hv_device *dev)
+ {
+       struct net_device_context *ndev_ctx;
+-      struct net_device *vf_netdev, *net;
+       struct netvsc_device *nvdev;
++      struct net_device *net;
+       int ret;
+ 
+       net = hv_get_drvdata(dev);
+@@ -2561,10 +2561,6 @@ static int netvsc_suspend(struct hv_device *dev)
+               goto out;
+       }
+ 
+-      vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+-      if (vf_netdev)
+-              netvsc_unregister_vf(vf_netdev);
+-
+       /* Save the current config info */
+       ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
+ 
+@@ -2580,6 +2576,7 @@ static int netvsc_resume(struct hv_device *dev)
+       struct net_device *net = hv_get_drvdata(dev);
+       struct net_device_context *net_device_ctx;
+       struct netvsc_device_info *device_info;
++      struct net_device *vf_netdev;
+       int ret;
+ 
+       rtnl_lock();
+@@ -2592,6 +2589,15 @@ static int netvsc_resume(struct hv_device *dev)
+       netvsc_devinfo_put(device_info);
+       net_device_ctx->saved_netvsc_dev_info = NULL;
+ 
++      /* A NIC driver (e.g. mlx5) may keep the VF network interface across
++       * hibernation, but here the data path is implicitly switched to the
++       * netvsc NIC since the vmbus channel is closed and re-opened, so
++       * netvsc_vf_changed() must be used to switch the data path to the VF.
++       */
++      vf_netdev = rtnl_dereference(net_device_ctx->vf_netdev);
++      if (vf_netdev && netvsc_vf_changed(vf_netdev) != NOTIFY_OK)
++              ret = -EINVAL;
++
+       rtnl_unlock();
+ 
+       return ret;
+diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
+index 9df2a3e78c989..d08c626b2baa6 100644
+--- a/drivers/net/ipa/ipa_table.c
++++ b/drivers/net/ipa/ipa_table.c
+@@ -521,7 +521,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
+       val = ioread32(endpoint->ipa->reg_virt + offset);
+ 
+       /* Zero all filter-related fields, preserving the rest */
+-      u32_replace_bits(val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
++      u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+ 
+       iowrite32(val, endpoint->ipa->reg_virt + offset);
+ }
+@@ -572,7 +572,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
+       val = ioread32(ipa->reg_virt + offset);
+ 
+       /* Zero all route-related fields, preserving the rest */
+-      u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
++      u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+ 
+       iowrite32(val, ipa->reg_virt + offset);
+ }
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 56cfae9504727..f5620f91dbf3a 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -948,7 +948,7 @@ void phy_stop(struct phy_device *phydev)
+ {
+       struct net_device *dev = phydev->attached_dev;
+ 
+-      if (!phy_is_started(phydev)) {
++      if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
+               WARN(1, "called from state %s\n",
+                    phy_state_to_str(phydev->state));
+               return;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 98369430a3be5..067910d242ab3 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1092,10 +1092,6 @@ int phy_init_hw(struct phy_device *phydev)
+       if (ret < 0)
+               return ret;
+ 
+-      ret = phy_disable_interrupts(phydev);
+-      if (ret)
+-              return ret;
+-
+       if (phydev->drv->config_init)
+               ret = phydev->drv->config_init(phydev);
+ 
+@@ -1372,6 +1368,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+       if (err)
+               goto error;
+ 
++      err = phy_disable_interrupts(phydev);
++      if (err)
++              return err;
++
+       phy_resume(phydev);
+       phy_led_triggers_register(phydev);
+ 
+@@ -1631,7 +1631,8 @@ void phy_detach(struct phy_device *phydev)
+ 
+       phy_led_triggers_unregister(phydev);
+ 
+-      module_put(phydev->mdio.dev.driver->owner);
++      if (phydev->mdio.dev.driver)
++              module_put(phydev->mdio.dev.driver->owner);
+ 
+       /* If the device had no specific driver before (i.e. - it
+        * was using the generic driver), we unbind the device
+diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
+index 48ced3912576c..16f33d1ffbfb9 100644
+--- a/drivers/net/wan/hdlc_ppp.c
++++ b/drivers/net/wan/hdlc_ppp.c
+@@ -383,11 +383,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+       }
+ 
+       for (opt = data; len; len -= opt[1], opt += opt[1]) {
+-              if (len < 2 || len < opt[1]) {
+-                      dev->stats.rx_errors++;
+-                      kfree(out);
+-                      return; /* bad packet, drop silently */
+-              }
++              if (len < 2 || opt[1] < 2 || len < opt[1])
++                      goto err_out;
+ 
+               if (pid == PID_LCP)
+                       switch (opt[0]) {
+@@ -395,6 +392,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+                               continue; /* MRU always OK and > 1500 bytes? */
+ 
+                       case LCP_OPTION_ACCM: /* async control character map */
++                              if (opt[1] < sizeof(valid_accm))
++                                      goto err_out;
+                               if (!memcmp(opt, valid_accm,
+                                           sizeof(valid_accm)))
+                                       continue;
+@@ -406,6 +405,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+                               }
+                               break;
+                       case LCP_OPTION_MAGIC:
++                              if (len < 6)
++                                      goto err_out;
+                               if (opt[1] != 6 || (!opt[2] && !opt[3] &&
+                                                   !opt[4] && !opt[5]))
+                                       break; /* reject invalid magic number */
+@@ -424,6 +425,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
+               ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
+ 
+       kfree(out);
++      return;
++
++err_out:
++      dev->stats.rx_errors++;
++      kfree(out);
+ }
+ 
+ static int ppp_rx(struct sk_buff *skb)
+diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
+index 201a22681945f..27cb5045bed2d 100644
+--- a/drivers/net/wireguard/noise.c
++++ b/drivers/net/wireguard/noise.c
+@@ -87,15 +87,12 @@ static void handshake_zero(struct noise_handshake *handshake)
+ 
+ void wg_noise_handshake_clear(struct noise_handshake *handshake)
+ {
++      down_write(&handshake->lock);
+       wg_index_hashtable_remove(
+                       handshake->entry.peer->device->index_hashtable,
+                       &handshake->entry);
+-      down_write(&handshake->lock);
+       handshake_zero(handshake);
+       up_write(&handshake->lock);
+-      wg_index_hashtable_remove(
+-                      handshake->entry.peer->device->index_hashtable,
+-                      &handshake->entry);
+ }
+ 
+ static struct noise_keypair *keypair_create(struct wg_peer *peer)
+diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c
+index e4deb331476b3..f2783aa7a88f1 100644
+--- a/drivers/net/wireguard/peerlookup.c
++++ b/drivers/net/wireguard/peerlookup.c
+@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
+                               struct index_hashtable_entry *old,
+                               struct index_hashtable_entry *new)
+ {
+-      if (unlikely(hlist_unhashed(&old->index_hash)))
+-              return false;
++      bool ret;
++
+       spin_lock_bh(&table->lock);
++      ret = !hlist_unhashed(&old->index_hash);
++      if (unlikely(!ret))
++              goto out;
++
+       new->index = old->index;
+       hlist_replace_rcu(&old->index_hash, &new->index_hash);
+ 
+@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
+        * simply gets dropped, which isn't terrible.
+        */
+       INIT_HLIST_NODE(&old->index_hash);
++out:
+       spin_unlock_bh(&table->lock);
+-      return true;
++      return ret;
+ }
+ 
+ void wg_index_hashtable_remove(struct index_hashtable *table,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 0c0377fc00c2a..1119463cf2425 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3208,8 +3208,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
+  *    is untouched. Otherwise it is extended. Returns zero on
+  *    success. The skb is freed on error if @free_on_error is true.
+  */
+-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+-                                bool free_on_error)
++static inline int __must_check __skb_put_padto(struct sk_buff *skb,
++                                             unsigned int len,
++                                             bool free_on_error)
+ {
+       unsigned int size = skb->len;
+ 
+@@ -3232,7 +3233,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+  *    is untouched. Otherwise it is extended. Returns zero on
+  *    success. The skb is freed on error.
+  */
+-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
++static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
+ {
+       return __skb_put_padto(skb, len, true);
+ }
+diff --git a/include/net/flow.h b/include/net/flow.h
+index a50fb77a0b279..d058e63fb59a3 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -116,6 +116,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
+       fl4->saddr = saddr;
+       fl4->fl4_dport = dport;
+       fl4->fl4_sport = sport;
++      fl4->flowi4_multipath_hash = 0;
+ }
+ 
+ /* Reset some input parameters after previous lookup */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index fb42c90348d3b..f3c5d9d2f82d2 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -226,12 +226,14 @@ struct sctp_sock {
+               data_ready_signalled:1;
+ 
+       atomic_t pd_mode;
++
++      /* Fields after this point will be skipped on copies, like on accept
++       * and peeloff operations
++       */
++
+       /* Receive to here while partial delivery is in effect. */
+       struct sk_buff_head pd_lobby;
+ 
+-      /* These must be the last fields, as they will skipped on copies,
+-       * like on accept and peeloff operations
+-       */
+       struct list_head auto_asconf_list;
+       int do_auto_asconf;
+ };
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index f9092c71225fd..61c94cefa8436 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -1288,11 +1288,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
+       }
+ }
+ 
+-static int __br_vlan_get_pvid(const struct net_device *dev,
+-                            struct net_bridge_port *p, u16 *p_pvid)
++int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
+ {
+       struct net_bridge_vlan_group *vg;
++      struct net_bridge_port *p;
+ 
++      ASSERT_RTNL();
++      p = br_port_get_check_rtnl(dev);
+       if (p)
+               vg = nbp_vlan_group(p);
+       else if (netif_is_bridge_master(dev))
+@@ -1303,18 +1305,23 @@ static int __br_vlan_get_pvid(const struct net_device *dev,
+       *p_pvid = br_get_pvid(vg);
+       return 0;
+ }
+-
+-int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
+-{
+-      ASSERT_RTNL();
+-
+-      return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
+-}
+ EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
+ 
+ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
+ {
+-      return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
++      struct net_bridge_vlan_group *vg;
++      struct net_bridge_port *p;
++
++      p = br_port_get_check_rcu(dev);
++      if (p)
++              vg = nbp_vlan_group_rcu(p);
++      else if (netif_is_bridge_master(dev))
++              vg = br_vlan_group_rcu(netdev_priv(dev));
++      else
++              return -EINVAL;
++
++      *p_pvid = br_get_pvid(vg);
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5bd0b550893fb..181b13e02bdc0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8641,7 +8641,7 @@ int dev_get_port_parent_id(struct net_device *dev,
+               if (!first.id_len)
+                       first = *ppid;
+               else if (memcmp(&first, ppid, sizeof(*ppid)))
+-                      return -ENODATA;
++                      return -EOPNOTSUPP;
+       }
+ 
+       return err;
+diff --git a/net/core/filter.c b/net/core/filter.c
+index a69e79327c29e..d13ea1642b974 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4774,6 +4774,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+       fl4.saddr = params->ipv4_src;
+       fl4.fl4_sport = params->sport;
+       fl4.fl4_dport = params->dport;
++      fl4.flowi4_multipath_hash = 0;
+ 
+       if (flags & BPF_FIB_LOOKUP_DIRECT) {
+               u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index dcd61aca343ec..944ab214e5ae8 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -251,10 +251,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
+       if (refcount_read(&net->count) == 0)
+               return NETNSA_NSID_NOT_ASSIGNED;
+ 
+-      spin_lock(&net->nsid_lock);
++      spin_lock_bh(&net->nsid_lock);
+       id = __peernet2id(net, peer);
+       if (id >= 0) {
+-              spin_unlock(&net->nsid_lock);
++              spin_unlock_bh(&net->nsid_lock);
+               return id;
+       }
+ 
+@@ -264,12 +264,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
+        * just been idr_remove()'d from there in cleanup_net().
+        */
+       if (!maybe_get_net(peer)) {
+-              spin_unlock(&net->nsid_lock);
++              spin_unlock_bh(&net->nsid_lock);
+               return NETNSA_NSID_NOT_ASSIGNED;
+       }
+ 
+       id = alloc_netid(net, peer, -1);
+-      spin_unlock(&net->nsid_lock);
++      spin_unlock_bh(&net->nsid_lock);
+ 
+       put_net(peer);
+       if (id < 0)
+@@ -534,20 +534,20 @@ static void unhash_nsid(struct net *net, struct net *last)
+       for_each_net(tmp) {
+               int id;
+ 
+-              spin_lock(&tmp->nsid_lock);
++              spin_lock_bh(&tmp->nsid_lock);
+               id = __peernet2id(tmp, net);
+               if (id >= 0)
+                       idr_remove(&tmp->netns_ids, id);
+-              spin_unlock(&tmp->nsid_lock);
++              spin_unlock_bh(&tmp->nsid_lock);
+               if (id >= 0)
+                       rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
+                                         GFP_KERNEL);
+               if (tmp == last)
+                       break;
+       }
+-      spin_lock(&net->nsid_lock);
++      spin_lock_bh(&net->nsid_lock);
+       idr_destroy(&net->netns_ids);
+-      spin_unlock(&net->nsid_lock);
++      spin_unlock_bh(&net->nsid_lock);
+ }
+ 
+ static LLIST_HEAD(cleanup_list);
+@@ -760,9 +760,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
+               return PTR_ERR(peer);
+       }
+ 
+-      spin_lock(&net->nsid_lock);
++      spin_lock_bh(&net->nsid_lock);
+       if (__peernet2id(net, peer) >= 0) {
+-              spin_unlock(&net->nsid_lock);
++              spin_unlock_bh(&net->nsid_lock);
+               err = -EEXIST;
+               NL_SET_BAD_ATTR(extack, nla);
+               NL_SET_ERR_MSG(extack,
+@@ -771,7 +771,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
+       }
+ 
+       err = alloc_netid(net, peer, nsid);
+-      spin_unlock(&net->nsid_lock);
++      spin_unlock_bh(&net->nsid_lock);
+       if (err >= 0) {
+               rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
+                                 nlh, GFP_KERNEL);
+diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
+index d2a4553bcf39d..0fd1c2aa13615 100644
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -1426,6 +1426,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
+ {
+       const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+       struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
++      int prio;
+       int err;
+ 
+       if (!ops)
+@@ -1475,6 +1476,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
+               struct dcbnl_buffer *buffer =
+                       nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
+ 
++              for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
++                      if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
++                              err = -EINVAL;
++                              goto err;
++                      }
++              }
++
+               err = ops->dcbnl_setbuffer(netdev, buffer);
+               if (err)
+                       goto err;
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 4c7f086a047b1..3f7be8c64c504 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1801,15 +1801,27 @@ int dsa_slave_create(struct dsa_port *port)
+ 
+       dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
+ 
+-      ret = register_netdev(slave_dev);
++      rtnl_lock();
++
++      ret = register_netdevice(slave_dev);
+       if (ret) {
+               netdev_err(master, "error %d registering interface %s\n",
+                          ret, slave_dev->name);
++              rtnl_unlock();
+               goto out_phy;
+       }
+ 
++      ret = netdev_upper_dev_link(master, slave_dev, NULL);
++
++      rtnl_unlock();
++
++      if (ret)
++              goto out_unregister;
++
+       return 0;
+ 
++out_unregister:
++      unregister_netdev(slave_dev);
+ out_phy:
+       rtnl_lock();
+       phylink_disconnect_phy(p->dp->pl);
+@@ -1826,16 +1838,18 @@ out_free:
+ 
+ void dsa_slave_destroy(struct net_device *slave_dev)
+ {
++      struct net_device *master = dsa_slave_to_master(slave_dev);
+       struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+       struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ 
+       netif_carrier_off(slave_dev);
+       rtnl_lock();
++      netdev_upper_dev_unlink(master, slave_dev);
++      unregister_netdevice(slave_dev);
+       phylink_disconnect_phy(dp->pl);
+       rtnl_unlock();
+ 
+       dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
+-      unregister_netdev(slave_dev);
+       phylink_destroy(dp->pl);
+       gro_cells_destroy(&p->gcells);
+       free_percpu(p->stats64);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 41079490a1181..86a23e4a6a50f 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -362,6 +362,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+       fl4.flowi4_tun_key.tun_id = 0;
+       fl4.flowi4_flags = 0;
+       fl4.flowi4_uid = sock_net_uid(net, NULL);
++      fl4.flowi4_multipath_hash = 0;
+ 
+       no_addr = idev->ifa_list == NULL;
+ 
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 17206677d5033..f09a188397165 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -74,6 +74,7 @@
+ #include <net/icmp.h>
+ #include <net/checksum.h>
+ #include <net/inetpeer.h>
++#include <net/inet_ecn.h>
+ #include <net/lwtunnel.h>
+ #include <linux/bpf-cgroup.h>
+ #include <linux/igmp.h>
+@@ -1697,7 +1698,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+       if (IS_ERR(rt))
+               return;
+ 
+-      inet_sk(sk)->tos = arg->tos;
++      inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
+ 
+       sk->sk_protocol = ip_hdr(skb)->protocol;
+       sk->sk_bound_dev_if = arg->bound_dev_if;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index a01efa062f6bc..37f1288894747 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -786,8 +786,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+                       neigh_event_send(n, NULL);
+               } else {
+                       if (fib_lookup(net, fl4, &res, 0) == 0) {
+-                              struct fib_nh_common *nhc = FIB_RES_NHC(res);
++                              struct fib_nh_common *nhc;
+ 
++                              fib_select_path(net, &res, fl4, skb);
++                              nhc = FIB_RES_NHC(res);
+                               update_or_create_fnhe(nhc, fl4->daddr, new_gw,
+                                               0, false,
+                                               jiffies + ip_rt_gc_timeout);
+@@ -1013,6 +1015,7 @@ out:     kfree_skb(skb);
+ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+ {
+       struct dst_entry *dst = &rt->dst;
++      struct net *net = dev_net(dst->dev);
+       u32 old_mtu = ipv4_mtu(dst);
+       struct fib_result res;
+       bool lock = false;
+@@ -1033,9 +1036,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+               return;
+ 
+       rcu_read_lock();
+-      if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
+-              struct fib_nh_common *nhc = FIB_RES_NHC(res);
++      if (fib_lookup(net, fl4, &res, 0) == 0) {
++              struct fib_nh_common *nhc;
+ 
++              fib_select_path(net, &res, fl4, NULL);
++              nhc = FIB_RES_NHC(res);
+               update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
+                                     jiffies + ip_rt_mtu_expires);
+       }
+@@ -2142,6 +2147,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+       fl4.daddr = daddr;
+       fl4.saddr = saddr;
+       fl4.flowi4_uid = sock_net_uid(net, NULL);
++      fl4.flowi4_multipath_hash = 0;
+ 
+       if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
+               flkeys = &_flkeys;
+@@ -2662,8 +2668,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
+       fib_select_path(net, res, fl4, skb);
+ 
+       dev_out = FIB_RES_DEV(*res);
+-      fl4->flowi4_oif = dev_out->ifindex;
+-
+ 
+ make_route:
+       rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
+diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
+index f4f19e89af5ed..9d66af9e4c7fe 100644
+--- a/net/ipv6/Kconfig
++++ b/net/ipv6/Kconfig
+@@ -303,6 +303,7 @@ config IPV6_SEG6_LWTUNNEL
+ config IPV6_SEG6_HMAC
+       bool "IPv6: Segment Routing HMAC support"
+       depends on IPV6
++      select CRYPTO
+       select CRYPTO_HMAC
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 49ee89bbcba0c..3c32dcb5fd8e2 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1992,14 +1992,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
+ /* Need to own table->tb6_lock */
+ int fib6_del(struct fib6_info *rt, struct nl_info *info)
+ {
+-      struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
+-                                  lockdep_is_held(&rt->fib6_table->tb6_lock));
+-      struct fib6_table *table = rt->fib6_table;
+       struct net *net = info->nl_net;
+       struct fib6_info __rcu **rtp;
+       struct fib6_info __rcu **rtp_next;
++      struct fib6_table *table;
++      struct fib6_node *fn;
++
++      if (rt == net->ipv6.fib6_null_entry)
++              return -ENOENT;
+ 
+-      if (!fn || rt == net->ipv6.fib6_null_entry)
++      table = rt->fib6_table;
++      fn = rcu_dereference_protected(rt->fib6_node,
++                                     lockdep_is_held(&table->tb6_lock));
++      if (!fn)
+               return -ENOENT;
+ 
+       WARN_ON(!(fn->fn_flags & RTN_RTINFO));
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 85ab4559f0577..0f77e24a5152e 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -332,8 +332,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+ {
+       struct qrtr_hdr_v1 *hdr;
+       size_t len = skb->len;
+-      int rc = -ENODEV;
+-      int confirm_rx;
++      int rc, confirm_rx;
+ 
+       confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
+       if (confirm_rx < 0) {
+@@ -357,15 +356,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+       hdr->size = cpu_to_le32(len);
+       hdr->confirm_rx = !!confirm_rx;
+ 
+-      skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+-
+-      mutex_lock(&node->ep_lock);
+-      if (node->ep)
+-              rc = node->ep->xmit(node->ep, skb);
+-      else
+-              kfree_skb(skb);
+-      mutex_unlock(&node->ep_lock);
++      rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+ 
++      if (!rc) {
++              mutex_lock(&node->ep_lock);
++              rc = -ENODEV;
++              if (node->ep)
++                      rc = node->ep->xmit(node->ep, skb);
++              else
++                      kfree_skb(skb);
++              mutex_unlock(&node->ep_lock);
++      }
+       /* Need to ensure that a subsequent message carries the otherwise lost
+        * confirm_rx flag if we dropped this one */
+       if (rc && confirm_rx)
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index c1fcd85719d6a..5c568757643b2 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a)
+               kfree_rcu(p, rcu);
+ }
+ 
++static int load_metalist(struct nlattr **tb, bool rtnl_held)
++{
++      int i;
++
++      for (i = 1; i < max_metacnt; i++) {
++              if (tb[i]) {
++                      void *val = nla_data(tb[i]);
++                      int len = nla_len(tb[i]);
++                      int rc;
++
++                      rc = load_metaops_and_vet(i, val, len, rtnl_held);
++                      if (rc != 0)
++                              return rc;
++              }
++      }
++
++      return 0;
++}
++
+ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+                            bool exists, bool rtnl_held)
+ {
+@@ -449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+                       val = nla_data(tb[i]);
+                       len = nla_len(tb[i]);
+ 
+-                      rc = load_metaops_and_vet(i, val, len, rtnl_held);
+-                      if (rc != 0)
+-                              return rc;
+-
+                       rc = add_metainfo(ife, i, val, len, exists);
+                       if (rc)
+                               return rc;
+@@ -509,6 +524,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+       if (!p)
+               return -ENOMEM;
+ 
++      if (tb[TCA_IFE_METALST]) {
++              err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
++                                                tb[TCA_IFE_METALST], NULL,
++                                                NULL);
++              if (err) {
++                      kfree(p);
++                      return err;
++              }
++              err = load_metalist(tb2, rtnl_held);
++              if (err) {
++                      kfree(p);
++                      return err;
++              }
++      }
++
+       index = parm->index;
+       err = tcf_idr_check_alloc(tn, &index, a, bind);
+       if (err < 0) {
+@@ -570,15 +600,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+       }
+ 
+       if (tb[TCA_IFE_METALST]) {
+-              err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+-                                                tb[TCA_IFE_METALST], NULL,
+-                                                NULL);
+-              if (err)
+-                      goto metadata_parse_err;
+               err = populate_metalist(ife, tb2, exists, rtnl_held);
+               if (err)
+                       goto metadata_parse_err;
+-
+       } else {
+               /* if no passed metadata allow list or passed allow-all
+                * then here we process by adding as many supported metadatum
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index e30bd969fc485..5fe145d97f52e 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -1215,6 +1215,7 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
+               }
+               if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
+                       nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
++                      memset(&md->u, 0x00, sizeof(md->u));
+                       md->u.index = nla_get_be32(nla);
+               }
+       } else if (md->version == 2) {
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 265a61d011dfa..54c417244642a 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -1131,24 +1131,10 @@ EXPORT_SYMBOL(dev_activate);
+ 
+ static void qdisc_deactivate(struct Qdisc *qdisc)
+ {
+-      bool nolock = qdisc->flags & TCQ_F_NOLOCK;
+-
+       if (qdisc->flags & TCQ_F_BUILTIN)
+               return;
+-      if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state))
+-              return;
+-
+-      if (nolock)
+-              spin_lock_bh(&qdisc->seqlock);
+-      spin_lock_bh(qdisc_lock(qdisc));
+ 
+       set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+-
+-      qdisc_reset(qdisc);
+-
+-      spin_unlock_bh(qdisc_lock(qdisc));
+-      if (nolock)
+-              spin_unlock_bh(&qdisc->seqlock);
+ }
+ 
+ static void dev_deactivate_queue(struct net_device *dev,
+@@ -1165,6 +1151,30 @@ static void dev_deactivate_queue(struct net_device *dev,
+       }
+ }
+ 
++static void dev_reset_queue(struct net_device *dev,
++                          struct netdev_queue *dev_queue,
++                          void *_unused)
++{
++      struct Qdisc *qdisc;
++      bool nolock;
++
++      qdisc = dev_queue->qdisc_sleeping;
++      if (!qdisc)
++              return;
++
++      nolock = qdisc->flags & TCQ_F_NOLOCK;
++
++      if (nolock)
++              spin_lock_bh(&qdisc->seqlock);
++      spin_lock_bh(qdisc_lock(qdisc));
++
++      qdisc_reset(qdisc);
++
++      spin_unlock_bh(qdisc_lock(qdisc));
++      if (nolock)
++              spin_unlock_bh(&qdisc->seqlock);
++}
++
+ static bool some_qdisc_is_busy(struct net_device *dev)
+ {
+       unsigned int i;
+@@ -1213,12 +1223,20 @@ void dev_deactivate_many(struct list_head *head)
+               dev_watchdog_down(dev);
+       }
+ 
+-      /* Wait for outstanding qdisc-less dev_queue_xmit calls.
++      /* Wait for outstanding qdisc-less dev_queue_xmit calls or
++       * outstanding qdisc enqueuing calls.
+        * This is avoided if all devices are in dismantle phase :
+        * Caller will call synchronize_net() for us
+        */
+       synchronize_net();
+ 
++      list_for_each_entry(dev, head, close_list) {
++              netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
++
++              if (dev_ingress_queue(dev))
++                      dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
++      }
++
+       /* Wait for outstanding qdisc_run calls. */
+       list_for_each_entry(dev, head, close_list) {
+               while (some_qdisc_is_busy(dev)) {
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 6a5086e586efb..2b797a71e9bda 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
+       [TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
+ };
+ 
+-static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
++static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
++                          struct sched_entry *entry,
+                           struct netlink_ext_ack *extack)
+ {
++      int min_duration = length_to_duration(q, ETH_ZLEN);
+       u32 interval = 0;
+ 
+       if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
+@@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
+               interval = nla_get_u32(
+                       tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
+ 
+-      if (interval == 0) {
++      /* The interval should allow at least the minimum ethernet
++       * frame to go out.
++       */
++      if (interval < min_duration) {
+               NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
+               return -EINVAL;
+       }
+@@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
+       return 0;
+ }
+ 
+-static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
+-                           int index, struct netlink_ext_ack *extack)
++static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
++                           struct sched_entry *entry, int index,
++                           struct netlink_ext_ack *extack)
+ {
+       struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
+       int err;
+@@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
+ 
+       entry->index = index;
+ 
+-      return fill_sched_entry(tb, entry, extack);
++      return fill_sched_entry(q, tb, entry, extack);
+ }
+ 
+-static int parse_sched_list(struct nlattr *list,
++static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
+                           struct sched_gate_list *sched,
+                           struct netlink_ext_ack *extack)
+ {
+@@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list,
+                       return -ENOMEM;
+               }
+ 
+-              err = parse_sched_entry(n, entry, i, extack);
++              err = parse_sched_entry(q, n, entry, i, extack);
+               if (err < 0) {
+                       kfree(entry);
+                       return err;
+@@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list,
+       return i;
+ }
+ 
+-static int parse_taprio_schedule(struct nlattr **tb,
++static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
+                                struct sched_gate_list *new,
+                                struct netlink_ext_ack *extack)
+ {
+@@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb,
+               new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
+ 
+       if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
+-              err = parse_sched_list(
+-                      tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
++              err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
++                                     new, extack);
+       if (err < 0)
+               return err;
+ 
+@@ -1474,7 +1480,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+               goto free_sched;
+       }
+ 
+-      err = parse_taprio_schedule(tb, new_admin, extack);
++      err = parse_taprio_schedule(q, tb, new_admin, extack);
+       if (err < 0)
+               goto free_sched;
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index fa20e945700e0..102aee4f7dfde 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -9457,13 +9457,10 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ static inline void sctp_copy_descendant(struct sock *sk_to,
+                                       const struct sock *sk_from)
+ {
+-      int ancestor_size = sizeof(struct inet_sock) +
+-                          sizeof(struct sctp_sock) -
+-                          offsetof(struct sctp_sock, pd_lobby);
+-
+-      if (sk_from->sk_family == PF_INET6)
+-              ancestor_size += sizeof(struct ipv6_pinfo);
++      size_t ancestor_size = sizeof(struct inet_sock);
+ 
++      ancestor_size += sk_from->sk_prot->obj_size;
++      ancestor_size -= offsetof(struct sctp_sock, pd_lobby);
+       __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+ }
+ 
+diff --git a/net/tipc/group.c b/net/tipc/group.c
+index 89257e2a980de..f53871baa42eb 100644
+--- a/net/tipc/group.c
++++ b/net/tipc/group.c
+@@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
+       return NULL;
+ }
+ 
+-static void tipc_group_add_to_tree(struct tipc_group *grp,
+-                                 struct tipc_member *m)
++static int tipc_group_add_to_tree(struct tipc_group *grp,
++                                struct tipc_member *m)
+ {
+       u64 nkey, key = (u64)m->node << 32 | m->port;
+       struct rb_node **n, *parent = NULL;
+@@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp,
+               else if (key > nkey)
+                       n = &(*n)->rb_right;
+               else
+-                      return;
++                      return -EEXIST;
+       }
+       rb_link_node(&m->tree_node, parent, n);
+       rb_insert_color(&m->tree_node, &grp->members);
++      return 0;
+ }
+ 
+ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
+@@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
+                                                   u32 instance, int state)
+ {
+       struct tipc_member *m;
++      int ret;
+ 
+       m = kzalloc(sizeof(*m), GFP_ATOMIC);
+       if (!m)
+@@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
+       m->port = port;
+       m->instance = instance;
+       m->bc_acked = grp->bc_snd_nxt - 1;
++      ret = tipc_group_add_to_tree(grp, m);
++      if (ret < 0) {
++              kfree(m);
++              return NULL;
++      }
+       grp->member_cnt++;
+-      tipc_group_add_to_tree(grp, m);
+       tipc_nlist_add(&grp->dests, m->node);
+       m->state = state;
+       return m;
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 01b64869a1739..2776a41e0dece 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+       if (fragid == FIRST_FRAGMENT) {
+               if (unlikely(head))
+                       goto err;
+-              if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
++              frag = skb_unshare(frag, GFP_ATOMIC);
++              if (unlikely(!frag))
+                       goto err;
+               head = *headbuf = frag;
+               *buf = NULL;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 79cc84393f932..59c9e592b0a25 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2773,10 +2773,7 @@ static int tipc_shutdown(struct socket *sock, int how)
+ 
+       trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
+       __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
+-      if (tipc_sk_type_connectionless(sk))
+-              sk->sk_shutdown = SHUTDOWN_MASK;
+-      else
+-              sk->sk_shutdown = SEND_SHUTDOWN;
++      sk->sk_shutdown = SHUTDOWN_MASK;
+ 
+       if (sk->sk_state == TIPC_DISCONNECTING) {
+               /* Discard any unreceived messages */
