commit:     cb9be97dad62162639ad341e50c3dcd6b5691545
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 28 16:27:15 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 28 16:27:15 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb9be97d

Linux patch 4.19.62

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1061_linux-4.19.62.patch | 2185 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2189 insertions(+)

diff --git a/0000_README b/0000_README
index 301b6bf..6ff7ffd 100644
--- a/0000_README
+++ b/0000_README
@@ -287,6 +287,10 @@ Patch:  1060-4.19.61.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.61
 
+Patch:  1061-4.19.62.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.62
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1061_linux-4.19.62.patch b/1061_linux-4.19.62.patch
new file mode 100644
index 0000000..b41f9ba
--- /dev/null
+++ b/1061_linux-4.19.62.patch
@@ -0,0 +1,2185 @@
+diff --git a/Makefile b/Makefile
+index b16485c580d7..a4463d880ae2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 61
++SUBLEVEL = 62
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
+index 705593d40d12..05c60fa4fa06 100644
+--- a/arch/mips/jz4740/board-qi_lb60.c
++++ b/arch/mips/jz4740/board-qi_lb60.c
+@@ -471,27 +471,27 @@ static unsigned long pin_cfg_bias_disable[] = {
+ static struct pinctrl_map pin_map[] __initdata = {
+       /* NAND pin configuration */
+       PIN_MAP_MUX_GROUP_DEFAULT("jz4740-nand",
+-                      "10010000.jz4740-pinctrl", "nand", "nand-cs1"),
++                      "10010000.pin-controller", "nand-cs1", "nand"),
+ 
+       /* fbdev pin configuration */
+       PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_DEFAULT,
+-                      "10010000.jz4740-pinctrl", "lcd", "lcd-8bit"),
++                      "10010000.pin-controller", "lcd-8bit", "lcd"),
+       PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_SLEEP,
+-                      "10010000.jz4740-pinctrl", "lcd", "lcd-no-pins"),
++                      "10010000.pin-controller", "lcd-no-pins", "lcd"),
+ 
+       /* MMC pin configuration */
+       PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
+-                      "10010000.jz4740-pinctrl", "mmc", "mmc-1bit"),
++                      "10010000.pin-controller", "mmc-1bit", "mmc"),
+       PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
+-                      "10010000.jz4740-pinctrl", "mmc", "mmc-4bit"),
++                      "10010000.pin-controller", "mmc-4bit", "mmc"),
+       PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
+-                      "10010000.jz4740-pinctrl", "PD0", pin_cfg_bias_disable),
++                      "10010000.pin-controller", "PD0", pin_cfg_bias_disable),
+       PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
+-                      "10010000.jz4740-pinctrl", "PD2", pin_cfg_bias_disable),
++                      "10010000.pin-controller", "PD2", pin_cfg_bias_disable),
+ 
+       /* PWM pin configuration */
+       PIN_MAP_MUX_GROUP_DEFAULT("jz4740-pwm",
+-                      "10010000.jz4740-pinctrl", "pwm4", "pwm4"),
++                      "10010000.pin-controller", "pwm4", "pwm4"),
+ };
+ 
+ 
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 73d6d585dd66..4cf16378dffe 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8457,6 +8457,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
+ {
+       vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
+       vmcs_write64(VMCS_LINK_POINTER, -1ull);
++      vmx->nested.sync_shadow_vmcs = false;
+ }
+ 
+ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
+@@ -8468,7 +8469,6 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
+               /* copy to memory all shadowed fields in case
+                  they were modified */
+               copy_shadow_to_vmcs12(vmx);
+-              vmx->nested.sync_shadow_vmcs = false;
+               vmx_disable_shadow_vmcs(vmx);
+       }
+       vmx->nested.posted_intr_nv = -1;
+@@ -8490,6 +8490,8 @@ static void free_nested(struct vcpu_vmx *vmx)
+       if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
+               return;
+ 
++      kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, &vmx->vcpu);
++
+       hrtimer_cancel(&vmx->nested.preemption_timer);
+       vmx->nested.vmxon = false;
+       vmx->nested.smm.vmxon = false;
+@@ -8668,6 +8670,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
+       u64 field_value;
+       struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+ 
++      if (WARN_ON(!shadow_vmcs))
++              return;
++
+       preempt_disable();
+ 
+       vmcs_load(shadow_vmcs);
+@@ -8706,6 +8711,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
+       u64 field_value = 0;
+       struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+ 
++      if (WARN_ON(!shadow_vmcs))
++              return;
++
+       vmcs_load(shadow_vmcs);
+ 
+       for (q = 0; q < ARRAY_SIZE(fields); q++) {
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index 13884474d158..69842145c223 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -1069,6 +1069,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
+                                  fence->ops->get_driver_name(fence),
+                                  fence->ops->get_timeline_name(fence),
+                                  dma_fence_is_signaled(fence) ? "" : "un");
++                      dma_fence_put(fence);
+               }
+               rcu_read_unlock();
+ 
+diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
+index 6c95f61a32e7..49ab09468ba1 100644
+--- a/drivers/dma-buf/reservation.c
++++ b/drivers/dma-buf/reservation.c
+@@ -416,6 +416,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
+                                          GFP_NOWAIT | __GFP_NOWARN);
+                       if (!nshared) {
+                               rcu_read_unlock();
++
++                              dma_fence_put(fence_excl);
++                              fence_excl = NULL;
++
+                               nshared = krealloc(shared, sz, GFP_KERNEL);
+                               if (nshared) {
+                                       shared = nshared;
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index a5ece8ea79bc..abb332d15a13 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -222,8 +222,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
+       for (i = 0; i < nirq; i++) {
+               chips->irqs[i] = platform_get_irq(pdev, i);
+               if (chips->irqs[i] < 0) {
+-                      dev_info(dev, "IRQ not populated, err = %d\n",
+-                               chips->irqs[i]);
++                      if (chips->irqs[i] != -EPROBE_DEFER)
++                              dev_info(dev, "IRQ not populated, err = %d\n",
++                                       chips->irqs[i]);
+                       return chips->irqs[i];
+               }
+       }
+diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+index 433a14b9f731..253a1bbe37e8 100644
+--- a/drivers/net/caif/caif_hsi.c
++++ b/drivers/net/caif/caif_hsi.c
+@@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void)
+       rtnl_lock();
+       list_for_each_safe(list_node, n, &cfhsi_list) {
+               cfhsi = list_entry(list_node, struct cfhsi, list);
+-              unregister_netdev(cfhsi->ndev);
++              unregister_netdevice(cfhsi->ndev);
+       }
+       rtnl_unlock();
+ }
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 411cfb806459..703e6bdaf0e1 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -4816,6 +4816,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+               err = PTR_ERR(chip->reset);
+               goto out;
+       }
++      if (chip->reset)
++              usleep_range(1000, 2000);
+ 
+       err = mv88e6xxx_detect(chip);
+       if (err)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index e3ce29951c5e..3edb81a4f075 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -286,6 +286,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+       hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+       sw_cons = txdata->tx_pkt_cons;
+ 
++      /* Ensure subsequent loads occur after hw_cons */
++      smp_rmb();
++
+       while (sw_cons != hw_cons) {
+               u16 pkt_cons;
+ 
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 2d6f090bf644..fd587bed32eb 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -3086,39 +3086,42 @@ static void bcmgenet_timeout(struct net_device *dev)
+       netif_tx_wake_all_queues(dev);
+ }
+ 
+-#define MAX_MC_COUNT  16
++#define MAX_MDF_FILTER        17
+ 
+ static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+                                        unsigned char *addr,
+-                                       int *i,
+-                                       int *mc)
++                                       int *i)
+ {
+-      u32 reg;
+-
+       bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
+                            UMAC_MDF_ADDR + (*i * 4));
+       bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
+                            addr[4] << 8 | addr[5],
+                            UMAC_MDF_ADDR + ((*i + 1) * 4));
+-      reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+-      reg |= (1 << (MAX_MC_COUNT - *mc));
+-      bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+       *i += 2;
+-      (*mc)++;
+ }
+ 
+ static void bcmgenet_set_rx_mode(struct net_device *dev)
+ {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+-      int i, mc;
++      int i, nfilter;
+       u32 reg;
+ 
+       netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+ 
+-      /* Promiscuous mode */
++      /* Number of filters needed */
++      nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
++
++      /*
++       * Turn on promiscuous mode for three scenarios
++       * 1. IFF_PROMISC flag is set
++       * 2. IFF_ALLMULTI flag is set
++       * 3. The number of filters needed exceeds the number of filters
++       *    supported by the hardware.
++      */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+-      if (dev->flags & IFF_PROMISC) {
++      if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
++          (nfilter > MAX_MDF_FILTER)) {
+               reg |= CMD_PROMISC;
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+               bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+@@ -3128,32 +3131,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       }
+ 
+-      /* UniMac doesn't support ALLMULTI */
+-      if (dev->flags & IFF_ALLMULTI) {
+-              netdev_warn(dev, "ALLMULTI is not supported\n");
+-              return;
+-      }
+-
+       /* update MDF filter */
+       i = 0;
+-      mc = 0;
+       /* Broadcast */
+-      bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
++      bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
+       /* my own address.*/
+-      bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+-      /* Unicast list*/
+-      if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+-              return;
++      bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
+ 
+-      if (!netdev_uc_empty(dev))
+-              netdev_for_each_uc_addr(ha, dev)
+-                      bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+-      /* Multicast */
+-      if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+-              return;
++      /* Unicast */
++      netdev_for_each_uc_addr(ha, dev)
++              bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+ 
++      /* Multicast */
+       netdev_for_each_mc_addr(ha, dev)
+-              bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
++              bcmgenet_set_mdf_addr(priv, ha->addr, &i);
++
++      /* Enable filters */
++      reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
++      bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+ }
+ 
+ /* Set the hardware MAC address. */
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 1485f66cf7b0..4ade864c8d53 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -4947,6 +4947,13 @@ static const struct dmi_system_id msi_blacklist[] = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+               },
+       },
++      {
++              .ident = "ASUS P6T",
++              .matches = {
++                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++                      DMI_MATCH(DMI_BOARD_NAME, "P6T"),
++              },
++      },
+       {}
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 5b7fe8264144..db6aafcced0d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -662,7 +662,9 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ 
+       profile->init(mdev, netdev, profile, ipriv);
+ 
+-      mlx5e_attach_netdev(epriv);
++      err = mlx5e_attach_netdev(epriv);
++      if (err)
++              goto detach;
+       netif_carrier_off(netdev);
+ 
+       /* set rdma_netdev func pointers */
+@@ -678,6 +680,11 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ 
+       return netdev;
+ 
++detach:
++      profile->cleanup(epriv);
++      if (ipriv->sub_interface)
++              return NULL;
++      mlx5e_destroy_mdev_resources(mdev);
+ destroy_ht:
+       mlx5i_pkey_qpn_ht_cleanup(netdev);
+ destroy_wq:
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 7a50b911b180..a6992c4c7313 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5202,6 +5202,143 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
+       /* disable aspm and clock request before access ephy */
+       rtl_hw_aspm_clkreq_enable(tp, false);
+       rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
++
++      /* The following Realtek-provided magic fixes an issue with the RX unit
++       * getting confused after the PHY has been powered down.
++       */
++      r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
++      r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
++      r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
++      r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
++      r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
++      r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
++      r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
++      r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
++      mdelay(3);
++      r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
++
++      r8168_mac_ocp_write(tp, 0xF800, 0xE008);
++      r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
++      r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
++      r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
++      r8168_mac_ocp_write(tp, 0xF808, 0xE027);
++      r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
++      r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
++      r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
++      r8168_mac_ocp_write(tp, 0xF810, 0xC602);
++      r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
++      r8168_mac_ocp_write(tp, 0xF814, 0x0000);
++      r8168_mac_ocp_write(tp, 0xF816, 0xC502);
++      r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
++      r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
++      r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
++      r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
++      r8168_mac_ocp_write(tp, 0xF820, 0x080A);
++      r8168_mac_ocp_write(tp, 0xF822, 0x6420);
++      r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
++      r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
++      r8168_mac_ocp_write(tp, 0xF828, 0xC516);
++      r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
++      r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
++      r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
++      r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
++      r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
++      r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
++      r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
++      r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
++      r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
++      r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
++      r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
++      r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
++      r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
++      r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
++      r8168_mac_ocp_write(tp, 0xF846, 0xC404);
++      r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
++      r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
++      r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
++      r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
++      r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
++      r8168_mac_ocp_write(tp, 0xF852, 0xE434);
++      r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
++      r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
++      r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
++      r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
++      r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
++      r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
++      r8168_mac_ocp_write(tp, 0xF860, 0xF007);
++      r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
++      r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
++      r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
++      r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
++      r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
++      r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
++      r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
++      r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
++      r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
++      r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
++      r8168_mac_ocp_write(tp, 0xF876, 0xC516);
++      r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
++      r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
++      r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
++      r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
++      r8168_mac_ocp_write(tp, 0xF880, 0xC512);
++      r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
++      r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
++      r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
++      r8168_mac_ocp_write(tp, 0xF888, 0x483F);
++      r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
++      r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
++      r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
++      r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
++      r8168_mac_ocp_write(tp, 0xF892, 0xC505);
++      r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
++      r8168_mac_ocp_write(tp, 0xF896, 0xC502);
++      r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
++      r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
++      r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
++      r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
++      r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
++      r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
++      r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
++      r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
++      r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
++      r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
++      r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
++      r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
++      r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
++      r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
++      r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
++      r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
++      r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
++      r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
++      r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
++      r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
++      r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
++      r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
++      r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
++      r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
++      r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
++      r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
++      r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
++      r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
++      r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
++      r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
++      r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
++      r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
++      r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
++      r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
++      r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
++
++      r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
++
++      r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
++      r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
++      r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
++      r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
++      r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
++      r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
++      r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
++
+       rtl_hw_aspm_clkreq_enable(tp, true);
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 5c18874614ba..0101ebaecf02 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3036,17 +3036,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ 
+       /* Manage oversized TCP frames for GMAC4 device */
+       if (skb_is_gso(skb) && priv->tso) {
+-              if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+-                      /*
+-                       * There is no way to determine the number of TSO
+-                       * capable Queues. Let's use always the Queue 0
+-                       * because if TSO is supported then at least this
+-                       * one will be capable.
+-                       */
+-                      skb_set_queue_mapping(skb, 0);
+-
++              if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+                       return stmmac_tso_xmit(skb, dev);
+-              }
+       }
+ 
+       if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+@@ -3855,6 +3846,23 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+       }
+ }
+ 
++static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
++                             struct net_device *sb_dev,
++                             select_queue_fallback_t fallback)
++{
++      if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
++              /*
++               * There is no way to determine the number of TSO
++               * capable Queues. Let's use always the Queue 0
++               * because if TSO is supported then at least this
++               * one will be capable.
++               */
++              return 0;
++      }
++
++      return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
++}
++
+ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+ {
+       struct stmmac_priv *priv = netdev_priv(ndev);
+@@ -4097,6 +4105,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
+       .ndo_tx_timeout = stmmac_tx_timeout,
+       .ndo_do_ioctl = stmmac_ioctl,
+       .ndo_setup_tc = stmmac_setup_tc,
++      .ndo_select_queue = stmmac_select_queue,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller = stmmac_poll_controller,
+ #endif
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index cf6b9b1771f1..cc60ef9634db 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -847,7 +847,6 @@ int netvsc_recv_callback(struct net_device *net,
+                                   csum_info, vlan, data, len);
+       if (unlikely(!skb)) {
+               ++net_device_ctx->eth_stats.rx_no_memory;
+-              rcu_read_unlock();
+               return NVSP_STAT_FAIL;
+       }
+ 
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 7de88b33d5b9..2c971357e66c 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -869,6 +869,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
+ 
+ static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
+ {
++      skb->ip_summed = CHECKSUM_NONE;
+       memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
+       skb_pull(skb, hdr_len);
+       pskb_trim_unique(skb, skb->len - icv_len);
+@@ -1103,10 +1104,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+       }
+ 
+       skb = skb_unshare(skb, GFP_ATOMIC);
+-      if (!skb) {
+-              *pskb = NULL;
++      *pskb = skb;
++      if (!skb)
+               return RX_HANDLER_CONSUMED;
+-      }
+ 
+       pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
+       if (!pulled_sci) {
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 418522aa2f71..998d08ae7431 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -514,7 +514,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
+ 
+ static void sfp_hwmon_to_rx_power(long *value)
+ {
+-      *value = DIV_ROUND_CLOSEST(*value, 100);
++      *value = DIV_ROUND_CLOSEST(*value, 10);
+ }
+ 
+ static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 449fc52f9a89..9f895083bc0a 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -169,23 +169,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
+ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+                                          struct net_device *dev)
+ {
+-      const struct ipv6hdr *iph = ipv6_hdr(skb);
++      const struct ipv6hdr *iph;
+       struct net *net = dev_net(skb->dev);
+-      struct flowi6 fl6 = {
+-              /* needed to match OIF rule */
+-              .flowi6_oif = dev->ifindex,
+-              .flowi6_iif = LOOPBACK_IFINDEX,
+-              .daddr = iph->daddr,
+-              .saddr = iph->saddr,
+-              .flowlabel = ip6_flowinfo(iph),
+-              .flowi6_mark = skb->mark,
+-              .flowi6_proto = iph->nexthdr,
+-              .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
+-      };
++      struct flowi6 fl6;
+       int ret = NET_XMIT_DROP;
+       struct dst_entry *dst;
+       struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
+ 
++      if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
++              goto err;
++
++      iph = ipv6_hdr(skb);
++
++      memset(&fl6, 0, sizeof(fl6));
++      /* needed to match OIF rule */
++      fl6.flowi6_oif = dev->ifindex;
++      fl6.flowi6_iif = LOOPBACK_IFINDEX;
++      fl6.daddr = iph->daddr;
++      fl6.saddr = iph->saddr;
++      fl6.flowlabel = ip6_flowinfo(iph);
++      fl6.flowi6_mark = skb->mark;
++      fl6.flowi6_proto = iph->nexthdr;
++      fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
++
+       dst = ip6_route_output(net, NULL, &fl6);
+       if (dst == dst_null)
+               goto err;
+@@ -241,21 +247,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
+ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
+                                          struct net_device *vrf_dev)
+ {
+-      struct iphdr *ip4h = ip_hdr(skb);
++      struct iphdr *ip4h;
+       int ret = NET_XMIT_DROP;
+-      struct flowi4 fl4 = {
+-              /* needed to match OIF rule */
+-              .flowi4_oif = vrf_dev->ifindex,
+-              .flowi4_iif = LOOPBACK_IFINDEX,
+-              .flowi4_tos = RT_TOS(ip4h->tos),
+-              .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+-              .flowi4_proto = ip4h->protocol,
+-              .daddr = ip4h->daddr,
+-              .saddr = ip4h->saddr,
+-      };
++      struct flowi4 fl4;
+       struct net *net = dev_net(vrf_dev);
+       struct rtable *rt;
+ 
++      if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
++              goto err;
++
++      ip4h = ip_hdr(skb);
++
++      memset(&fl4, 0, sizeof(fl4));
++      /* needed to match OIF rule */
++      fl4.flowi4_oif = vrf_dev->ifindex;
++      fl4.flowi4_iif = LOOPBACK_IFINDEX;
++      fl4.flowi4_tos = RT_TOS(ip4h->tos);
++      fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
++      fl4.flowi4_proto = ip4h->protocol;
++      fl4.daddr = ip4h->daddr;
++      fl4.saddr = ip4h->saddr;
++
+       rt = ip_route_output_flow(net, &fl4, NULL);
+       if (IS_ERR(rt))
+               goto err;
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index f93f9881ec18..46d5c40f2835 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -108,7 +108,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+       struct inode *inode = file_inode(file);
+       struct super_block *sb = inode->i_sb;
+       struct buffer_head *bh = NULL;
+-      int dir_has_error = 0;
+       struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
+ 
+       if (ext4_encrypted_inode(inode)) {
+@@ -144,8 +143,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+                       return err;
+       }
+ 
+-      offset = ctx->pos & (sb->s_blocksize - 1);
+-
+       while (ctx->pos < inode->i_size) {
+               struct ext4_map_blocks map;
+ 
+@@ -154,9 +151,18 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+                       goto errout;
+               }
+               cond_resched();
++              offset = ctx->pos & (sb->s_blocksize - 1);
+               map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
+               map.m_len = 1;
+               err = ext4_map_blocks(NULL, inode, &map, 0);
++              if (err == 0) {
++                      /* m_len should never be zero but let's avoid
++                       * an infinite loop if it somehow is */
++                      if (map.m_len == 0)
++                              map.m_len = 1;
++                      ctx->pos += map.m_len * sb->s_blocksize;
++                      continue;
++              }
+               if (err > 0) {
+                       pgoff_t index = map.m_pblk >>
+                                       (PAGE_SHIFT - inode->i_blkbits);
+@@ -175,13 +181,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+               }
+ 
+               if (!bh) {
+-                      if (!dir_has_error) {
+-                              EXT4_ERROR_FILE(file, 0,
+-                                              "directory contains a "
+-                                              "hole at offset %llu",
+-                                         (unsigned long long) ctx->pos);
+-                              dir_has_error = 1;
+-                      }
+                       /* corrupt size?  Maybe no more blocks to read */
+                       if (ctx->pos > inode->i_blocks << 9)
+                               break;
+diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
+index df908ef79cce..402dc366117e 100644
+--- a/fs/ext4/ext4_jbd2.h
++++ b/fs/ext4/ext4_jbd2.h
+@@ -361,20 +361,20 @@ static inline int ext4_journal_force_commit(journal_t *journal)
+ }
+ 
+ static inline int ext4_jbd2_inode_add_write(handle_t *handle,
+-                                          struct inode *inode)
++              struct inode *inode, loff_t start_byte, loff_t length)
+ {
+       if (ext4_handle_valid(handle))
+-              return jbd2_journal_inode_add_write(handle,
+-                                                  EXT4_I(inode)->jinode);
++              return jbd2_journal_inode_ranged_write(handle,
++                              EXT4_I(inode)->jinode, start_byte, length);
+       return 0;
+ }
+ 
+ static inline int ext4_jbd2_inode_add_wait(handle_t *handle,
+-                                         struct inode *inode)
++              struct inode *inode, loff_t start_byte, loff_t length)
+ {
+       if (ext4_handle_valid(handle))
+-              return jbd2_journal_inode_add_wait(handle,
+-                                                 EXT4_I(inode)->jinode);
++              return jbd2_journal_inode_ranged_wait(handle,
++                              EXT4_I(inode)->jinode, start_byte, length);
+       return 0;
+ }
+ 
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 2c5baa5e8291..f4a24a46245e 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -165,6 +165,10 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+       ret = generic_write_checks(iocb, from);
+       if (ret <= 0)
+               return ret;
++
++      if (unlikely(IS_IMMUTABLE(inode)))
++              return -EPERM;
++
+       /*
+        * If we have encountered a bitmap-format file, the size limit
+        * is smaller than s_maxbytes, which is for extent-mapped files.
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 05dc5a4ba481..e65559bf7728 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -729,10 +729,16 @@ out_sem:
+                   !(flags & EXT4_GET_BLOCKS_ZERO) &&
+                   !ext4_is_quota_file(inode) &&
+                   ext4_should_order_data(inode)) {
++                      loff_t start_byte =
++                              (loff_t)map->m_lblk << inode->i_blkbits;
++                      loff_t length = (loff_t)map->m_len << inode->i_blkbits;
++
+                       if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
+-                              ret = ext4_jbd2_inode_add_wait(handle, inode);
++                              ret = ext4_jbd2_inode_add_wait(handle, inode,
++                                              start_byte, length);
+                       else
+-                              ret = ext4_jbd2_inode_add_write(handle, inode);
++                              ret = ext4_jbd2_inode_add_write(handle, inode,
++                                              start_byte, length);
+                       if (ret)
+                               return ret;
+               }
+@@ -4058,7 +4064,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
+               err = 0;
+               mark_buffer_dirty(bh);
+               if (ext4_should_order_data(inode))
+-                      err = ext4_jbd2_inode_add_write(handle, inode);
++                      err = ext4_jbd2_inode_add_write(handle, inode, from,
++                                      length);
+       }
+ 
+ unlock:
+@@ -5491,6 +5498,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+       if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+               return -EIO;
+ 
++      if (unlikely(IS_IMMUTABLE(inode)))
++              return -EPERM;
++
++      if (unlikely(IS_APPEND(inode) &&
++                   (ia_valid & (ATTR_MODE | ATTR_UID |
++                                ATTR_GID | ATTR_TIMES_SET))))
++              return -EPERM;
++
+       error = setattr_prepare(dentry, attr);
+       if (error)
+               return error;
+@@ -6190,6 +6205,9 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
+       get_block_t *get_block;
+       int retries = 0;
+ 
++      if (unlikely(IS_IMMUTABLE(inode)))
++              return VM_FAULT_SIGBUS;
++
+       sb_start_pagefault(inode->i_sb);
+       file_update_time(vma->vm_file);
+ 
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 53d57cdf3c4d..abb6fcff0a1d 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -268,6 +268,29 @@ static int uuid_is_zero(__u8 u[16])
+ }
+ #endif
+ 
++/*
++ * If immutable is set and we are not clearing it, we're not allowed to change
++ * anything else in the inode.  Don't error out if we're only trying to set
++ * immutable on an immutable file.
++ */
++static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
++                                    unsigned int flags)
++{
++      struct ext4_inode_info *ei = EXT4_I(inode);
++      unsigned int oldflags = ei->i_flags;
++
++      if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL))
++              return 0;
++
++      if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL))
++              return -EPERM;
++      if (ext4_has_feature_project(inode->i_sb) &&
++          __kprojid_val(ei->i_projid) != new_projid)
++              return -EPERM;
++
++      return 0;
++}
++
+ static int ext4_ioctl_setflags(struct inode *inode,
+                              unsigned int flags)
+ {
+@@ -321,6 +344,20 @@ static int ext4_ioctl_setflags(struct inode *inode,
+                       goto flags_out;
+       }
+ 
++      /*
++       * Wait for all pending directio and then flush all the dirty pages
++       * for this file.  The flush marks all the pages readonly, so any
++       * subsequent attempt to write to the file (particularly mmap pages)
++       * will come through the filesystem and fail.
++       */
++      if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
++          (flags & EXT4_IMMUTABLE_FL)) {
++              inode_dio_wait(inode);
++              err = filemap_write_and_wait(inode->i_mapping);
++              if (err)
++                      goto flags_out;
++      }
++
+       handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
+       if (IS_ERR(handle)) {
+               err = PTR_ERR(handle);
+@@ -750,7 +787,11 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+                       return err;
+ 
+               inode_lock(inode);
+-              err = ext4_ioctl_setflags(inode, flags);
++              err = ext4_ioctl_check_immutable(inode,
++                              from_kprojid(&init_user_ns, ei->i_projid),
++                              flags);
++              if (!err)
++                      err = ext4_ioctl_setflags(inode, flags);
+               inode_unlock(inode);
+               mnt_drop_write_file(filp);
+               return err;
+@@ -1120,6 +1161,9 @@ resizefs_out:
+                       goto out;
+               flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
+                        (flags & EXT4_FL_XFLAG_VISIBLE);
++              err = ext4_ioctl_check_immutable(inode, fa.fsx_projid, flags);
++              if (err)
++                      goto out;
+               err = ext4_ioctl_setflags(inode, flags);
+               if (err)
+                       goto out;
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 2f5be02fc6f6..287631bb09e7 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -390,7 +390,8 @@ data_copy:
+ 
+       /* Even in case of data=writeback it is reasonable to pin
+        * inode to transaction, to prevent unexpected data loss */
+-      *err = ext4_jbd2_inode_add_write(handle, orig_inode);
++      *err = ext4_jbd2_inode_add_write(handle, orig_inode,
++                      (loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
+ 
+ unlock_pages:
+       unlock_page(pagep[0]);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4c5aa5df6573..61dc1b0e4465 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -81,8 +81,18 @@ static struct buffer_head *ext4_append(handle_t *handle,
+ static int ext4_dx_csum_verify(struct inode *inode,
+                              struct ext4_dir_entry *dirent);
+ 
++/*
++ * Hints to ext4_read_dirblock regarding whether we expect a directory
++ * block being read to be an index block, or a block containing
++ * directory entries (and if the latter, whether it was found via a
++ * logical block in an htree index block).  This is used to control
++ * what sort of sanity checking ext4_read_dirblock() will do on the
++ * directory block read from the storage device.  EITHER means
++ * the caller doesn't know what kind of directory block will be read,
++ * so no specific verification will be done.
++ */
+ typedef enum {
+-      EITHER, INDEX, DIRENT
++      EITHER, INDEX, DIRENT, DIRENT_HTREE
+ } dirblock_type_t;
+ 
+ #define ext4_read_dirblock(inode, block, type) \
+@@ -108,11 +118,14 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+ 
+               return bh;
+       }
+-      if (!bh) {
++      if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
+               ext4_error_inode(inode, func, line, block,
+-                               "Directory hole found");
++                               "Directory hole found for htree %s block",
++                               (type == INDEX) ? "index" : "leaf");
+               return ERR_PTR(-EFSCORRUPTED);
+       }
++      if (!bh)
++              return NULL;
+       dirent = (struct ext4_dir_entry *) bh->b_data;
+       /* Determine whether or not we have an index block */
+       if (is_dx(inode)) {
+@@ -979,7 +992,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ 
+       dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
+                                                       (unsigned long)block));
+-      bh = ext4_read_dirblock(dir, block, DIRENT);
++      bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
+       if (IS_ERR(bh))
+               return PTR_ERR(bh);
+ 
+@@ -1509,7 +1522,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+               return (struct buffer_head *) frame;
+       do {
+               block = dx_get_block(frame->at);
+-              bh = ext4_read_dirblock(dir, block, DIRENT);
++              bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
+               if (IS_ERR(bh))
+                       goto errout;
+ 
+@@ -2079,6 +2092,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+       blocks = dir->i_size >> sb->s_blocksize_bits;
+       for (block = 0; block < blocks; block++) {
+               bh = ext4_read_dirblock(dir, block, DIRENT);
++              if (bh == NULL) {
++                      bh = ext4_bread(handle, dir, block,
++                                      EXT4_GET_BLOCKS_CREATE);
++                      goto add_to_new_block;
++              }
+               if (IS_ERR(bh)) {
+                       retval = PTR_ERR(bh);
+                       bh = NULL;
+@@ -2099,6 +2117,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+               brelse(bh);
+       }
+       bh = ext4_append(handle, dir, &block);
++add_to_new_block:
+       if (IS_ERR(bh)) {
+               retval = PTR_ERR(bh);
+               bh = NULL;
+@@ -2143,7 +2162,7 @@ again:
+               return PTR_ERR(frame);
+       entries = frame->entries;
+       at = frame->at;
+-      bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
++      bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE);
+       if (IS_ERR(bh)) {
+               err = PTR_ERR(bh);
+               bh = NULL;
+@@ -2691,7 +2710,10 @@ bool ext4_empty_dir(struct inode *inode)
+               EXT4_ERROR_INODE(inode, "invalid size");
+               return true;
+       }
+-      bh = ext4_read_dirblock(inode, 0, EITHER);
++      /* The first directory block must not be a hole,
++       * so treat it as DIRENT_HTREE
++       */
++      bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
+       if (IS_ERR(bh))
+               return true;
+ 
+@@ -2713,6 +2735,10 @@ bool ext4_empty_dir(struct inode *inode)
+                       brelse(bh);
+                       lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+                       bh = ext4_read_dirblock(inode, lblock, EITHER);
++                      if (bh == NULL) {
++                              offset += sb->s_blocksize;
++                              continue;
++                      }
+                       if (IS_ERR(bh))
+                               return true;
+                       de = (struct ext4_dir_entry_2 *) bh->b_data;
+@@ -3256,7 +3282,10 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
+       struct buffer_head *bh;
+ 
+       if (!ext4_has_inline_data(inode)) {
+-              bh = ext4_read_dirblock(inode, 0, EITHER);
++              /* The first directory block must not be a hole, so
++               * treat it as DIRENT_HTREE
++               */
++              bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
+               if (IS_ERR(bh)) {
+                       *retval = PTR_ERR(bh);
+                       return NULL;
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 65ea0355a4f6..24f86ffe11d7 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -187,14 +187,15 @@ static int journal_wait_on_commit_record(journal_t *journal,
+  * use writepages() because with delayed allocation we may be doing
+  * block allocation in writepages().
+  */
+-static int journal_submit_inode_data_buffers(struct address_space *mapping)
++static int journal_submit_inode_data_buffers(struct address_space *mapping,
++              loff_t dirty_start, loff_t dirty_end)
+ {
+       int ret;
+       struct writeback_control wbc = {
+               .sync_mode =  WB_SYNC_ALL,
+               .nr_to_write = mapping->nrpages * 2,
+-              .range_start = 0,
+-              .range_end = i_size_read(mapping->host),
++              .range_start = dirty_start,
++              .range_end = dirty_end,
+       };
+ 
+       ret = generic_writepages(mapping, &wbc);
+@@ -218,6 +219,9 @@ static int journal_submit_data_buffers(journal_t *journal,
+ 
+       spin_lock(&journal->j_list_lock);
+       list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
++              loff_t dirty_start = jinode->i_dirty_start;
++              loff_t dirty_end = jinode->i_dirty_end;
++
+               if (!(jinode->i_flags & JI_WRITE_DATA))
+                       continue;
+               mapping = jinode->i_vfs_inode->i_mapping;
+@@ -230,7 +234,8 @@ static int journal_submit_data_buffers(journal_t *journal,
+                * only allocated blocks here.
+                */
+               trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
+-              err = journal_submit_inode_data_buffers(mapping);
++              err = journal_submit_inode_data_buffers(mapping, dirty_start,
++                              dirty_end);
++                              dirty_end);
+                       ret = err;
+               spin_lock(&journal->j_list_lock);
+@@ -257,12 +262,16 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
+       /* For locking, see the comment in journal_submit_data_buffers() */
+       spin_lock(&journal->j_list_lock);
+       list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
++              loff_t dirty_start = jinode->i_dirty_start;
++              loff_t dirty_end = jinode->i_dirty_end;
++
+               if (!(jinode->i_flags & JI_WAIT_DATA))
+                       continue;
+               jinode->i_flags |= JI_COMMIT_RUNNING;
+               spin_unlock(&journal->j_list_lock);
+-              err = filemap_fdatawait_keep_errors(
+-                              jinode->i_vfs_inode->i_mapping);
++              err = filemap_fdatawait_range_keep_errors(
++                              jinode->i_vfs_inode->i_mapping, dirty_start,
++                              dirty_end);
+               if (!ret)
+                       ret = err;
+               spin_lock(&journal->j_list_lock);
+@@ -282,6 +291,8 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
+                               &jinode->i_transaction->t_inode_list);
+               } else {
+                       jinode->i_transaction = NULL;
++                      jinode->i_dirty_start = 0;
++                      jinode->i_dirty_end = 0;
+               }
+       }
+       spin_unlock(&journal->j_list_lock);
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index e9cf88f0bc29..df390a69c49a 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -94,6 +94,8 @@ EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
+ EXPORT_SYMBOL(jbd2_journal_force_commit);
+ EXPORT_SYMBOL(jbd2_journal_inode_add_write);
+ EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
++EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
++EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
+ EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
+ EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
+ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
+@@ -2588,6 +2590,8 @@ void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode)
+       jinode->i_next_transaction = NULL;
+       jinode->i_vfs_inode = inode;
+       jinode->i_flags = 0;
++      jinode->i_dirty_start = 0;
++      jinode->i_dirty_end = 0;
+       INIT_LIST_HEAD(&jinode->i_list);
+ }
+ 
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index e20a6703531f..911ff18249b7 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -2500,7 +2500,7 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
+  * File inode in the inode list of the handle's transaction
+  */
+ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
+-                                 unsigned long flags)
++              unsigned long flags, loff_t start_byte, loff_t end_byte)
+ {
+       transaction_t *transaction = handle->h_transaction;
+       journal_t *journal;
+@@ -2512,26 +2512,17 @@ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
+       jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
+                       transaction->t_tid);
+ 
+-      /*
+-       * First check whether inode isn't already on the transaction's
+-       * lists without taking the lock. Note that this check is safe
+-       * without the lock as we cannot race with somebody removing inode
+-       * from the transaction. The reason is that we remove inode from the
+-       * transaction only in journal_release_jbd_inode() and when we commit
+-       * the transaction. We are guarded from the first case by holding
+-       * a reference to the inode. We are safe against the second case
+-       * because if jinode->i_transaction == transaction, commit code
+-       * cannot touch the transaction because we hold reference to it,
+-       * and if jinode->i_next_transaction == transaction, commit code
+-       * will only file the inode where we want it.
+-       */
+-      if ((jinode->i_transaction == transaction ||
+-          jinode->i_next_transaction == transaction) &&
+-          (jinode->i_flags & flags) == flags)
+-              return 0;
+-
+       spin_lock(&journal->j_list_lock);
+       jinode->i_flags |= flags;
++
++      if (jinode->i_dirty_end) {
++              jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
++              jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
++      } else {
++              jinode->i_dirty_start = start_byte;
++              jinode->i_dirty_end = end_byte;
++      }
++
+       /* Is inode already attached where we need it? */
+       if (jinode->i_transaction == transaction ||
+           jinode->i_next_transaction == transaction)
+@@ -2566,12 +2557,28 @@ done:
+ int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
+ {
+       return jbd2_journal_file_inode(handle, jinode,
+-                                     JI_WRITE_DATA | JI_WAIT_DATA);
++                      JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
+ }
+ 
+ int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
+ {
+-      return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA);
++      return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
++                      LLONG_MAX);
++}
++
++int jbd2_journal_inode_ranged_write(handle_t *handle,
++              struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
++{
++      return jbd2_journal_file_inode(handle, jinode,
++                      JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
++                      start_byte + length - 1);
++}
++
++int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
++              loff_t start_byte, loff_t length)
++{
++      return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
++                      start_byte, start_byte + length - 1);
+ }
+ 
+ /*
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index d4e1b43a53c3..92420009b9bc 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2651,6 +2651,8 @@ extern int filemap_flush(struct address_space *);
+ extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
+ extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
+                                  loff_t lend);
++extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
++              loff_t start_byte, loff_t end_byte);
+ 
+ static inline int filemap_fdatawait(struct address_space *mapping)
+ {
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 583b82b5a1e9..1cf1b9b8e975 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -454,6 +454,22 @@ struct jbd2_inode {
+        * @i_flags: Flags of inode [j_list_lock]
+        */
+       unsigned long i_flags;
++
++      /**
++       * @i_dirty_start:
++       *
++       * Offset in bytes where the dirty range for this inode starts.
++       * [j_list_lock]
++       */
++      loff_t i_dirty_start;
++
++      /**
++       * @i_dirty_end:
++       *
++       * Inclusive offset in bytes where the dirty range for this inode
++       * ends. [j_list_lock]
++       */
++      loff_t i_dirty_end;
+ };
+ 
+ struct jbd2_revoke_table_s;
+@@ -1399,6 +1415,12 @@ extern int         jbd2_journal_force_commit(journal_t *);
+ extern int       jbd2_journal_force_commit_nested(journal_t *);
+ extern int       jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
+ extern int       jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
++extern int       jbd2_journal_inode_ranged_write(handle_t *handle,
++                      struct jbd2_inode *inode, loff_t start_byte,
++                      loff_t length);
++extern int       jbd2_journal_inode_ranged_wait(handle_t *handle,
++                      struct jbd2_inode *inode, loff_t start_byte,
++                      loff_t length);
+ extern int       jbd2_journal_begin_ordered_truncate(journal_t *journal,
+                               struct jbd2_inode *inode, loff_t new_size);
+ extern void      jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 42fc852bf512..b22bc81f3669 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1030,6 +1030,11 @@ static inline int in_software_context(struct perf_event *event)
+       return event->ctx->pmu->task_ctx_nr == perf_sw_context;
+ }
+ 
++static inline int is_exclusive_pmu(struct pmu *pmu)
++{
++      return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
++}
++
+ extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+ 
+ extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 6cf0870414c7..ffc8ee0ea5e5 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -313,8 +313,9 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
+  * @skb: buffer
+  *
+  * If dst is not yet refcounted and not destroyed, grab a ref on it.
++ * Returns true if dst is refcounted.
+  */
+-static inline void skb_dst_force(struct sk_buff *skb)
++static inline bool skb_dst_force(struct sk_buff *skb)
+ {
+       if (skb_dst_is_noref(skb)) {
+               struct dst_entry *dst = skb_dst(skb);
+@@ -325,6 +326,8 @@ static inline void skb_dst_force(struct sk_buff *skb)
+ 
+               skb->_skb_refdst = (unsigned long)dst;
+       }
++
++      return skb->_skb_refdst != 0UL;
+ }
+ 
+ 
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index e75661f92daa..abcf53a6db04 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1054,7 +1054,8 @@ void tcp_get_default_congestion_control(struct net *net, char *name);
+ void tcp_get_available_congestion_control(char *buf, size_t len);
+ void tcp_get_allowed_congestion_control(char *buf, size_t len);
+ int tcp_set_allowed_congestion_control(char *allowed);
+-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
++int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
++                             bool reinit, bool cap_net_admin);
+ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
+ 
+@@ -1646,6 +1647,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
+       return skb_rb_first(&sk->tcp_rtx_queue);
+ }
+ 
++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
++{
++      return skb_rb_last(&sk->tcp_rtx_queue);
++}
++
+ static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
+ {
+       return skb_peek(&sk->sk_write_queue);
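
tcp_rtx_queue_tail() mirrors tcp_rtx_queue_head(): the retransmit queue is kept in an rbtree keyed by sequence number, so head and tail are the leftmost and rightmost nodes. A generic sketch of the rightmost-node walk (plain C, not the kernel's rbtree API):

#include <stddef.h>
#include <stdio.h>

struct node {
        int seq;                        /* stand-in for skb sequence */
        struct node *left, *right;
};

/* "Last" in a search tree is simply the rightmost node. */
static struct node *tree_last(struct node *root)
{
        if (!root)
                return NULL;
        while (root->right)
                root = root->right;
        return root;
}

int main(void)
{
        struct node lo = { 10, NULL, NULL };
        struct node hi = { 30, NULL, NULL };
        struct node root = { 20, &lo, &hi };

        printf("tail seq: %d\n", tree_last(&root)->seq);  /* 30 */
        return 0;
}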
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 954110575891..98f5ad0319a2 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -234,6 +234,7 @@ struct tls_offload_context_rx {
+       (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+        TLS_DRIVER_STATE_SIZE)
+ 
++void tls_ctx_free(struct tls_context *ctx);
+ int wait_on_pending_writer(struct sock *sk, long *timeo);
+ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+               int __user *optlen);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 3b61ff40bfe2..e8979c72514b 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2541,6 +2541,9 @@ unlock:
+       return ret;
+ }
+ 
++static bool exclusive_event_installable(struct perf_event *event,
++                                      struct perf_event_context *ctx);
++
+ /*
+  * Attach a performance event to a context.
+  *
+@@ -2555,6 +2558,8 @@ perf_install_in_context(struct perf_event_context *ctx,
+ 
+       lockdep_assert_held(&ctx->mutex);
+ 
++      WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
++
+       if (event->cpu != -1)
+               event->cpu = cpu;
+ 
+@@ -4341,7 +4346,7 @@ static int exclusive_event_init(struct perf_event *event)
+ {
+       struct pmu *pmu = event->pmu;
+ 
+-      if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
++      if (!is_exclusive_pmu(pmu))
+               return 0;
+ 
+       /*
+@@ -4372,7 +4377,7 @@ static void exclusive_event_destroy(struct perf_event *event)
+ {
+       struct pmu *pmu = event->pmu;
+ 
+-      if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
++      if (!is_exclusive_pmu(pmu))
+               return;
+ 
+       /* see comment in exclusive_event_init() */
+@@ -4392,14 +4397,15 @@ static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
+       return false;
+ }
+ 
+-/* Called under the same ctx::mutex as perf_install_in_context() */
+ static bool exclusive_event_installable(struct perf_event *event,
+                                       struct perf_event_context *ctx)
+ {
+       struct perf_event *iter_event;
+       struct pmu *pmu = event->pmu;
+ 
+-      if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
++      lockdep_assert_held(&ctx->mutex);
++
++      if (!is_exclusive_pmu(pmu))
+               return true;
+ 
+       list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
+@@ -4446,12 +4452,20 @@ static void _free_event(struct perf_event *event)
+       if (event->destroy)
+               event->destroy(event);
+ 
+-      if (event->ctx)
+-              put_ctx(event->ctx);
+-
++      /*
++       * Must be after ->destroy(), due to uprobe_perf_close() using
++       * hw.target.
++       */
+       if (event->hw.target)
+               put_task_struct(event->hw.target);
+ 
++      /*
++       * perf_event_free_task() relies on put_ctx() being 'last', in particular
++       * all task references must be cleaned up.
++       */
++      if (event->ctx)
++              put_ctx(event->ctx);
++
+       exclusive_event_destroy(event);
+       module_put(event->pmu->module);
+ 
+@@ -4631,8 +4645,17 @@ again:
+       mutex_unlock(&event->child_mutex);
+ 
+       list_for_each_entry_safe(child, tmp, &free_list, child_list) {
++              void *var = &child->ctx->refcount;
++
+               list_del(&child->child_list);
+               free_event(child);
++
++              /*
++               * Wake any perf_event_free_task() waiting for this event to be
++               * freed.
++               */
++              smp_mb(); /* pairs with wait_var_event() */
++              wake_up_var(var);
+       }
+ 
+ no_ctx:
+@@ -10613,11 +10636,6 @@ SYSCALL_DEFINE5(perf_event_open,
+               goto err_alloc;
+       }
+ 
+-      if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
+-              err = -EBUSY;
+-              goto err_context;
+-      }
+-
+       /*
+        * Look up the group leader (we will attach this event to it):
+        */
+@@ -10705,6 +10723,18 @@ SYSCALL_DEFINE5(perf_event_open,
+                               move_group = 0;
+                       }
+               }
++
++              /*
++               * Failure to create exclusive events returns -EBUSY.
++               */
++              err = -EBUSY;
++              if (!exclusive_event_installable(group_leader, ctx))
++                      goto err_locked;
++
++              for_each_sibling_event(sibling, group_leader) {
++                      if (!exclusive_event_installable(sibling, ctx))
++                              goto err_locked;
++              }
+       } else {
+               mutex_lock(&ctx->mutex);
+       }
+@@ -10741,9 +10771,6 @@ SYSCALL_DEFINE5(perf_event_open,
+        * because we need to serialize with concurrent event creation.
+        */
+       if (!exclusive_event_installable(event, ctx)) {
+-              /* exclusive and group stuff are assumed mutually exclusive */
+-              WARN_ON_ONCE(move_group);
+-
+               err = -EBUSY;
+               goto err_locked;
+       }
+@@ -11210,11 +11237,11 @@ static void perf_free_event(struct perf_event *event,
+ }
+ 
+ /*
+- * Free an unexposed, unused context as created by inheritance by
+- * perf_event_init_task below, used by fork() in case of fail.
++ * Free a context as created by inheritance by perf_event_init_task() below,
++ * used by fork() in case of fail.
+  *
+- * Not all locks are strictly required, but take them anyway to be nice and
+- * help out with the lockdep assertions.
++ * Even though the task has never lived, the context and events have been
++ * exposed through the child_list, so we must take care tearing it all down.
+  */
+ void perf_event_free_task(struct task_struct *task)
+ {
+@@ -11244,7 +11271,23 @@ void perf_event_free_task(struct task_struct *task)
+                       perf_free_event(event, ctx);
+ 
+               mutex_unlock(&ctx->mutex);
+-              put_ctx(ctx);
++
++              /*
++               * perf_event_release_kernel() could've stolen some of our
++               * child events and still have them on its free_list. In that
++               * case we must wait for these events to have been freed (in
++               * particular all their references to this task must've been
++               * dropped).
++               *
++               * Without this copy_process() will unconditionally free this
++               * task (irrespective of its reference count) and
++               * _free_event()'s put_task_struct(event->hw.target) will be a
++               * use-after-free.
++               *
++               * Wait for all events to drop their context reference.
++               */
++              wait_var_event(&ctx->refcount, atomic_read(&ctx->refcount) == 1);
++              put_ctx(ctx); /* must be last */
+       }
+ }
+ 
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 52517f28e6f4..287f3fa02e5e 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -561,6 +561,28 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+ }
+ EXPORT_SYMBOL(filemap_fdatawait_range);
+ 
++/**
++ * filemap_fdatawait_range_keep_errors - wait for writeback to complete
++ * @mapping:          address space structure to wait for
++ * @start_byte:               offset in bytes where the range starts
++ * @end_byte:         offset in bytes where the range ends (inclusive)
++ *
++ * Walk the list of under-writeback pages of the given address space in the
++ * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
++ * this function does not clear error status of the address space.
++ *
++ * Use this function if callers don't handle errors themselves.  Expected
++ * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
++ * fsfreeze(8)
++ */
++int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
++              loff_t start_byte, loff_t end_byte)
++{
++      __filemap_fdatawait_range(mapping, start_byte, end_byte);
++      return filemap_check_and_keep_errors(mapping);
++}
++EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
++
+ /**
+  * file_fdatawait_range - wait for writeback to complete
+  * @file:             file pointing to address space structure to wait for
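
The point of the _keep_errors variant is who gets to consume a writeback error: errors are tracked per mapping, and the plain wait helpers clear them, which would let a system-wide flusher such as sync(2) eat an -EIO that a later fsync() caller needed to see. A simplified model of check-and-clear versus check-and-keep (the real mechanism is errseq_t; this sketch is illustrative only):

#include <stdio.h>

/* Simplified stand-in for the kernel's per-mapping error tracking. */
struct mapping_err {
        int err;                /* last writeback error, 0 if none */
};

/* fsync-style: report the error and consume it. */
static int check_and_clear(struct mapping_err *m)
{
        int err = m->err;

        m->err = 0;
        return err;
}

/* sync(2)/fsfreeze-style: report the error but leave it in place. */
static int check_and_keep(struct mapping_err *m)
{
        return m->err;
}

int main(void)
{
        struct mapping_err m = { -5 };  /* pretend writeback hit -EIO */

        printf("flusher sees %d\n", check_and_keep(&m));
        printf("fsync still sees %d\n", check_and_clear(&m));
        return 0;
}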
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index e42f44cf7b43..576379e87421 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2190,7 +2190,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
+  *   10TB     320        32GB
+  */
+ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
+-                               struct scan_control *sc, bool actual_reclaim)
++                               struct scan_control *sc, bool trace)
+ {
+       enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
+       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+@@ -2216,7 +2216,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
+        * rid of the stale workingset quickly.
+        */
+       refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
+-      if (file && actual_reclaim && lruvec->refaults != refaults) {
++      if (file && lruvec->refaults != refaults) {
+               inactive_ratio = 0;
+       } else {
+               gb = (inactive + active) >> (30 - PAGE_SHIFT);
+@@ -2226,7 +2226,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
+                       inactive_ratio = 1;
+       }
+ 
+-      if (actual_reclaim)
++      if (trace)
+              trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
+                      lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
+                      lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index fed0ff446abb..2532c1a19645 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -79,7 +79,6 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+       struct net_bridge_fdb_entry *dst = NULL;
+       struct net_bridge_mdb_entry *mdst;
+       bool local_rcv, mcast_hit = false;
+-      const unsigned char *dest;
+       struct net_bridge *br;
+       u16 vid = 0;
+ 
+@@ -97,10 +96,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+               br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
+ 
+       local_rcv = !!(br->dev->flags & IFF_PROMISC);
+-      dest = eth_hdr(skb)->h_dest;
+-      if (is_multicast_ether_addr(dest)) {
++      if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
+               /* by definition the broadcast is also a multicast address */
+-              if (is_broadcast_ether_addr(dest)) {
++              if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
+                       pkt_type = BR_PKT_BROADCAST;
+                       local_rcv = true;
+               } else {
+@@ -150,7 +148,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+               }
+               break;
+       case BR_PKT_UNICAST:
+-              dst = br_fdb_find_rcu(br, dest, vid);
++              dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
+       default:
+               break;
+       }
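
The pattern removed in this hunk (and in the br_multicast and br_stp_bpdu hunks below) is caching a pointer into skb header data across calls that may reallocate that data; afterwards the cached pointer dangles, so eth_hdr(skb) is re-derived at each use instead. A userspace analogue of the bug and the fix, using realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *pkt = malloc(16);      /* userspace analogue of skb data   */
        char *dest;

        if (!pkt)
                return 1;
        strcpy(pkt, "dst-mac");
        dest = pkt;                  /* cached pointer into the buffer   */

        pkt = realloc(pkt, 1 << 20); /* a header pull may move the data, */
        if (!pkt)
                return 1;
        /* ...so 'dest' may now dangle; dereferencing it is undefined.
         * The fix mirrors the hunk above: re-derive the pointer after
         * every call that can reallocate. */
        dest = pkt;
        printf("%s\n", dest);

        free(pkt);
        return 0;
}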
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 75901c4641b1..fb54d32321ec 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1147,6 +1147,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+       int type;
+       int err = 0;
+       __be32 group;
++      u16 nsrcs;
+ 
+       ih = igmpv3_report_hdr(skb);
+       num = ntohs(ih->ngrec);
+@@ -1160,8 +1161,9 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+               grec = (void *)(skb->data + len - sizeof(*grec));
+               group = grec->grec_mca;
+               type = grec->grec_type;
++              nsrcs = ntohs(grec->grec_nsrcs);
+ 
+-              len += ntohs(grec->grec_nsrcs) * 4;
++              len += nsrcs * 4;
+               if (!pskb_may_pull(skb, len))
+                       return -EINVAL;
+ 
+@@ -1182,7 +1184,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+               src = eth_hdr(skb)->h_source;
+               if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
+                    type == IGMPV3_MODE_IS_INCLUDE) &&
+-                  ntohs(grec->grec_nsrcs) == 0) {
++                  nsrcs == 0) {
+                       br_ip4_multicast_leave_group(br, port, group, vid, src);
+               } else {
+                       err = br_ip4_multicast_add_group(br, port, group, vid,
+@@ -1217,23 +1219,26 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+       len = skb_transport_offset(skb) + sizeof(*icmp6h);
+ 
+       for (i = 0; i < num; i++) {
+-              __be16 *nsrcs, _nsrcs;
+-
+-              nsrcs = skb_header_pointer(skb,
+-                                         len + offsetof(struct mld2_grec,
+-                                                        grec_nsrcs),
+-                                         sizeof(_nsrcs), &_nsrcs);
+-              if (!nsrcs)
++              __be16 *_nsrcs, __nsrcs;
++              u16 nsrcs;
++
++              _nsrcs = skb_header_pointer(skb,
++                                          len + offsetof(struct mld2_grec,
++                                                         grec_nsrcs),
++                                          sizeof(__nsrcs), &__nsrcs);
++              if (!_nsrcs)
+                       return -EINVAL;
+ 
++              nsrcs = ntohs(*_nsrcs);
++
+               if (!pskb_may_pull(skb,
+                                  len + sizeof(*grec) +
+-                                 sizeof(struct in6_addr) * ntohs(*nsrcs)))
++                                 sizeof(struct in6_addr) * nsrcs))
+                       return -EINVAL;
+ 
+               grec = (struct mld2_grec *)(skb->data + len);
+               len += sizeof(*grec) +
+-                     sizeof(struct in6_addr) * ntohs(*nsrcs);
++                     sizeof(struct in6_addr) * nsrcs;
+ 
+               /* We treat these as MLDv1 reports for now. */
+               switch (grec->grec_type) {
+@@ -1252,7 +1257,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+               src = eth_hdr(skb)->h_source;
+               if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
+                    grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
+-                  ntohs(*nsrcs) == 0) {
++                  nsrcs == 0) {
+                       br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
+                                                    vid, src);
+               } else {
+@@ -1505,7 +1510,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+                                 struct sk_buff *skb,
+                                 u16 vid)
+ {
+-      const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+       struct mld_msg *mld;
+       struct net_bridge_mdb_entry *mp;
+       struct mld2_query *mld2q;
+@@ -1549,7 +1553,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ 
+       if (is_general_query) {
+               saddr.proto = htons(ETH_P_IPV6);
+-              saddr.u.ip6 = ip6h->saddr;
++              saddr.u.ip6 = ipv6_hdr(skb)->saddr;
+ 
+               br_multicast_query_received(br, port, &br->ip6_other_query,
+                                           &saddr, max_delay);
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index 1b75d6bf12bd..37ddcea3fc96 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -147,7 +147,6 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
+ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+               struct net_device *dev)
+ {
+-      const unsigned char *dest = eth_hdr(skb)->h_dest;
+       struct net_bridge_port *p;
+       struct net_bridge *br;
+       const unsigned char *buf;
+@@ -176,7 +175,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+       if (p->state == BR_STATE_DISABLED)
+               goto out;
+ 
+-      if (!ether_addr_equal(dest, br->group_addr))
++      if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
+               goto out;
+ 
+       if (p->flags & BR_BPDU_GUARD) {
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 34ec9324737b..c996380f2959 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -3991,7 +3991,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
+                                                   TCP_CA_NAME_MAX-1));
+                       name[TCP_CA_NAME_MAX-1] = 0;
+                       ret = tcp_set_congestion_control(sk, name, false,
+-                                                       reinit);
++                                                       reinit, true);
+               } else {
+                       struct tcp_sock *tp = tcp_sk(sk);
+ 
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index cd9e991f21d7..c52d6e6b341c 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1021,6 +1021,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+ 
+                       atomic_set(&neigh->probes,
+                                  NEIGH_VAR(neigh->parms, UCAST_PROBES));
++                      neigh_del_timer(neigh);
+                       neigh->nud_state     = NUD_INCOMPLETE;
+                       neigh->updated = now;
+                       next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+@@ -1037,6 +1038,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+               }
+       } else if (neigh->nud_state & NUD_STALE) {
+               neigh_dbg(2, "neigh %p is delayed\n", neigh);
++              neigh_del_timer(neigh);
+               neigh->nud_state = NUD_DELAY;
+               neigh->updated = jiffies;
+               neigh_add_timer(neigh, jiffies +
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index ea4bd8a52422..d23746143cd2 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -66,6 +66,11 @@
+ #include <net/net_namespace.h>
+ #include <net/addrconf.h>
+ 
++#define IPV6ONLY_FLAGS        \
++              (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
++               IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
++               IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
++
+ static struct ipv4_devconf ipv4_devconf = {
+       .data = {
+               [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
+@@ -462,6 +467,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
+       ifa->ifa_flags &= ~IFA_F_SECONDARY;
+       last_primary = &in_dev->ifa_list;
+ 
++      /* Don't set IPv6 only flags to IPv4 addresses */
++      ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
++
+       for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
+            ifap = &ifa1->ifa_next) {
+               if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
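
The devinet change is plain flag hygiene: IFA_F_* values arriving from userspace are masked so that bits defined only for IPv6 can never be stored on an IPv4 address. A sketch of the idiom (the flag values below are made up, not the kernel's):

#include <stdio.h>

/* Made-up flag values for illustration; not the kernel's IFA_F_* bits. */
#define F_SECONDARY  0x01   /* meaningful for IPv4 */
#define F_NODAD      0x02   /* IPv6-only           */
#define F_HOMEADDR   0x10   /* IPv6-only           */

#define V6ONLY_MASK  (F_NODAD | F_HOMEADDR)

int main(void)
{
        unsigned int flags = F_SECONDARY | F_NODAD;  /* from userspace */

        flags &= ~V6ONLY_MASK;   /* drop bits that make no sense here */
        printf("stored flags: 0x%x\n", flags);       /* 0x1 */
        return 0;
}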
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index d187ee8156a1..b2240b7f225d 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1218,12 +1218,8 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+       if (pmc) {
+               im->interface = pmc->interface;
+               if (im->sfmode == MCAST_INCLUDE) {
+-                      im->tomb = pmc->tomb;
+-                      pmc->tomb = NULL;
+-
+-                      im->sources = pmc->sources;
+-                      pmc->sources = NULL;
+-
++                      swap(im->tomb, pmc->tomb);
++                      swap(im->sources, pmc->sources);
+                       for (psf = im->sources; psf; psf = psf->sf_next)
+                              psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               } else {
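
Replacing the take-and-NULL sequence with swap() fixes a leak: if im already had tomb/sources lists, the old assignments overwrote them, whereas swapping hands them to pmc so they are freed along with it. A minimal sketch of swap-as-ownership-transfer:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative ownership-transfer sketch, not the kernel's swap(). */
static void swap_ptr(char **a, char **b)
{
        char *tmp = *a;

        *a = *b;
        *b = tmp;
}

int main(void)
{
        char *im_sources = strdup("im-old");     /* leaked before the fix */
        char *pmc_sources = strdup("pmc-saved");

        swap_ptr(&im_sources, &pmc_sources);
        printf("im now owns: %s\n", im_sources);

        free(pmc_sources);   /* freeing pmc releases im's old list too */
        free(im_sources);
        return 0;
}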
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 364e6fdaa38f..b7ef367fe6a1 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2594,6 +2594,8 @@ int tcp_disconnect(struct sock *sk, int flags)
+       tcp_saved_syn_free(tp);
+       tp->compressed_ack = 0;
+       tp->bytes_sent = 0;
++      tp->bytes_acked = 0;
++      tp->bytes_received = 0;
+       tp->bytes_retrans = 0;
+       tp->dsack_dups = 0;
+       tp->reord_seen = 0;
+@@ -2729,7 +2731,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+               name[val] = 0;
+ 
+               lock_sock(sk);
+-              err = tcp_set_congestion_control(sk, name, true, true);
++              err = tcp_set_congestion_control(sk, name, true, true,
++                                               ns_capable(sock_net(sk)->user_ns,
++                                                          CAP_NET_ADMIN));
+               release_sock(sk);
+               return err;
+       }
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index bc6c02f16243..48f79db446a0 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -332,7 +332,8 @@ out:
+  * tcp_reinit_congestion_control (if the current congestion control was
+  * already initialized.
+  */
+-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
++int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
++                             bool reinit, bool cap_net_admin)
+ {
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       const struct tcp_congestion_ops *ca;
+@@ -368,8 +369,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, boo
+               } else {
+                       err = -EBUSY;
+               }
+-      } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
+-                   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
++      } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
+               err = -EPERM;
+       } else if (!try_module_get(ca->owner)) {
+               err = -EBUSY;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 221d9b72423b..88c7e821fd11 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1289,6 +1289,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *buff;
+       int nsize, old_factor;
++      long limit;
+       int nlen;
+       u8 flags;
+ 
+@@ -1299,8 +1300,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+       if (nsize < 0)
+               nsize = 0;
+ 
+-      if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
+-                   tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
++      /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
++       * We need some allowance to not penalize applications setting small
++       * SO_SNDBUF values.
++       * Also allow first and last skb in retransmit queue to be split.
++       */
++      limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
++      if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
++                   tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
++                   skb != tcp_rtx_queue_head(sk) &&
++                   skb != tcp_rtx_queue_tail(sk))) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+               return -ENOMEM;
+       }
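
The new tcp_fragment() bound lets sk_wmem_queued overshoot sk_sndbuf by two maximum-sized skbs before refusing with -ENOMEM, since tcp_sendmsg() itself can legitimately overshoot by one full skb; without the allowance, sockets with tiny SO_SNDBUF could never fragment for retransmit. A rough numeric sketch of the check (the constants are illustrative guesses, not the kernel's values):

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_GSO_MAX_SIZE    65536L
#define SKETCH_SKB_TRUESIZE(x) ((x) + 768L)  /* payload + overhead guess */

static bool would_reject(long wmem_queued, long sndbuf)
{
        long limit = sndbuf + 2 * SKETCH_SKB_TRUESIZE(SKETCH_GSO_MAX_SIZE);

        return (wmem_queued >> 1) > limit;
}

int main(void)
{
        /* tiny SO_SNDBUF with a legitimate overshoot: now tolerated */
        printf("small sndbuf: reject=%d\n", would_reject(260000L, 4608L));
        /* a genuinely runaway queue is still refused */
        printf("runaway:      reject=%d\n", would_reject(100L << 20, 4608L));
        return 0;
}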
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index a6c0479c1d55..bbb5ffb3397d 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1081,8 +1081,24 @@ add:
+               err = call_fib6_entry_notifiers(info->nl_net,
+                                               FIB_EVENT_ENTRY_ADD,
+                                               rt, extack);
+-              if (err)
++              if (err) {
++                      struct fib6_info *sibling, *next_sibling;
++
++                      /* If the route has siblings, then it first
++                       * needs to be unlinked from them.
++                       */
++                      if (!rt->fib6_nsiblings)
++                              return err;
++
++                      list_for_each_entry_safe(sibling, next_sibling,
++                                               &rt->fib6_siblings,
++                                               fib6_siblings)
++                              sibling->fib6_nsiblings--;
++                      rt->fib6_nsiblings = 0;
++                      list_del_init(&rt->fib6_siblings);
++                      rt6_multipath_rebalance(next_sibling);
+                       return err;
++              }
+ 
+               rcu_assign_pointer(rt->fib6_next, iter);
+               atomic_inc(&rt->fib6_ref);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 24f7b2cf504b..81220077d62f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2214,7 +2214,7 @@ static struct dst_entry *rt6_check(struct rt6_info *rt,
+ {
+       u32 rt_cookie = 0;
+ 
+-      if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
++      if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
+           rt_cookie != cookie)
+               return NULL;
+ 
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index 7569ba00e732..a96a8c16baf9 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -174,6 +174,11 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+               goto err;
+       }
+ 
++      if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
++              status = -ENETDOWN;
++              goto err;
++      }
++
+       *entry = (struct nf_queue_entry) {
+               .skb    = skb,
+               .state  = *state,
+@@ -182,7 +187,6 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+       };
+ 
+       nf_queue_entry_get_refs(entry);
+-      skb_dst_force(skb);
+ 
+       switch (entry->state.pf) {
+       case AF_INET:
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 71ffd1a6dc7c..43910e50752c 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -872,7 +872,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+       unsigned short frametype, flags, window, timeout;
+       int ret;
+ 
+-      skb->sk = NULL;         /* Initially we don't know who it's for */
++      skb_orphan(skb);
+ 
+       /*
+        *      skb->data points to the netrom frame start
+@@ -970,7 +970,9 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+ 
+       window = skb->data[20];
+ 
++      sock_hold(make);
+       skb->sk             = make;
++      skb->destructor     = sock_efree;
+       make->sk_state      = TCP_ESTABLISHED;
+ 
+       /* Fill in his circuit details */
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 908f25e3773e..5405d073804c 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -119,7 +119,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
+       conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+       if (!conn_info) {
+               rc = -EPROTO;
+-              goto free_exit;
++              goto exit;
+       }
+ 
+       __skb_queue_head_init(&frags_q);
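
The one-word nci fix is goto-label discipline: jumping to free_exit freed an skb the caller also frees on error, a double free; the corrected exit label unwinds only what this function owns. The general idiom, sketched in userspace:

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the idiom: error paths free only locally owned resources. */
static int consume(char *callers_buf, int fail_early)
{
        char *mine = NULL;
        int rc = 0;

        if (fail_early) {
                rc = -1;
                goto exit;      /* nothing of ours allocated yet */
        }

        mine = malloc(64);
        if (!mine) {
                rc = -1;
                goto exit;
        }
        /* ... work ... */
        free(mine);
exit:
        (void)callers_buf;      /* on error the *caller* frees this */
        return rc;
}

int main(void)
{
        char *buf = malloc(16);

        if (consume(buf, 1) < 0)
                free(buf);      /* safe: consume() never freed it */
        return 0;
}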
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 85ae53d8fd09..8211e8e97c96 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -175,8 +175,7 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               __be16 diff[] = { ~(hdr->h_proto), ethertype };
+ 
+-              skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+-                                      ~skb->csum);
++              skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
+       }
+ 
+       hdr->h_proto = ethertype;
+@@ -268,8 +267,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               __be32 diff[] = { ~(stack->label_stack_entry), lse };
+ 
+-              skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+-                                        ~skb->csum);
++              skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
+       }
+ 
+       stack->label_stack_entry = lse;
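
Both openvswitch hunks rest on how incremental checksum updates work: under CHECKSUM_COMPLETE, skb->csum is a raw (non-inverted) ones'-complement sum, so swapping a 16-bit field only requires folding in ~old + new, and the extra inversions removed above corrupted the result. A small standalone demonstration over 16-bit words (the word values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Add one 16-bit word into a ones'-complement sum, folding carries. */
static uint16_t ocsum_add(uint32_t sum, uint16_t w)
{
        sum += w;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        uint16_t words[] = { 0x1234, 0x8100, 0xaaaa }; /* 0x8100 = old field */
        uint16_t sum = 0, incremental;
        unsigned int i;

        for (i = 0; i < 3; i++)
                sum = ocsum_add(sum, words[i]);

        /* update the middle field 0x8100 -> 0x0800 by adding ~old + new */
        incremental = ocsum_add(ocsum_add(sum, (uint16_t)~0x8100), 0x0800);

        words[1] = 0x0800;                             /* recompute fully */
        for (sum = 0, i = 0; i < 3; i++)
                sum = ocsum_add(sum, words[i]);

        printf("incremental=0x%04x recomputed=0x%04x\n", incremental, sum);
        return 0;
}

Folding the carries keeps the sum in ones'-complement form, which is why the incremental and recomputed values match (0xc4de for these inputs) with no inversion needed anywhere.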
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index 3c39b8805d01..d76e5e58905d 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -552,6 +552,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
+ 
+       switch (rx->sk.sk_state) {
+       case RXRPC_UNBOUND:
++      case RXRPC_CLIENT_UNBOUND:
+               rx->srx.srx_family = AF_RXRPC;
+               rx->srx.srx_service = 0;
+               rx->srx.transport_type = SOCK_DGRAM;
+@@ -576,10 +577,9 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
+               }
+ 
+               rx->local = local;
+-              rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
++              rx->sk.sk_state = RXRPC_CLIENT_BOUND;
+               /* Fall through */
+ 
+-      case RXRPC_CLIENT_UNBOUND:
+       case RXRPC_CLIENT_BOUND:
+               if (!m->msg_name &&
+                   test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 2167c6ca55e3..4159bcb479c6 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1325,6 +1325,9 @@ replay:
+                       tcf_chain_tp_insert(chain, &chain_info, tp);
+               tfilter_notify(net, skb, n, tp, block, q, parent, fh,
+                              RTM_NEWTFILTER, false);
++              /* q pointer is NULL for shared blocks */
++              if (q)
++                      q->flags &= ~TCQ_F_CAN_BYPASS;
+       } else {
+               if (tp_created)
+                       tcf_proto_destroy(tp, NULL);
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 6c0a9d5dbf94..137692cb8b4f 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -600,8 +600,6 @@ static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
+ static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
+                             u32 classid)
+ {
+-      /* we cannot bypass queue discipline anymore */
+-      sch->flags &= ~TCQ_F_CAN_BYPASS;
+       return 0;
+ }
+ 
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 2f2678197760..650f21463853 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -828,8 +828,6 @@ static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
+ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
+                             u32 classid)
+ {
+-      /* we cannot bypass queue discipline anymore */
+-      sch->flags &= ~TCQ_F_CAN_BYPASS;
+       return 0;
+ }
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 8c00a7ef1bcd..9f5b4e547b63 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4507,34 +4507,18 @@ out_nounlock:
+ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
+                       int addr_len, int flags)
+ {
+-      struct inet_sock *inet = inet_sk(sk);
+       struct sctp_af *af;
+-      int err = 0;
++      int err = -EINVAL;
+ 
+       lock_sock(sk);
+ 
+       pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
+                addr, addr_len);
+ 
+-      /* We may need to bind the socket. */
+-      if (!inet->inet_num) {
+-              if (sk->sk_prot->get_port(sk, 0)) {
+-                      release_sock(sk);
+-                      return -EAGAIN;
+-              }
+-              inet->inet_sport = htons(inet->inet_num);
+-      }
+-
+       /* Validate addr_len before calling common connect/connectx routine. */
+       af = sctp_get_af_specific(addr->sa_family);
+-      if (!af || addr_len < af->sockaddr_len) {
+-              err = -EINVAL;
+-      } else {
+-              /* Pass correct addr len to common routine (so it knows there
+-               * is only one address being passed.
+-               */
++      if (af && addr_len >= af->sockaddr_len)
+               err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
+-      }
+ 
+       release_sock(sk);
+       return err;
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 3b47457862cc..0da57938a6c5 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -253,13 +253,20 @@ out:
+ int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
+ {
+       struct sctp_stream_out_ext *soute;
++      int ret;
+ 
+       soute = kzalloc(sizeof(*soute), GFP_KERNEL);
+       if (!soute)
+               return -ENOMEM;
+       SCTP_SO(stream, sid)->ext = soute;
+ 
+-      return sctp_sched_init_sid(stream, sid, GFP_KERNEL);
++      ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
++      if (ret) {
++              kfree(SCTP_SO(stream, sid)->ext);
++              SCTP_SO(stream, sid)->ext = NULL;
++      }
++
++      return ret;
+ }
+ 
+ void sctp_stream_free(struct sctp_stream *stream)
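
The sctp_stream_init_ext() change is a standard init rollback: when the second setup step fails, the first step's allocation is undone instead of leaving ->ext pointing at memory whose scheduler state was never initialized. A sketch of the shape (names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct stream_slot {
        void *ext;
};

/* Stand-in for the second init step that can fail. */
static int sched_init(int should_fail)
{
        return should_fail ? -1 : 0;
}

static int init_ext(struct stream_slot *slot, int should_fail)
{
        int ret;

        slot->ext = calloc(1, 64);
        if (!slot->ext)
                return -1;

        ret = sched_init(should_fail);
        if (ret) {
                free(slot->ext);        /* undo step one...                 */
                slot->ext = NULL;       /* ...and leave no dangling pointer */
        }
        return ret;
}

int main(void)
{
        struct stream_slot slot = { NULL };

        printf("init: %d, ext=%p\n", init_ext(&slot, 1), slot.ext);
        return 0;
}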
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index ead29c2aefa7..0a613e0ef3bf 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -61,7 +61,7 @@ static void tls_device_free_ctx(struct tls_context *ctx)
+       if (ctx->rx_conf == TLS_HW)
+               kfree(tls_offload_ctx_rx(ctx));
+ 
+-      kfree(ctx);
++      tls_ctx_free(ctx);
+ }
+ 
+ static void tls_device_gc_task(struct work_struct *work)
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 25b3fb585777..4c0ac79f82d4 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -241,7 +241,7 @@ static void tls_write_space(struct sock *sk)
+       ctx->sk_write_space(sk);
+ }
+ 
+-static void tls_ctx_free(struct tls_context *ctx)
++void tls_ctx_free(struct tls_context *ctx)
+ {
+       if (!ctx)
+               return;
