commit:     2924718e5b11fe3a7209b755097cba3a3f955839
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May  3 17:46:16 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May  3 17:46:16 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2924718e

Linux patch 4.10.14

 0000_README              |    4 +
 1013_linux-4.10.14.patch | 2251 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2255 insertions(+)

diff --git a/0000_README b/0000_README
index 0aa6665..5295a7d 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-4.10.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.10.13
 
+Patch:  1013_linux-4.10.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.10.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-4.10.14.patch b/1013_linux-4.10.14.patch
new file mode 100644
index 0000000..ae4d094
--- /dev/null
+++ b/1013_linux-4.10.14.patch
@@ -0,0 +1,2251 @@
+diff --git a/Makefile b/Makefile
+index 8285f4de02d1..48756653c42c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 10
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index b65930a49589..54b54da6384c 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -17,10 +17,11 @@
+ #include <asm/barrier.h>
+ #include <asm/smp.h>
+ 
++#define ATOMIC_INIT(i)        { (i) }
++
+ #ifndef CONFIG_ARC_PLAT_EZNPS
+ 
+ #define atomic_read(v)  READ_ONCE((v)->counter)
+-#define ATOMIC_INIT(i)        { (i) }
+ 
+ #ifdef CONFIG_ARC_HAS_LLSC
+ 
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index 804d2a2a19fe..dd6a18bc10ab 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -80,7 +80,7 @@ static unsigned int calculate_min_delta(void)
+               }
+ 
+               /* Sorted insert of 75th percentile into buf2 */
+-              for (k = 0; k < i; ++k) {
++              for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
+                       if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
+                               l = min_t(unsigned int,
+                                         i, ARRAY_SIZE(buf2) - 1);
+diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
+index 6430bff21fff..5c429d70e17f 100644
+--- a/arch/mips/kernel/elf.c
++++ b/arch/mips/kernel/elf.c
+@@ -257,7 +257,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+       else if ((prog_req.fr1 && prog_req.frdefault) ||
+                (prog_req.single && !prog_req.frdefault))
+               /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+-              state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
++              state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+                                         cpu_has_mips_r2_r6) ?
+                                         FP_FR1 : FP_FR0;
+       else if (prog_req.fr1)
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index 1f4bd222ba76..eb6c0d582626 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -244,9 +244,6 @@ static int compute_signal(int tt)
+ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ {
+       int reg;
+-      struct thread_info *ti = task_thread_info(p);
+-      unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
+-      struct pt_regs *regs = (struct pt_regs *)ksp - 1;
+ #if (KGDB_GDB_REG_SIZE == 32)
+       u32 *ptr = (u32 *)gdb_regs;
+ #else
+@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+ #endif
+ 
+       for (reg = 0; reg < 16; reg++)
+-              *(ptr++) = regs->regs[reg];
++              *(ptr++) = 0;
+ 
+       /* S0 - S7 */
+-      for (reg = 16; reg < 24; reg++)
+-              *(ptr++) = regs->regs[reg];
++      *(ptr++) = p->thread.reg16;
++      *(ptr++) = p->thread.reg17;
++      *(ptr++) = p->thread.reg18;
++      *(ptr++) = p->thread.reg19;
++      *(ptr++) = p->thread.reg20;
++      *(ptr++) = p->thread.reg21;
++      *(ptr++) = p->thread.reg22;
++      *(ptr++) = p->thread.reg23;
+ 
+       for (reg = 24; reg < 28; reg++)
+               *(ptr++) = 0;
+ 
+       /* GP, SP, FP, RA */
+-      for (reg = 28; reg < 32; reg++)
+-              *(ptr++) = regs->regs[reg];
+-
+-      *(ptr++) = regs->cp0_status;
+-      *(ptr++) = regs->lo;
+-      *(ptr++) = regs->hi;
+-      *(ptr++) = regs->cp0_badvaddr;
+-      *(ptr++) = regs->cp0_cause;
+-      *(ptr++) = regs->cp0_epc;
++      *(ptr++) = (long)p;
++      *(ptr++) = p->thread.reg29;
++      *(ptr++) = p->thread.reg30;
++      *(ptr++) = p->thread.reg31;
++
++      *(ptr++) = p->thread.cp0_status;
++
++      /* lo, hi */
++      *(ptr++) = 0;
++      *(ptr++) = 0;
++
++      /*
++       * BadVAddr, Cause
++       * Ideally these would come from the last exception frame up the stack
++       * but that requires unwinding, otherwise we can't know much for sure.
++       */
++      *(ptr++) = 0;
++      *(ptr++) = 0;
++
++      /*
++       * PC
++       * use return address (RA), i.e. the moment after return from resume()
++       */
++      *(ptr++) = p->thread.reg31;
+ }
+ 
+ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 314b66851348..f0266cef56e4 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -673,26 +673,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+       return pte_pfn(pte);
+ }
+ 
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+-static inline unsigned long pmd_dirty(pmd_t pmd)
++#define __HAVE_ARCH_PMD_WRITE
++static inline unsigned long pmd_write(pmd_t pmd)
+ {
+       pte_t pte = __pte(pmd_val(pmd));
+ 
+-      return pte_dirty(pte);
++      return pte_write(pte);
+ }
+ 
+-static inline unsigned long pmd_young(pmd_t pmd)
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static inline unsigned long pmd_dirty(pmd_t pmd)
+ {
+       pte_t pte = __pte(pmd_val(pmd));
+ 
+-      return pte_young(pte);
++      return pte_dirty(pte);
+ }
+ 
+-static inline unsigned long pmd_write(pmd_t pmd)
++static inline unsigned long pmd_young(pmd_t pmd)
+ {
+       pte_t pte = __pte(pmd_val(pmd));
+ 
+-      return pte_write(pte);
++      return pte_young(pte);
+ }
+ 
+ static inline unsigned long pmd_trans_huge(pmd_t pmd)
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 5d2f91511c60..47ecac5106d3 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1495,7 +1495,7 @@ bool kern_addr_valid(unsigned long addr)
+       if ((long)addr < 0L) {
+               unsigned long pa = __pa(addr);
+ 
+-              if ((addr >> max_phys_bits) != 0UL)
++              if ((pa >> max_phys_bits) != 0UL)
+                       return false;
+ 
+               return pfn_valid(pa >> PAGE_SHIFT);
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 8639bb2ae058..6bf09f5594b2 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -983,6 +983,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+       unsigned long return_hooker = (unsigned long)
+                               &return_to_handler;
+ 
++      /*
++       * When resuming from suspend-to-ram, this function can be indirectly
++       * called from early CPU startup code while the CPU is in real mode,
++       * which would fail miserably.  Make sure the stack pointer is a
++       * virtual address.
++       *
++       * This check isn't as accurate as virt_addr_valid(), but it should be
++       * good enough for this purpose, and it's fast.
++       */
++      if (unlikely((long)__builtin_frame_address(0) >= 0))
++              return;
++
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+ 
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 27ae2a0ef1b9..ecd075fd5754 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -613,6 +613,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+               },
+       },
++      {
++              /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
++              },
++      },
+       { }
+ };
+ 
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index a0dabd4038ba..7ab24c5262f3 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -740,13 +740,18 @@ static const struct net_device_ops gs_usb_netdev_ops = {
+ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+ {
+       struct gs_can *dev = netdev_priv(netdev);
+-      struct gs_identify_mode imode;
++      struct gs_identify_mode *imode;
+       int rc;
+ 
++      imode = kmalloc(sizeof(*imode), GFP_KERNEL);
++
++      if (!imode)
++              return -ENOMEM;
++
+       if (do_identify)
+-              imode.mode = GS_CAN_IDENTIFY_ON;
++              imode->mode = GS_CAN_IDENTIFY_ON;
+       else
+-              imode.mode = GS_CAN_IDENTIFY_OFF;
++              imode->mode = GS_CAN_IDENTIFY_OFF;
+ 
+       rc = usb_control_msg(interface_to_usbdev(dev->iface),
+                            usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+@@ -756,10 +761,12 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+                            USB_RECIP_INTERFACE,
+                            dev->channel,
+                            0,
+-                           &imode,
+-                           sizeof(imode),
++                           imode,
++                           sizeof(*imode),
+                            100);
+ 
++      kfree(imode);
++
+       return (rc > 0) ? 0 : rc;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 41db47050991..0145765002b3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -82,7 +82,7 @@
+ #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
+ 
+ #define MLX5_UMR_ALIGN                                (2048)
+-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD     (128)
++#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD     (256)
+ 
+ #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
+ #define MLX5E_DEFAULT_LRO_TIMEOUT                       32
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index f33f72d0237c..32d56cd1b638 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *i
+       int idx = 0;
+       int err = 0;
+ 
++      info->data = MAX_NUM_OF_ETHTOOL_RULES;
+       while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
+               err = mlx5e_ethtool_get_flow(priv, info, location);
+               if (!err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index cc718814c378..dc5c594f7c5e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -611,7 +611,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
+ 
+       if (!err && esw->mode == SRIOV_OFFLOADS &&
+           rep->vport != FDB_UPLINK_VPORT) {
+-              if (min_inline > esw->offloads.inline_mode) {
++              if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
++                  esw->offloads.inline_mode < min_inline) {
+                       netdev_warn(priv->netdev,
+                                  "Flow is not offloaded due to min inline setting, required %d actual %d\n",
+                                   min_inline, esw->offloads.inline_mode);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 7bce2bdbb79b..4d111c129144 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -908,8 +908,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       struct mlx5_eswitch *esw = dev->priv.eswitch;
+       int num_vports = esw->enabled_vports;
+-      int err;
+-      int vport;
++      int err, vport;
+       u8 mlx5_mode;
+ 
+       if (!MLX5_CAP_GEN(dev, vport_group_manager))
+@@ -918,9 +917,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+       if (esw->mode == SRIOV_NONE)
+               return -EOPNOTSUPP;
+ 
+-      if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+-          MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
++      switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
++      case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
++              if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
++                      return 0;
++              /* fall through */
++      case MLX5_CAP_INLINE_MODE_L2:
++              esw_warn(dev, "Inline mode can't be set\n");
+               return -EOPNOTSUPP;
++      case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
++              break;
++      }
+ 
+       if (esw->offloads.num_flows > 0) {
+               esw_warn(dev, "Can't set inline mode when flows are configured\n");
+@@ -963,18 +970,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+       if (esw->mode == SRIOV_NONE)
+               return -EOPNOTSUPP;
+ 
+-      if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+-          MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+-              return -EOPNOTSUPP;
+-
+       return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+ }
+ 
+ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+ {
++      u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+       struct mlx5_core_dev *dev = esw->dev;
+       int vport;
+-      u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+ 
+       if (!MLX5_CAP_GEN(dev, vport_group_manager))
+               return -EOPNOTSUPP;
+@@ -982,10 +985,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+       if (esw->mode == SRIOV_NONE)
+               return -EOPNOTSUPP;
+ 
+-      if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+-          MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+-              return -EOPNOTSUPP;
++      switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
++      case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
++              mlx5_mode = MLX5_INLINE_MODE_NONE;
++              goto out;
++      case MLX5_CAP_INLINE_MODE_L2:
++              mlx5_mode = MLX5_INLINE_MODE_L2;
++              goto out;
++      case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
++              goto query_vports;
++      }
+ 
++query_vports:
+       for (vport = 1; vport <= nvfs; vport++) {
+               mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+               if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+@@ -993,6 +1004,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+               prev_mlx5_mode = mlx5_mode;
+       }
+ 
++out:
+       *mode = mlx5_mode;
+       return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+index 55957246c0e8..b5d5519542e8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+                                        struct netdev_notifier_changeupper_info *info)
+ {
+       struct net_device *upper = info->upper_dev, *ndev_tmp;
+-      struct netdev_lag_upper_info *lag_upper_info;
++      struct netdev_lag_upper_info *lag_upper_info = NULL;
+       bool is_bonded;
+       int bond_status = 0;
+       int num_slaves = 0;
+@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+       if (!netif_is_lag_master(upper))
+               return 0;
+ 
+-      lag_upper_info = info->upper_info;
++      if (info->linking)
++              lag_upper_info = info->upper_info;
+ 
+       /* The event may still be of interest if the slave does not belong to
+        * us, but is enslaved to a master which has one or more of our netdevs
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 4aca265d9c14..4ee7ea775a02 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1001,7 +1001,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+       if (err) {
+               dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
+                       FW_INIT_TIMEOUT_MILI);
+-              goto out_err;
++              goto err_cmd_cleanup;
+       }
+ 
+       err = mlx5_core_enable_hca(dev, 0);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index f729a6b43958..1a012b3e0ded 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1061,12 +1061,70 @@ static struct mdiobb_ops bb_ops = {
+       .get_mdio_data = sh_get_mdio,
+ };
+ 
++/* free Tx skb function */
++static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
++{
++      struct sh_eth_private *mdp = netdev_priv(ndev);
++      struct sh_eth_txdesc *txdesc;
++      int free_num = 0;
++      int entry;
++      bool sent;
++
++      for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
++              entry = mdp->dirty_tx % mdp->num_tx_ring;
++              txdesc = &mdp->tx_ring[entry];
++              sent = !(txdesc->status & cpu_to_le32(TD_TACT));
++              if (sent_only && !sent)
++                      break;
++              /* TACT bit must be checked before all the following reads */
++              dma_rmb();
++              netif_info(mdp, tx_done, ndev,
++                         "tx entry %d status 0x%08x\n",
++                         entry, le32_to_cpu(txdesc->status));
++              /* Free the original skb. */
++              if (mdp->tx_skbuff[entry]) {
++                      dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
++                                       le32_to_cpu(txdesc->len) >> 16,
++                                       DMA_TO_DEVICE);
++                      dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
++                      mdp->tx_skbuff[entry] = NULL;
++                      free_num++;
++              }
++              txdesc->status = cpu_to_le32(TD_TFP);
++              if (entry >= mdp->num_tx_ring - 1)
++                      txdesc->status |= cpu_to_le32(TD_TDLE);
++
++              if (sent) {
++                      ndev->stats.tx_packets++;
++                      ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
++              }
++      }
++      return free_num;
++}
++
+ /* free skb and descriptor buffer */
+ static void sh_eth_ring_free(struct net_device *ndev)
+ {
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int ringsize, i;
+ 
++      if (mdp->rx_ring) {
++              for (i = 0; i < mdp->num_rx_ring; i++) {
++                      if (mdp->rx_skbuff[i]) {
++                              struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
++
++                              dma_unmap_single(&ndev->dev,
++                                               le32_to_cpu(rxdesc->addr),
++                                               ALIGN(mdp->rx_buf_sz, 32),
++                                               DMA_FROM_DEVICE);
++                      }
++              }
++              ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
++              dma_free_coherent(NULL, ringsize, mdp->rx_ring,
++                                mdp->rx_desc_dma);
++              mdp->rx_ring = NULL;
++      }
++
+       /* Free Rx skb ringbuffer */
+       if (mdp->rx_skbuff) {
+               for (i = 0; i < mdp->num_rx_ring; i++)
+@@ -1075,27 +1133,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
+       kfree(mdp->rx_skbuff);
+       mdp->rx_skbuff = NULL;
+ 
+-      /* Free Tx skb ringbuffer */
+-      if (mdp->tx_skbuff) {
+-              for (i = 0; i < mdp->num_tx_ring; i++)
+-                      dev_kfree_skb(mdp->tx_skbuff[i]);
+-      }
+-      kfree(mdp->tx_skbuff);
+-      mdp->tx_skbuff = NULL;
+-
+-      if (mdp->rx_ring) {
+-              ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+-              dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+-                                mdp->rx_desc_dma);
+-              mdp->rx_ring = NULL;
+-      }
+-
+       if (mdp->tx_ring) {
++              sh_eth_tx_free(ndev, false);
++
+               ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+               dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+                                 mdp->tx_desc_dma);
+               mdp->tx_ring = NULL;
+       }
++
++      /* Free Tx skb ringbuffer */
++      kfree(mdp->tx_skbuff);
++      mdp->tx_skbuff = NULL;
+ }
+ 
+ /* format skb and descriptor buffer */
+@@ -1343,43 +1392,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
+       update_mac_address(ndev);
+ }
+ 
+-/* free Tx skb function */
+-static int sh_eth_txfree(struct net_device *ndev)
+-{
+-      struct sh_eth_private *mdp = netdev_priv(ndev);
+-      struct sh_eth_txdesc *txdesc;
+-      int free_num = 0;
+-      int entry;
+-
+-      for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+-              entry = mdp->dirty_tx % mdp->num_tx_ring;
+-              txdesc = &mdp->tx_ring[entry];
+-              if (txdesc->status & cpu_to_le32(TD_TACT))
+-                      break;
+-              /* TACT bit must be checked before all the following reads */
+-              dma_rmb();
+-              netif_info(mdp, tx_done, ndev,
+-                         "tx entry %d status 0x%08x\n",
+-                         entry, le32_to_cpu(txdesc->status));
+-              /* Free the original skb. */
+-              if (mdp->tx_skbuff[entry]) {
+-                      dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+-                                       le32_to_cpu(txdesc->len) >> 16,
+-                                       DMA_TO_DEVICE);
+-                      dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+-                      mdp->tx_skbuff[entry] = NULL;
+-                      free_num++;
+-              }
+-              txdesc->status = cpu_to_le32(TD_TFP);
+-              if (entry >= mdp->num_tx_ring - 1)
+-                      txdesc->status |= cpu_to_le32(TD_TDLE);
+-
+-              ndev->stats.tx_packets++;
+-              ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+-      }
+-      return free_num;
+-}
+-
+ /* Packet receive function */
+ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ {
+@@ -1622,7 +1634,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
+                          intr_status, mdp->cur_tx, mdp->dirty_tx,
+                          (u32)ndev->state, edtrr);
+               /* dirty buffer free */
+-              sh_eth_txfree(ndev);
++              sh_eth_tx_free(ndev, true);
+ 
+               /* SH7712 BUG */
+               if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
+@@ -1681,7 +1693,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
+               /* Clear Tx interrupts */
+               sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
+ 
+-              sh_eth_txfree(ndev);
++              sh_eth_tx_free(ndev, true);
+               netif_wake_queue(ndev);
+       }
+ 
+@@ -2309,7 +2321,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 
+       spin_lock_irqsave(&mdp->lock, flags);
+       if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
+-              if (!sh_eth_txfree(ndev)) {
++              if (!sh_eth_tx_free(ndev, true)) {
+                       netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
+                       netif_stop_queue(ndev);
+                       spin_unlock_irqrestore(&mdp->lock, flags);
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index f83cf6696820..8420069594b3 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2713,7 +2713,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ }
+ 
+ #define MACSEC_FEATURES \
+-      (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
++      (NETIF_F_SG | NETIF_F_HIGHDMA)
+ static struct lock_class_key macsec_netdev_addr_lock_key;
+ 
+ static int macsec_dev_init(struct net_device *dev)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 20b3fdf282c5..7d49a36d6020 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -1140,6 +1140,7 @@ static int macvlan_port_create(struct net_device *dev)
+ static void macvlan_port_destroy(struct net_device *dev)
+ {
+       struct macvlan_port *port = macvlan_port_get_rtnl(dev);
++      struct sk_buff *skb;
+ 
+       dev->priv_flags &= ~IFF_MACVLAN_PORT;
+       netdev_rx_handler_unregister(dev);
+@@ -1148,7 +1149,15 @@ static void macvlan_port_destroy(struct net_device *dev)
+        * but we need to cancel it and purge left skbs if any.
+        */
+       cancel_work_sync(&port->bc_work);
+-      __skb_queue_purge(&port->bc_queue);
++
++      while ((skb = __skb_dequeue(&port->bc_queue))) {
++              const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
++
++              if (src)
++                      dev_put(src->dev);
++
++              kfree_skb(skb);
++      }
+ 
+       kfree(port);
+ }
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index e2460a57e4b1..ed0d10f54f26 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -1438,8 +1438,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
+               skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
+               skb_queue_tail(&dp83640->rx_queue, skb);
+               schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
+-      } else {
+-              netif_rx_ni(skb);
+       }
+ 
+       return true;
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 7cc1b7dcfe05..b41a32b26be7 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -591,16 +591,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
+ EXPORT_SYMBOL(phy_mii_ioctl);
+ 
+ /**
+- * phy_start_aneg - start auto-negotiation for this PHY device
++ * phy_start_aneg_priv - start auto-negotiation for this PHY device
+  * @phydev: the phy_device struct
++ * @sync: indicate whether we should wait for the workqueue cancelation
+  *
+  * Description: Sanitizes the settings (if we're not autonegotiating
+  *   them), and then calls the driver's config_aneg function.
+  *   If the PHYCONTROL Layer is operating, we change the state to
+  *   reflect the beginning of Auto-negotiation or forcing.
+  */
+-int phy_start_aneg(struct phy_device *phydev)
++static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+ {
++      bool trigger = 0;
+       int err;
+ 
+       mutex_lock(&phydev->lock);
+@@ -625,10 +627,40 @@ int phy_start_aneg(struct phy_device *phydev)
+               }
+       }
+ 
++      /* Re-schedule a PHY state machine to check PHY status because
++       * negotiation may already be done and aneg interrupt may not be
++       * generated.
++       */
++      if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
++              err = phy_aneg_done(phydev);
++              if (err > 0) {
++                      trigger = true;
++                      err = 0;
++              }
++      }
++
+ out_unlock:
+       mutex_unlock(&phydev->lock);
++
++      if (trigger)
++              phy_trigger_machine(phydev, sync);
++
+       return err;
+ }
++
++/**
++ * phy_start_aneg - start auto-negotiation for this PHY device
++ * @phydev: the phy_device struct
++ *
++ * Description: Sanitizes the settings (if we're not autonegotiating
++ *   them), and then calls the driver's config_aneg function.
++ *   If the PHYCONTROL Layer is operating, we change the state to
++ *   reflect the beginning of Auto-negotiation or forcing.
++ */
++int phy_start_aneg(struct phy_device *phydev)
++{
++      return phy_start_aneg_priv(phydev, true);
++}
+ EXPORT_SYMBOL(phy_start_aneg);
+ 
+ /**
+@@ -656,7 +688,7 @@ void phy_start_machine(struct phy_device *phydev)
+  *   state machine runs.
+  */
+ 
+-static void phy_trigger_machine(struct phy_device *phydev, bool sync)
++void phy_trigger_machine(struct phy_device *phydev, bool sync)
+ {
+       if (sync)
+               cancel_delayed_work_sync(&phydev->state_queue);
+@@ -678,7 +710,7 @@ void phy_stop_machine(struct phy_device *phydev)
+       cancel_delayed_work_sync(&phydev->state_queue);
+ 
+       mutex_lock(&phydev->lock);
+-      if (phydev->state > PHY_UP)
++      if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+               phydev->state = PHY_UP;
+       mutex_unlock(&phydev->lock);
+ }
+@@ -1151,7 +1183,7 @@ void phy_state_machine(struct work_struct *work)
+       mutex_unlock(&phydev->lock);
+ 
+       if (needs_aneg)
+-              err = phy_start_aneg(phydev);
++              err = phy_start_aneg_priv(phydev, false);
+       else if (do_suspend)
+               phy_suspend(phydev);
+ 
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 921fef275ea4..f2fd52e71a5e 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1126,7 +1126,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+               goto nla_put_failure;
+ 
+       /* rule only needs to appear once */
+-      nlh->nlmsg_flags &= NLM_F_EXCL;
++      nlh->nlmsg_flags |= NLM_F_EXCL;
+ 
+       frh = nlmsg_data(nlh);
+       memset(frh, 0, sizeof(*frh));
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index d438430c49a2..dba671d88377 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1038,10 +1038,10 @@ int scsi_init_io(struct scsi_cmnd *cmd)
+       struct scsi_device *sdev = cmd->device;
+       struct request *rq = cmd->request;
+       bool is_mq = (rq->mq_ctx != NULL);
+-      int error;
++      int error = BLKPREP_KILL;
+ 
+       if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
+-              return -EINVAL;
++              goto err_exit;
+ 
+       error = scsi_init_sgtable(rq, &cmd->sdb);
+       if (error)
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 5e659d054b40..4299348c880a 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -2069,11 +2069,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
+       if (inode_dirty_flags)
+               __mark_inode_dirty(inode, inode_dirty_flags);
+ 
+-      if (ia_valid & ATTR_MODE) {
+-              err = posix_acl_chmod(inode, attr->ia_mode);
+-              if (err)
+-                      goto out_put;
+-      }
+ 
+       if (mask) {
+               req->r_inode = inode;
+@@ -2087,13 +2082,11 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
+            ceph_cap_string(dirtied), mask);
+ 
+       ceph_mdsc_put_request(req);
+-      if (mask & CEPH_SETATTR_SIZE)
+-              __ceph_do_pending_vmtruncate(inode);
+-      ceph_free_cap_flush(prealloc_cf);
+-      return err;
+-out_put:
+-      ceph_mdsc_put_request(req);
+       ceph_free_cap_flush(prealloc_cf);
++
++      if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
++              __ceph_do_pending_vmtruncate(inode);
++
+       return err;
+ }
+ 
+@@ -2112,7 +2105,12 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+       if (err != 0)
+               return err;
+ 
+-      return __ceph_setattr(inode, attr);
++      err = __ceph_setattr(inode, attr);
++
++      if (err >= 0 && (attr->ia_valid & ATTR_MODE))
++              err = posix_acl_chmod(inode, attr->ia_mode);
++
++      return err;
+ }
+ 
+ /*
+diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
+index dba2ff8eaa68..452334694a5d 100644
+--- a/fs/nfsd/nfs3xdr.c
++++ b/fs/nfsd/nfs3xdr.c
+@@ -358,6 +358,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+ {
+       unsigned int len, v, hdr, dlen;
+       u32 max_blocksize = svc_max_payload(rqstp);
++      struct kvec *head = rqstp->rq_arg.head;
++      struct kvec *tail = rqstp->rq_arg.tail;
+ 
+       p = decode_fh(p, &args->fh);
+       if (!p)
+@@ -367,6 +369,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+       args->count = ntohl(*p++);
+       args->stable = ntohl(*p++);
+       len = args->len = ntohl(*p++);
++      if ((void *)p > head->iov_base + head->iov_len)
++              return 0;
+       /*
+        * The count must equal the amount of data passed.
+        */
+@@ -377,9 +381,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+        * Check to make sure that we got the right number of
+        * bytes.
+        */
+-      hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+-      dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+-              + rqstp->rq_arg.tail[0].iov_len - hdr;
++      hdr = (void*)p - head->iov_base;
++      dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr;
+       /*
+        * Round the length of the data which was specified up to
+        * the next multiple of XDR units and then compare that
+@@ -396,7 +399,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+               len = args->len = max_blocksize;
+       }
+       rqstp->rq_vec[0].iov_base = (void*)p;
+-      rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
++      rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
+       v = 0;
+       while (len > rqstp->rq_vec[v].iov_len) {
+               len -= rqstp->rq_vec[v].iov_len;
+@@ -471,6 +474,8 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
+       /* first copy and check from the first page */
+       old = (char*)p;
+       vec = &rqstp->rq_arg.head[0];
++      if ((void *)old > vec->iov_base + vec->iov_len)
++              return 0;
+       avail = vec->iov_len - (old - (char*)vec->iov_base);
+       while (len && avail && *old) {
+               *new++ = *old++;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index e6bfd96734c0..15497cbbc563 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -733,6 +733,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
+       return nfserr;
+ }
+ 
++/*
++ * A write procedure can have a large argument, and a read procedure can
++ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
++ * reply that can both be larger than a page.  The xdr code has taken
++ * advantage of this assumption to be a sloppy about bounds checking in
++ * some cases.  Pending a rewrite of the NFSv2/v3 xdr code to fix that
++ * problem, we enforce these assumptions here:
++ */
++static bool nfs_request_too_big(struct svc_rqst *rqstp,
++                              struct svc_procedure *proc)
++{
++      /*
++       * The ACL code has more careful bounds-checking and is not
++       * susceptible to this problem:
++       */
++      if (rqstp->rq_prog != NFS_PROGRAM)
++              return false;
++      /*
++       * Ditto NFSv4 (which can in theory have argument and reply both
++       * more than a page):
++       */
++      if (rqstp->rq_vers >= 4)
++              return false;
++      /* The reply will be small, we're OK: */
++      if (proc->pc_xdrressize > 0 &&
++          proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
++              return false;
++
++      return rqstp->rq_arg.len > PAGE_SIZE;
++}
++
+ int
+ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ {
+@@ -745,6 +776,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+                               rqstp->rq_vers, rqstp->rq_proc);
+       proc = rqstp->rq_procinfo;
+ 
++      if (nfs_request_too_big(rqstp, proc)) {
++              dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
++              *statp = rpc_garbage_args;
++              return 1;
++      }
+       /*
+        * Give the xdr decoder a chance to change this if it wants
+        * (necessary in the NFSv4.0 compound case)
+diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
+index 41b468a6a90f..de07ff625777 100644
+--- a/fs/nfsd/nfsxdr.c
++++ b/fs/nfsd/nfsxdr.c
+@@ -280,6 +280,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+                                       struct nfsd_writeargs *args)
+ {
+       unsigned int len, hdr, dlen;
++      struct kvec *head = rqstp->rq_arg.head;
+       int v;
+ 
+       p = decode_fh(p, &args->fh);
+@@ -300,9 +301,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+        * Check to make sure that we got the right number of
+        * bytes.
+        */
+-      hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
+-      dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
+-              - hdr;
++      hdr = (void*)p - head->iov_base;
++      if (hdr > head->iov_len)
++              return 0;
++      dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
+ 
+       /*
+        * Round the length of the data which was specified up to
+@@ -316,7 +318,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
+               return 0;
+ 
+       rqstp->rq_vec[0].iov_base = (void*)p;
+-      rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
++      rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
+       v = 0;
+       while (len > rqstp->rq_vec[v].iov_len) {
+               len -= rqstp->rq_vec[v].iov_len;
+diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
+index 9ca23fcfb5d7..6fdfc884fdeb 100644
+--- a/include/linux/errqueue.h
++++ b/include/linux/errqueue.h
+@@ -20,6 +20,8 @@ struct sock_exterr_skb {
+       struct sock_extended_err        ee;
+       u16                             addr_offset;
+       __be16                          port;
++      u8                              opt_stats:1,
++                                      unused:7;
+ };
+ 
+ #endif
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 7fc1105605bf..b19ae667c9c4 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -840,6 +840,7 @@ void phy_change_work(struct work_struct *work);
+ void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+ void phy_start_machine(struct phy_device *phydev);
+ void phy_stop_machine(struct phy_device *phydev);
++void phy_trigger_machine(struct phy_device *phydev, bool sync);
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_ksettings_get(struct phy_device *phydev,
+diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
+index f6598d1c886e..316e838b7470 100644
+--- a/include/uapi/linux/ipv6_route.h
++++ b/include/uapi/linux/ipv6_route.h
+@@ -34,7 +34,7 @@
+ #define RTF_PREF(pref)        ((pref) << 27)
+ #define RTF_PREF_MASK 0x18000000
+ 
+-#define RTF_PCPU      0x40000000
++#define RTF_PCPU      0x40000000      /* read-only: can not be set by user */
+ #define RTF_LOCAL     0x80000000
+ 
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index cdc43b899f28..f3c938ba87a2 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1859,14 +1859,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+ 
+       for (i = 0; i < MAX_BPF_REG; i++)
+               if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
+-                      regs[i].range = dst_reg->off;
++                      /* keep the maximum range already checked */
++                      regs[i].range = max(regs[i].range, dst_reg->off);
+ 
+       for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+               if (state->stack_slot_type[i] != STACK_SPILL)
+                       continue;
+               reg = &state->spilled_regs[i / BPF_REG_SIZE];
+               if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
+-                      reg->range = dst_reg->off;
++                      reg->range = max(reg->range, dst_reg->off);
+       }
+ }
+ 
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 0a5f630f5c54..f90ef82076a9 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1333,26 +1333,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
+       struct cpuhp_step *sp;
+       int ret = 0;
+ 
+-      mutex_lock(&cpuhp_state_mutex);
+-
+       if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
+               ret = cpuhp_reserve_state(state);
+               if (ret < 0)
+-                      goto out;
++                      return ret;
+               state = ret;
+       }
+       sp = cpuhp_get_step(state);
+-      if (name && sp->name) {
+-              ret = -EBUSY;
+-              goto out;
+-      }
++      if (name && sp->name)
++              return -EBUSY;
++
+       sp->startup.single = startup;
+       sp->teardown.single = teardown;
+       sp->name = name;
+       sp->multi_instance = multi_instance;
+       INIT_HLIST_HEAD(&sp->list);
+-out:
+-      mutex_unlock(&cpuhp_state_mutex);
+       return ret;
+ }
+ 
+@@ -1426,6 +1421,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+               return -EINVAL;
+ 
+       get_online_cpus();
++      mutex_lock(&cpuhp_state_mutex);
+ 
+       if (!invoke || !sp->startup.multi)
+               goto add_node;
+@@ -1445,16 +1441,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+               if (ret) {
+                       if (sp->teardown.multi)
+                               cpuhp_rollback_install(cpu, state, node);
+-                      goto err;
++                      goto unlock;
+               }
+       }
+ add_node:
+       ret = 0;
+-      mutex_lock(&cpuhp_state_mutex);
+       hlist_add_head(node, &sp->list);
++unlock:
+       mutex_unlock(&cpuhp_state_mutex);
+-
+-err:
+       put_online_cpus();
+       return ret;
+ }
+@@ -1489,6 +1483,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+               return -EINVAL;
+ 
+       get_online_cpus();
++      mutex_lock(&cpuhp_state_mutex);
+ 
+       ret = cpuhp_store_callbacks(state, name, startup, teardown,
+                                   multi_instance);
+@@ -1522,6 +1517,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+               }
+       }
+ out:
++      mutex_unlock(&cpuhp_state_mutex);
+       put_online_cpus();
+       /*
+        * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+@@ -1545,6 +1541,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+               return -EINVAL;
+ 
+       get_online_cpus();
++      mutex_lock(&cpuhp_state_mutex);
++
+       if (!invoke || !cpuhp_get_teardown_cb(state))
+               goto remove;
+       /*
+@@ -1561,7 +1559,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+       }
+ 
+ remove:
+-      mutex_lock(&cpuhp_state_mutex);
+       hlist_del(node);
+       mutex_unlock(&cpuhp_state_mutex);
+       put_online_cpus();
+@@ -1569,6 +1566,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
++
+ /**
+  * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
+  * @state:    The state to remove
+@@ -1587,6 +1585,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+ 
+       get_online_cpus();
+ 
++      mutex_lock(&cpuhp_state_mutex);
+       if (sp->multi_instance) {
+               WARN(!hlist_empty(&sp->list),
+                    "Error: Removing state %d which has instances left.\n",
+@@ -1611,6 +1610,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+       }
+ remove:
+       cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
++      mutex_unlock(&cpuhp_state_mutex);
+       put_online_cpus();
+ }
+ EXPORT_SYMBOL(__cpuhp_remove_state);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 3fc94a49ccd5..cf129fec7329 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -2101,6 +2101,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+               trace_9p_protocol_dump(clnt, req->rc);
+               goto free_and_error;
+       }
++      if (rsize < count) {
++              pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
++              count = rsize;
++      }
+ 
+       p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+ 
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index e7c12caa20c8..4526cbd7e28a 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh)
+       if (skb)
+               skb = skb_clone(skb, GFP_ATOMIC);
+       write_unlock(&neigh->lock);
+-      neigh->ops->solicit(neigh, skb);
++      if (neigh->ops->solicit)
++              neigh->ops->solicit(neigh, skb);
+       atomic_inc(&neigh->probes);
+       kfree_skb(skb);
+ }
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 9424673009c1..29be2466970c 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
+       while ((skb = skb_dequeue(&npinfo->txq))) {
+               struct net_device *dev = skb->dev;
+               struct netdev_queue *txq;
++              unsigned int q_index;
+ 
+               if (!netif_device_present(dev) || !netif_running(dev)) {
+                       kfree_skb(skb);
+                       continue;
+               }
+ 
+-              txq = skb_get_tx_queue(dev, skb);
+-
+               local_irq_save(flags);
++              /* check if skb->queue_mapping is still valid */
++              q_index = skb_get_queue_mapping(skb);
++              if (unlikely(q_index >= dev->real_num_tx_queues)) {
++                      q_index = q_index % dev->real_num_tx_queues;
++                      skb_set_queue_mapping(skb, q_index);
++              }
++              txq = netdev_get_tx_queue(dev, q_index);
+               HARD_TX_LOCK(dev, txq, smp_processor_id());
+               if (netif_xmit_frozen_or_stopped(txq) ||
+                   netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 88a8e429fc3e..0fd421713775 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -16,9 +16,11 @@
+ #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
+ 
+ static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
++static u32 ts_secret[2];
+ 
+ static __always_inline void net_secret_init(void)
+ {
++      net_get_random_once(ts_secret, sizeof(ts_secret));
+       net_get_random_once(net_secret, sizeof(net_secret));
+ }
+ #endif
+@@ -41,6 +43,21 @@ static u32 seq_scale(u32 seq)
+ #endif
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
++static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
++{
++      u32 hash[4 + 4 + 1];
++
++      if (sysctl_tcp_timestamps != 1)
++              return 0;
++
++      memcpy(hash, saddr, 16);
++      memcpy(hash + 4, daddr, 16);
++
++      hash[8] = ts_secret[0];
++
++      return jhash2(hash, ARRAY_SIZE(hash), ts_secret[1]);
++}
++
+ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+                                __be16 sport, __be16 dport, u32 *tsoff)
+ {
+@@ -59,7 +76,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+ 
+       md5_transform(hash, secret);
+ 
+-      *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0;
++      *tsoff = secure_tcpv6_ts_off(saddr, daddr);
+       return seq_scale(hash[0]);
+ }
+ EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+@@ -87,6 +104,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
+ #endif
+ 
+ #ifdef CONFIG_INET
++static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
++{
++      if (sysctl_tcp_timestamps != 1)
++              return 0;
++
++      return jhash_3words((__force u32)saddr, (__force u32)daddr,
++                          ts_secret[0], ts_secret[1]);
++}
+ 
+ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+                              __be16 sport, __be16 dport, u32 *tsoff)
+@@ -101,7 +126,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ 
+       md5_transform(hash, net_secret);
+ 
+-      *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0;
++      *tsoff = secure_tcp_ts_off(saddr, daddr);
+       return seq_scale(hash[0]);
+ }
+ 
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index aa3a13378c90..887995e6df9a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3078,22 +3078,32 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+       if (sg && csum && (mss != GSO_BY_FRAGS))  {
+               if (!(features & NETIF_F_GSO_PARTIAL)) {
+                       struct sk_buff *iter;
++                      unsigned int frag_len;
+ 
+                       if (!list_skb ||
+                           !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
+                               goto normal;
+ 
+-                      /* Split the buffer at the frag_list pointer.
+-                       * This is based on the assumption that all
+-                       * buffers in the chain excluding the last
+-                       * containing the same amount of data.
++                      /* If we get here then all the required
++                       * GSO features except frag_list are supported.
++                       * Try to split the SKB to multiple GSO SKBs
++                       * with no frag_list.
++                       * Currently we can do that only when the buffers don't
++                       * have a linear part and all the buffers except
++                       * the last are of the same length.
+                        */
++                      frag_len = list_skb->len;
+                       skb_walk_frags(head_skb, iter) {
++                              if (frag_len != iter->len && iter->next)
++                                      goto normal;
+                               if (skb_headlen(iter))
+                                       goto normal;
+ 
+                               len -= iter->len;
+                       }
++
++                      if (len != frag_len)
++                              goto normal;
+               }
+ 
+               /* GSO partial only requires that we trim off any excess that
+@@ -3690,6 +3700,15 @@ static void sock_rmem_free(struct sk_buff *skb)
+       atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ }
+ 
++static void skb_set_err_queue(struct sk_buff *skb)
++{
++      /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
++       * So, it is safe to (mis)use it to mark skbs on the error queue.
++       */
++      skb->pkt_type = PACKET_OUTGOING;
++      BUILD_BUG_ON(PACKET_OUTGOING == 0);
++}
++
+ /*
+  * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+  */
+@@ -3703,6 +3722,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+       skb->sk = sk;
+       skb->destructor = sock_rmem_free;
+       atomic_add(skb->truesize, &sk->sk_rmem_alloc);
++      skb_set_err_queue(skb);
+ 
+       /* before exiting rcu section, make sure dst is refcounted */
+       skb_dst_force(skb);
+@@ -3779,16 +3799,21 @@ EXPORT_SYMBOL(skb_clone_sk);
+ 
+ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
+                                       struct sock *sk,
+-                                      int tstype)
++                                      int tstype,
++                                      bool opt_stats)
+ {
+       struct sock_exterr_skb *serr;
+       int err;
+ 
++      BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
++
+       serr = SKB_EXT_ERR(skb);
+       memset(serr, 0, sizeof(*serr));
+       serr->ee.ee_errno = ENOMSG;
+       serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+       serr->ee.ee_info = tstype;
++      serr->opt_stats = opt_stats;
++      serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
+       if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+               serr->ee.ee_data = skb_shinfo(skb)->tskey;
+               if (sk->sk_protocol == IPPROTO_TCP &&
+@@ -3829,7 +3854,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+        */
+       if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+               *skb_hwtstamps(skb) = *hwtstamps;
+-              __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
++              __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
+               sock_put(sk);
+       }
+ }
+@@ -3840,7 +3865,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+                    struct sock *sk, int tstype)
+ {
+       struct sk_buff *skb;
+-      bool tsonly;
++      bool tsonly, opt_stats = false;
+ 
+       if (!sk)
+               return;
+@@ -3853,9 +3878,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ #ifdef CONFIG_INET
+               if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+                   sk->sk_protocol == IPPROTO_TCP &&
+-                  sk->sk_type == SOCK_STREAM)
++                  sk->sk_type == SOCK_STREAM) {
+                       skb = tcp_get_timestamping_opt_stats(sk);
+-              else
++                      opt_stats = true;
++              } else
+ #endif
+                       skb = alloc_skb(0, GFP_ATOMIC);
+       } else {
+@@ -3874,7 +3900,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+       else
+               skb->tstamp = ktime_get_real();
+ 
+-      __skb_complete_tx_timestamp(skb, sk, tstype);
++      __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
+ }
+ EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index fc4bf4d54158..fcf53a399560 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
+               return false;
+ 
+       /* Support IP_PKTINFO on tstamp packets if requested, to correlate
+-       * timestamp with egress dev. Not possible for packets without dev
++       * timestamp with egress dev. Not possible for packets without iif
+        * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
+        */
+-      if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
+-          (!skb->dev))
++      info = PKTINFO_SKB_CB(skb);
++      if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
++          !info->ipi_ifindex)
+               return false;
+ 
+-      info = PKTINFO_SKB_CB(skb);
+       info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
+-      info->ipi_ifindex = skb->dev->ifindex;
+       return true;
+ }
+ 
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 68d77b1f1495..51e2f3c5e954 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
+ void ping_unhash(struct sock *sk)
+ {
+       struct inet_sock *isk = inet_sk(sk);
++
+       pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
++      write_lock_bh(&ping_table.lock);
+       if (sk_hashed(sk)) {
+-              write_lock_bh(&ping_table.lock);
+               hlist_nulls_del(&sk->sk_nulls_node);
+               sk_nulls_node_init(&sk->sk_nulls_node);
+               sock_put(sk);
+               isk->inet_num = 0;
+               isk->inet_sport = 0;
+               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+-              write_unlock_bh(&ping_table.lock);
+       }
++      write_unlock_bh(&ping_table.lock);
+ }
+ EXPORT_SYMBOL_GPL(ping_unhash);
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 8976887dc83e..6263af2f6ce8 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2608,7 +2608,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
+       skb_reset_network_header(skb);
+ 
+       /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
+-      ip_hdr(skb)->protocol = IPPROTO_ICMP;
++      ip_hdr(skb)->protocol = IPPROTO_UDP;
+       skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
+ 
+       src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 0efb4c7f6704..53fa3a4275de 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2301,6 +2301,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+       tcp_init_send_head(sk);
+       memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
+       __sk_dst_reset(sk);
++      tcp_saved_syn_free(tp);
+ 
+       WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+ 
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 79c4817abc94..6e3c512054a6 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -168,12 +168,8 @@ void tcp_assign_congestion_control(struct sock *sk)
+       }
+ out:
+       rcu_read_unlock();
++      memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ 
+-      /* Clear out private data before diag gets it and
+-       * the ca has not been initialized.
+-       */
+-      if (ca->get_info)
+-              memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+       if (ca->flags & TCP_CONG_NEEDS_ECN)
+               INET_ECN_xmit(sk);
+       else
+@@ -200,11 +196,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
+       tcp_cleanup_congestion_control(sk);
+       icsk->icsk_ca_ops = ca;
+       icsk->icsk_ca_setsockopt = 1;
++      memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ 
+-      if (sk->sk_state != TCP_CLOSE) {
+-              memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
++      if (sk->sk_state != TCP_CLOSE)
+               tcp_init_congestion_control(sk);
+-      }
+ }
+ 
+ /* Manage refcounts on socket close. */
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index a7bcc0ab5e99..ec76bbee2c35 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3263,14 +3263,24 @@ static void addrconf_gre_config(struct net_device *dev)
+ static int fixup_permanent_addr(struct inet6_dev *idev,
+                               struct inet6_ifaddr *ifp)
+ {
+-      if (!ifp->rt) {
+-              struct rt6_info *rt;
++      /* rt6i_ref == 0 means the host route was removed from the
++       * FIB, for example, if 'lo' device is taken down. In that
++       * case regenerate the host route.
++       */
++      if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) {
++              struct rt6_info *rt, *prev;
+ 
+               rt = addrconf_dst_alloc(idev, &ifp->addr, false);
+               if (unlikely(IS_ERR(rt)))
+                       return PTR_ERR(rt);
+ 
++              /* ifp->rt can be accessed outside of rtnl */
++              spin_lock(&ifp->lock);
++              prev = ifp->rt;
+               ifp->rt = rt;
++              spin_unlock(&ifp->lock);
++
++              ip6_rt_put(prev);
+       }
+ 
+       if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
+@@ -3618,14 +3628,19 @@ static int addrconf_ifdown(struct net_device *dev, int how)
+       INIT_LIST_HEAD(&del_list);
+       list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+               struct rt6_info *rt = NULL;
++              bool keep;
+ 
+               addrconf_del_dad_work(ifa);
+ 
++              keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
++                      !addr_is_local(&ifa->addr);
++              if (!keep)
++                      list_move(&ifa->if_list, &del_list);
++
+               write_unlock_bh(&idev->lock);
+               spin_lock_bh(&ifa->lock);
+ 
+-              if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+-                  !addr_is_local(&ifa->addr)) {
++              if (keep) {
+                       /* set state to skip the notifier below */
+                       state = INET6_IFADDR_STATE_DEAD;
+                       ifa->state = 0;
+@@ -3637,8 +3652,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
+               } else {
+                       state = ifa->state;
+                       ifa->state = INET6_IFADDR_STATE_DEAD;
+-
+-                      list_move(&ifa->if_list, &del_list);
+               }
+ 
+               spin_unlock_bh(&ifa->lock);
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index eec27f87efac..e011122ebd43 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
+  * At one point, excluding local errors was a quick test to identify icmp/icmp6
+  * errors. This is no longer true, but the test remained, so the v6 stack,
+  * unlike v4, also honors cmsg requests on all wifi and timestamp errors.
+- *
+- * Timestamp code paths do not initialize the fields expected by cmsg:
+- * the PKTINFO fields in skb->cb[]. Fill those in here.
+  */
+ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+                                     struct sock_exterr_skb *serr)
+@@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb,
+       if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL)
+               return false;
+ 
+-      if (!skb->dev)
++      if (!IP6CB(skb)->iif)
+               return false;
+ 
+-      if (skb->protocol == htons(ETH_P_IPV6))
+-              IP6CB(skb)->iif = skb->dev->ifindex;
+-      else
+-              PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex;
+-
+       return true;
+ }
+ 
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index 275cac628a95..d32e2110aff2 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -388,7 +388,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
+               icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
+                                 ((&hdr->segments_left) -
+                                  skb_network_header(skb)));
+-              kfree_skb(skb);
+               return -1;
+       }
+ 
+@@ -910,6 +909,8 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
+ {
+       switch (opt->type) {
+       case IPV6_SRCRT_TYPE_0:
++      case IPV6_SRCRT_STRICT:
++      case IPV6_SRCRT_TYPE_2:
+               ipv6_push_rthdr0(skb, proto, opt, addr_p, saddr);
+               break;
+       case IPV6_SRCRT_TYPE_4:
+@@ -1164,6 +1165,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
+ 
+       switch (opt->srcrt->type) {
+       case IPV6_SRCRT_TYPE_0:
++      case IPV6_SRCRT_STRICT:
++      case IPV6_SRCRT_TYPE_2:
+               fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
+               break;
+       case IPV6_SRCRT_TYPE_4:
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 75fac933c209..a9692ec0cd6d 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1037,7 +1037,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = t->net;
+       struct net_device_stats *stats = &t->dev->stats;
+-      struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++      struct ipv6hdr *ipv6h;
+       struct ipv6_tel_txoption opt;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct net_device *tdev;
+@@ -1057,26 +1057,28 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ 
+       /* NBMA tunnel */
+       if (ipv6_addr_any(&t->parms.raddr)) {
+-              struct in6_addr *addr6;
+-              struct neighbour *neigh;
+-              int addr_type;
++              if (skb->protocol == htons(ETH_P_IPV6)) {
++                      struct in6_addr *addr6;
++                      struct neighbour *neigh;
++                      int addr_type;
+ 
+-              if (!skb_dst(skb))
+-                      goto tx_err_link_failure;
++                      if (!skb_dst(skb))
++                              goto tx_err_link_failure;
+ 
+-              neigh = dst_neigh_lookup(skb_dst(skb),
+-                                       &ipv6_hdr(skb)->daddr);
+-              if (!neigh)
+-                      goto tx_err_link_failure;
++                      neigh = dst_neigh_lookup(skb_dst(skb),
++                                               &ipv6_hdr(skb)->daddr);
++                      if (!neigh)
++                              goto tx_err_link_failure;
+ 
+-              addr6 = (struct in6_addr *)&neigh->primary_key;
+-              addr_type = ipv6_addr_type(addr6);
++                      addr6 = (struct in6_addr *)&neigh->primary_key;
++                      addr_type = ipv6_addr_type(addr6);
+ 
+-              if (addr_type == IPV6_ADDR_ANY)
+-                      addr6 = &ipv6_hdr(skb)->daddr;
++                      if (addr_type == IPV6_ADDR_ANY)
++                              addr6 = &ipv6_hdr(skb)->daddr;
+ 
+-              memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+-              neigh_release(neigh);
++                      memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
++                      neigh_release(neigh);
++              }
+       } else if (!(t->parms.flags &
+                    (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+               /* enable the cache only only if the routing decision does
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 604d8953c775..72a00e4961ba 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -774,7 +774,8 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
+  *    Delete a VIF entry
+  */
+ 
+-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
++static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
++                     struct list_head *head)
+ {
+       struct mif_device *v;
+       struct net_device *dev;
+@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+                                            dev->ifindex, &in6_dev->cnf);
+       }
+ 
+-      if (v->flags & MIFF_REGISTER)
++      if ((v->flags & MIFF_REGISTER) && !notify)
+               unregister_netdevice_queue(dev, head);
+ 
+       dev_put(dev);
+@@ -1331,7 +1332,6 @@ static int ip6mr_device_event(struct notifier_block *this,
+       struct mr6_table *mrt;
+       struct mif_device *v;
+       int ct;
+-      LIST_HEAD(list);
+ 
+       if (event != NETDEV_UNREGISTER)
+               return NOTIFY_DONE;
+@@ -1340,10 +1340,9 @@ static int ip6mr_device_event(struct notifier_block *this,
+               v = &mrt->vif6_table[0];
+               for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+                       if (v->dev == dev)
+-                              mif6_delete(mrt, ct, &list);
++                              mif6_delete(mrt, ct, 1, NULL);
+               }
+       }
+-      unregister_netdevice_many(&list);
+ 
+       return NOTIFY_DONE;
+ }
+@@ -1552,7 +1551,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
+       for (i = 0; i < mrt->maxvif; i++) {
+               if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+                       continue;
+-              mif6_delete(mrt, i, &list);
++              mif6_delete(mrt, i, 0, &list);
+       }
+       unregister_netdevice_many(&list);
+ 
+@@ -1706,7 +1705,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
+               if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
+                       return -EFAULT;
+               rtnl_lock();
+-              ret = mif6_delete(mrt, mifi, NULL);
++              ret = mif6_delete(mrt, mifi, 0, NULL);
+               rtnl_unlock();
+               return ret;
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index ea89073c8247..294fb6f743cb 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -1174,8 +1174,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               skb = skb_peek(&sk->sk_receive_queue);
+               if (skb)
+-                      amount = skb_tail_pointer(skb) -
+-                              skb_transport_header(skb);
++                      amount = skb->len;
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+               return put_user(amount, (int __user *)arg);
+       }
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 7ea85370c11c..523681a5c898 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1831,6 +1831,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
+       int addr_type;
+       int err = -EINVAL;
+ 
++      /* RTF_PCPU is an internal flag; can not be set by userspace */
++      if (cfg->fc_flags & RTF_PCPU)
++              goto out;
++
+       if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
+               goto out;
+ #ifndef CONFIG_IPV6_SUBTREES
+diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
+index a855eb325b03..5f44ffed2576 100644
+--- a/net/ipv6/seg6.c
++++ b/net/ipv6/seg6.c
+@@ -53,6 +53,9 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
+               struct sr6_tlv *tlv;
+               unsigned int tlv_len;
+ 
++              if (trailing < sizeof(*tlv))
++                      return false;
++
+               tlv = (struct sr6_tlv *)((unsigned char *)srh + tlv_offset);
+               tlv_len = sizeof(*tlv) + tlv->len;
+ 
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index a646f3481240..fecad1098cf8 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1685,7 +1685,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+               struct kcm_attach info;
+ 
+               if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+-                      err = -EFAULT;
++                      return -EFAULT;
+ 
+               err = kcm_attach_ioctl(sock, &info);
+ 
+@@ -1695,7 +1695,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+               struct kcm_unattach info;
+ 
+               if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+-                      err = -EFAULT;
++                      return -EFAULT;
+ 
+               err = kcm_unattach_ioctl(sock, &info);
+ 
+@@ -1706,7 +1706,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+               struct socket *newsock = NULL;
+ 
+               if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+-                      err = -EFAULT;
++                      return -EFAULT;
+ 
+               err = kcm_clone(sock, &info, &newsock);
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 85948c69b236..56036ab5dcb7 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -278,7 +278,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
+ }
+ EXPORT_SYMBOL_GPL(l2tp_session_find);
+ 
+-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
++                                        bool do_ref)
+ {
+       int hash;
+       struct l2tp_session *session;
+@@ -288,6 +289,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+       for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
+               hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
+                       if (++count > nth) {
++                              l2tp_session_inc_refcount(session);
++                              if (do_ref && session->ref)
++                                      session->ref(session);
+                               read_unlock_bh(&tunnel->hlist_lock);
+                               return session;
+                       }
+@@ -298,7 +302,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+ 
+       return NULL;
+ }
+-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
++EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
+ 
+ /* Lookup a session by interface name.
+  * This is very inefficient but is only used by management interfaces.
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index aebf281d09ee..221648b07b3c 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -233,7 +233,8 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
+ struct l2tp_session *l2tp_session_find(struct net *net,
+                                      struct l2tp_tunnel *tunnel,
+                                      u32 session_id);
+-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
++struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
++                                        bool do_ref);
+ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+ struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
+index 2d6760a2ae34..d100aed3d06f 100644
+--- a/net/l2tp/l2tp_debugfs.c
++++ b/net/l2tp/l2tp_debugfs.c
+@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
+ 
+ static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
+ {
+-      pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
++      pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
+       pd->session_idx++;
+ 
+       if (pd->session == NULL) {
+@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
+       }
+ 
+       /* Show the tunnel or session context */
+-      if (pd->session == NULL)
++      if (!pd->session) {
+               l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
+-      else
++      } else {
+               l2tp_dfs_seq_session_show(m, pd->session);
++              if (pd->session->deref)
++                      pd->session->deref(pd->session);
++              l2tp_session_dec_refcount(pd->session);
++      }
+ 
+ out:
+       return 0;
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 3ed30153a6f5..fa2bcfce53df 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -171,9 +171,10 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 
+       tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+       tunnel = l2tp_tunnel_find(net, tunnel_id);
+-      if (tunnel != NULL)
++      if (tunnel) {
+               sk = tunnel->sock;
+-      else {
++              sock_hold(sk);
++      } else {
+               struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
+ 
+               read_lock_bh(&l2tp_ip_lock);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index f47c45250f86..4e4fa1538cbb 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -183,9 +183,10 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 
+       tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+       tunnel = l2tp_tunnel_find(net, tunnel_id);
+-      if (tunnel != NULL)
++      if (tunnel) {
+               sk = tunnel->sock;
+-      else {
++              sock_hold(sk);
++      } else {
+               struct ipv6hdr *iph = ipv6_hdr(skb);
+ 
+               read_lock_bh(&l2tp_ip6_lock);
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index 3620fba31786..ad191a786806 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -852,7 +852,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+                               goto out;
+               }
+ 
+-              session = l2tp_session_find_nth(tunnel, si);
++              session = l2tp_session_get_nth(tunnel, si, false);
+               if (session == NULL) {
+                       ti++;
+                       tunnel = NULL;
+@@ -862,8 +862,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ 
+               if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
+                                        cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-                                       session, L2TP_CMD_SESSION_GET) < 0)
++                                       session, L2TP_CMD_SESSION_GET) < 0) {
++                      l2tp_session_dec_refcount(session);
+                       break;
++              }
++              l2tp_session_dec_refcount(session);
+ 
+               si++;
+       }
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 36cc56fd0418..781d22272f4a 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
+ static void pppol2tp_session_destruct(struct sock *sk)
+ {
+       struct l2tp_session *session = sk->sk_user_data;
++
++      skb_queue_purge(&sk->sk_receive_queue);
++      skb_queue_purge(&sk->sk_write_queue);
++
+       if (session) {
+               sk->sk_user_data = NULL;
+               BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
+               l2tp_session_queue_purge(session);
+               sock_put(sk);
+       }
+-      skb_queue_purge(&sk->sk_receive_queue);
+-      skb_queue_purge(&sk->sk_write_queue);
+-
+       release_sock(sk);
+ 
+       /* This will delete the session context via
+@@ -1554,7 +1555,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
+ 
+ static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
+ {
+-      pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
++      pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
+       pd->session_idx++;
+ 
+       if (pd->session == NULL) {
+@@ -1681,10 +1682,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
+ 
+       /* Show the tunnel or session context.
+        */
+-      if (pd->session == NULL)
++      if (!pd->session) {
+               pppol2tp_seq_tunnel_show(m, pd->tunnel);
+-      else
++      } else {
+               pppol2tp_seq_session_show(m, pd->session);
++              if (pd->session->deref)
++                      pd->session->deref(pd->session);
++              l2tp_session_dec_refcount(pd->session);
++      }
+ 
+ out:
+       return 0;
+@@ -1843,4 +1848,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(PPPOL2TP_DRV_VERSION);
+ MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
+-MODULE_ALIAS_L2TP_PWTYPE(11);
++MODULE_ALIAS_L2TP_PWTYPE(7);
+diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
+index 2c0a00f7f1b7..bb789359a29b 100644
+--- a/net/openvswitch/flow.c
++++ b/net/openvswitch/flow.c
+@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+ 
+       /* Link layer. */
+       clear_vlan(key);
+-      if (key->mac_proto == MAC_PROTO_NONE) {
++      if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
+               if (unlikely(eth_type_vlan(skb->protocol)))
+                       return -EINVAL;
+ 
+@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+ 
+ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
+ {
+-      return key_extract(skb, key);
++      int res;
++
++      res = key_extract(skb, key);
++      if (!res)
++              key->mac_proto &= ~SW_FLOW_KEY_INVALID;
++
++      return res;
+ }
+ 
+ static int key_extract_mac_proto(struct sk_buff *skb)
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 5c919933a39b..0f074c96f43f 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3644,6 +3644,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+                       return -EBUSY;
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
++              if (val > INT_MAX)
++                      return -EINVAL;
+               po->tp_reserve = val;
+               return 0;
+       }
+@@ -4189,6 +4191,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+               rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
+               if (unlikely(rb->frames_per_block == 0))
+                       goto out;
++              if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
++                      goto out;
+               if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
+                                       req->tp_frame_nr))
+                       goto out;
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index d04a8b66098c..6932cf34fea8 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -6860,6 +6860,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
+       if (sock->state != SS_UNCONNECTED)
+               goto out;
+ 
++      if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
++              goto out;
++
+       /* If backlog is zero, disable listening. */
+       if (!backlog) {
+               if (sctp_sstate(sk, CLOSED))
+diff --git a/net/socket.c b/net/socket.c
+index 02bd9249e295..6361d3161120 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -654,6 +654,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
+ }
+ EXPORT_SYMBOL(kernel_sendmsg);
+ 
++static bool skb_is_err_queue(const struct sk_buff *skb)
++{
++      /* pkt_type of skbs enqueued on the error queue are set to
++       * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do
++       * in recvmsg, since skbs received on a local socket will never
++       * have a pkt_type of PACKET_OUTGOING.
++       */
++      return skb->pkt_type == PACKET_OUTGOING;
++}
++
+ /*
+  * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
+  */
+@@ -697,7 +707,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+               put_cmsg(msg, SOL_SOCKET,
+                        SCM_TIMESTAMPING, sizeof(tss), &tss);
+ 
+-              if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS))
++              if (skb_is_err_queue(skb) && skb->len &&
++                  SKB_EXT_ERR(skb)->opt_stats)
+                       put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS,
+                                skb->len, skb->data);
+       }
+diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
+index 3b693e924db7..12ba83367b1b 100644
+--- a/sound/core/seq/seq_lock.c
++++ b/sound/core/seq/seq_lock.c
+@@ -28,19 +28,16 @@
+ /* wait until all locks are released */
+ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
+ {
+-      int max_count = 5 * HZ;
++      int warn_count = 5 * HZ;
+ 
+       if (atomic_read(lockp) < 0) {
+               pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
+               return;
+       }
+       while (atomic_read(lockp) > 0) {
+-              if (max_count == 0) {
+-                      pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
+-                      break;
+-              }
++              if (warn_count-- == 0)
++                      pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
+               schedule_timeout_uninterruptible(1);
+-              max_count--;
+       }
+ }
+ 
+diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
+index f6769312ebfc..c3768cd494a5 100644
+--- a/sound/firewire/lib.h
++++ b/sound/firewire/lib.h
+@@ -45,7 +45,7 @@ struct snd_fw_async_midi_port {
+ 
+       struct snd_rawmidi_substream *substream;
+       snd_fw_async_midi_port_fill fill;
+-      unsigned int consume_bytes;
++      int consume_bytes;
+ };
+ 
+ int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
+diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
+index e629b88f7d93..474b06d8acd1 100644
+--- a/sound/firewire/oxfw/oxfw.c
++++ b/sound/firewire/oxfw/oxfw.c
+@@ -226,11 +226,11 @@ static void do_registration(struct work_struct *work)
+       if (err < 0)
+               goto error;
+ 
+-      err = detect_quirks(oxfw);
++      err = snd_oxfw_stream_discover(oxfw);
+       if (err < 0)
+               goto error;
+ 
+-      err = snd_oxfw_stream_discover(oxfw);
++      err = detect_quirks(oxfw);
+       if (err < 0)
+               goto error;
+ 
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 1bd985f01c73..342d8425bc1f 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -621,7 +621,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
+               .codec_dai_name = "snd-soc-dummy-dai",
+               .codec_name = "snd-soc-dummy",
+               .platform_name = "sst-mfld-platform",
+-              .ignore_suspend = 1,
++              .nonatomic = true,
+               .dynamic = 1,
+               .dpcm_playback = 1,
+               .dpcm_capture = 1,
+@@ -634,7 +634,6 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
+               .codec_dai_name = "snd-soc-dummy-dai",
+               .codec_name = "snd-soc-dummy",
+               .platform_name = "sst-mfld-platform",
+-              .ignore_suspend = 1,
+               .nonatomic = true,
+               .dynamic = 1,
+               .dpcm_playback = 1,
+@@ -661,6 +660,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
+                                               | SND_SOC_DAIFMT_CBS_CFS,
+               .be_hw_params_fixup = byt_rt5640_codec_fixup,
+               .ignore_suspend = 1,
++              .nonatomic = true,
+               .dpcm_playback = 1,
+               .dpcm_capture = 1,
+               .init = byt_rt5640_init,
+diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
+index 2d24dc04b597..d938328dc64f 100644
+--- a/sound/soc/intel/boards/bytcr_rt5651.c
++++ b/sound/soc/intel/boards/bytcr_rt5651.c
+@@ -235,7 +235,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
+               .codec_dai_name = "snd-soc-dummy-dai",
+               .codec_name = "snd-soc-dummy",
+               .platform_name = "sst-mfld-platform",
+-              .ignore_suspend = 1,
+               .nonatomic = true,
+               .dynamic = 1,
+               .dpcm_playback = 1,
+@@ -249,7 +248,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
+               .codec_dai_name = "snd-soc-dummy-dai",
+               .codec_name = "snd-soc-dummy",
+               .platform_name = "sst-mfld-platform",
+-              .ignore_suspend = 1,
+               .nonatomic = true,
+               .dynamic = 1,
+               .dpcm_playback = 1,
+diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
+index 853d7e43434a..e1aea9e60f33 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -2876,6 +2876,26 @@ static struct bpf_test tests[] = {
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
+       {
++              "overlapping checks for direct packet access",
++              .insns = {
++                      BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++                                  offsetof(struct __sk_buff, data)),
++                      BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
++                                  offsetof(struct __sk_buff, data_end)),
++                      BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
++                      BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
++                      BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
++                      BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
++                      BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
++                      BPF_MOV64_IMM(BPF_REG_0, 0),
++                      BPF_EXIT_INSN(),
++              },
++              .result = ACCEPT,
++              .prog_type = BPF_PROG_TYPE_LWT_XMIT,
++      },
++      {
+               "invalid access of tc_classid for LWT_IN",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
