commit:     97b52b16ce743e5b7a476c30350fbabe35e68f1c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 14 13:29:09 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 14 13:29:09 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=97b52b16

Linux patch 4.11.1

 0000_README             |    4 +
 1000_linux-4.11.1.patch | 1626 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1630 insertions(+)

diff --git a/0000_README b/0000_README
index 70e3319..85e105d 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-4.11.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.11.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-4.11.1.patch b/1000_linux-4.11.1.patch
new file mode 100644
index 0000000..3c56ecd
--- /dev/null
+++ b/1000_linux-4.11.1.patch
@@ -0,0 +1,1626 @@
+diff --git a/Makefile b/Makefile
+index 4b074a904106..9dc2aec1c2e5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 11
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
+index 220ba207be91..36ec9c8f6e16 100644
+--- a/arch/arm/include/asm/device.h
++++ b/arch/arm/include/asm/device.h
+@@ -16,6 +16,9 @@ struct dev_archdata {
+ #ifdef CONFIG_ARM_DMA_USE_IOMMU
+       struct dma_iommu_mapping        *mapping;
+ #endif
++#ifdef CONFIG_XEN
++      const struct dma_map_ops *dev_dma_ops;
++#endif
+       bool dma_coherent;
+ };
+ 
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index 716656925975..680d3f3889e7 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -16,19 +16,9 @@
+ extern const struct dma_map_ops arm_dma_ops;
+ extern const struct dma_map_ops arm_coherent_dma_ops;
+ 
+-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
+-{
+-      if (dev && dev->dma_ops)
+-              return dev->dma_ops;
+-      return &arm_dma_ops;
+-}
+-
+ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+ {
+-      if (xen_initial_domain())
+-              return xen_dma_ops;
+-      else
+-              return __generic_dma_ops(NULL);
++      return &arm_dma_ops;
+ }
+ 
+ #define HAVE_ARCH_DMA_SUPPORTED 1
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 475811f5383a..0268584f1fa0 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -2414,6 +2414,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+               dma_ops = arm_get_dma_map_ops(coherent);
+ 
+       set_dma_ops(dev, dma_ops);
++
++#ifdef CONFIG_XEN
++      if (xen_initial_domain()) {
++              dev->archdata.dev_dma_ops = dev->dma_ops;
++              dev->dma_ops = xen_dma_ops;
++      }
++#endif
+ }
+ 
+ void arch_teardown_dma_ops(struct device *dev)
+diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
+index 73d5bab015eb..5a5fa47a6b18 100644
+--- a/arch/arm64/include/asm/device.h
++++ b/arch/arm64/include/asm/device.h
+@@ -20,6 +20,9 @@ struct dev_archdata {
+ #ifdef CONFIG_IOMMU_API
+       void *iommu;                    /* private IOMMU data */
+ #endif
++#ifdef CONFIG_XEN
++      const struct dma_map_ops *dev_dma_ops;
++#endif
+       bool dma_coherent;
+ };
+ 
+diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
+index 505756cdc67a..5392dbeffa45 100644
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -27,11 +27,8 @@
+ #define DMA_ERROR_CODE        (~(dma_addr_t)0)
+ extern const struct dma_map_ops dummy_dma_ops;
+ 
+-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+ {
+-      if (dev && dev->dma_ops)
+-              return dev->dma_ops;
+-
+       /*
+        * We expect no ISA devices, and all other DMA masters are expected to
+        * have someone call arch_setup_dma_ops at device creation time.
+@@ -39,14 +36,6 @@ static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
+       return &dummy_dma_ops;
+ }
+ 
+-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+-{
+-      if (xen_initial_domain())
+-              return xen_dma_ops;
+-      else
+-              return __generic_dma_ops(NULL);
+-}
+-
+ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent);
+ #define arch_setup_dma_ops    arch_setup_dma_ops
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 81cdb2e844ed..7f8b37e85a2b 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -977,4 +977,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ 
+       dev->archdata.dma_coherent = coherent;
+       __iommu_setup_dma_ops(dev, dma_base, size, iommu);
++
++#ifdef CONFIG_XEN
++      if (xen_initial_domain()) {
++              dev->archdata.dev_dma_ops = dev->dma_ops;
++              dev->dma_ops = xen_dma_ops;
++      }
++#endif
+ }
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index a785554916c0..ce8ab0409deb 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -779,14 +779,14 @@ static int build_body(struct jit_ctx *ctx)
+               int ret;
+ 
+               ret = build_insn(insn, ctx);
+-
+-              if (ctx->image == NULL)
+-                      ctx->offset[i] = ctx->idx;
+-
+               if (ret > 0) {
+                       i++;
++                      if (ctx->image == NULL)
++                              ctx->offset[i] = ctx->idx;
+                       continue;
+               }
++              if (ctx->image == NULL)
++                      ctx->offset[i] = ctx->idx;
+               if (ret)
+                       return ret;
+       }
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 44101196d02b..41a407328667 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -939,3 +939,9 @@ ENTRY(__retl_o1)
+       retl
+        mov    %o1, %o0
+ ENDPROC(__retl_o1)
++
++ENTRY(__retl_o1_asi)
++      wr      %o5, 0x0, %asi
++      retl
++       mov    %o1, %o0
++ENDPROC(__retl_o1_asi)
+diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S
+index 8e7a843ddd88..2fbf6297d57c 100644
+--- a/arch/sparc/lib/GENbzero.S
++++ b/arch/sparc/lib/GENbzero.S
+@@ -8,7 +8,7 @@
+ 98:   x,y;                    \
+       .section __ex_table,"a";\
+       .align 4;               \
+-      .word 98b, __retl_o1;   \
++      .word 98b, __retl_o1_asi;\
+       .text;                  \
+       .align 4;
+ 
+diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S
+index beab29bf419b..33053bdf3766 100644
+--- a/arch/sparc/lib/NGbzero.S
++++ b/arch/sparc/lib/NGbzero.S
+@@ -8,7 +8,7 @@
+ 98:   x,y;                    \
+       .section __ex_table,"a";\
+       .align 4;               \
+-      .word 98b, __retl_o1;   \
++      .word 98b, __retl_o1_asi;\
+       .text;                  \
+       .align 4;
+ 
+diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
+index 608a79d5a466..e6911caf5bbf 100644
+--- a/arch/x86/include/asm/xen/events.h
++++ b/arch/x86/include/asm/xen/events.h
+@@ -20,4 +20,15 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
+ /* No need for a barrier -- XCHG is a barrier on x86. */
+ #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+ 
++extern int xen_have_vector_callback;
++
++/*
++ * Events delivered via platform PCI interrupts are always
++ * routed to vcpu 0 and hence cannot be rebound.
++ */
++static inline bool xen_support_evtchn_rebind(void)
++{
++      return (!xen_hvm_domain() || xen_have_vector_callback);
++}
++
+ #endif /* _ASM_X86_XEN_EVENTS_H */
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 292ab0364a89..c4b3646bd04c 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -447,7 +447,7 @@ void __init xen_msi_init(void)
+ 
+ int __init pci_xen_hvm_init(void)
+ {
+-      if (!xen_feature(XENFEAT_hvm_pirqs))
++      if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
+               return 0;
+ 
+ #ifdef CONFIG_ACPI
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index ec1d5c46e58f..29b239025b57 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -138,6 +138,8 @@ struct shared_info xen_dummy_shared_info;
+ void *xen_initial_gdt;
+ 
+ RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
++__read_mostly int xen_have_vector_callback;
++EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+ 
+ static int xen_cpu_up_prepare(unsigned int cpu);
+ static int xen_cpu_up_online(unsigned int cpu);
+@@ -1861,7 +1863,9 @@ static int xen_cpu_up_prepare(unsigned int cpu)
+               xen_vcpu_setup(cpu);
+       }
+ 
+-      if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
++      if (xen_pv_domain() ||
++          (xen_have_vector_callback &&
++           xen_feature(XENFEAT_hvm_safe_pvclock)))
+               xen_setup_timer(cpu);
+ 
+       rc = xen_smp_intr_init(cpu);
+@@ -1877,7 +1881,9 @@ static int xen_cpu_dead(unsigned int cpu)
+ {
+       xen_smp_intr_free(cpu);
+ 
+-      if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
++      if (xen_pv_domain() ||
++          (xen_have_vector_callback &&
++           xen_feature(XENFEAT_hvm_safe_pvclock)))
+               xen_teardown_timer(cpu);
+ 
+       return 0;
+@@ -1916,8 +1922,8 @@ static void __init xen_hvm_guest_init(void)
+ 
+       xen_panic_handler_init();
+ 
+-      BUG_ON(!xen_feature(XENFEAT_hvm_callback_vector));
+-
++      if (xen_feature(XENFEAT_hvm_callback_vector))
++              xen_have_vector_callback = 1;
+       xen_hvm_smp_init();
+       WARN_ON(xen_cpuhp_setup());
+       xen_unplug_emulated_devices();
+@@ -1958,7 +1964,7 @@ bool xen_hvm_need_lapic(void)
+               return false;
+       if (!xen_hvm_domain())
+               return false;
+-      if (xen_feature(XENFEAT_hvm_pirqs))
++      if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
+               return false;
+       return true;
+ }
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 7ff2f1bfb7ec..4e6b65baf8e2 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -742,6 +742,8 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+ 
+ void __init xen_hvm_smp_init(void)
+ {
++      if (!xen_have_vector_callback)
++              return;
+       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+       smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
+       smp_ops.cpu_die = xen_cpu_die;
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 1e69956d7852..4535627cf532 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -432,6 +432,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
+ 
+ void __init xen_hvm_init_time_ops(void)
+ {
++      /* vector callback is needed otherwise we cannot receive interrupts
++       * on cpu > 0 and at this point we don't know how many cpus are
++       * available */
++      if (!xen_have_vector_callback)
++              return;
+       if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
+               printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
+                               "disable pv timer\n");
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index 9f0ff5ba4f84..35c5af1ea068 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -417,7 +417,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
+       bi->tuple_size = template->tuple_size;
+       bi->tag_size = template->tag_size;
+ 
+-      blk_integrity_revalidate(disk);
++      disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ }
+ EXPORT_SYMBOL(blk_integrity_register);
+ 
+@@ -430,26 +430,11 @@ EXPORT_SYMBOL(blk_integrity_register);
+  */
+ void blk_integrity_unregister(struct gendisk *disk)
+ {
+-      blk_integrity_revalidate(disk);
++      disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+       memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+ }
+ EXPORT_SYMBOL(blk_integrity_unregister);
+ 
+-void blk_integrity_revalidate(struct gendisk *disk)
+-{
+-      struct blk_integrity *bi = &disk->queue->integrity;
+-
+-      if (!(disk->flags & GENHD_FL_UP))
+-              return;
+-
+-      if (bi->profile)
+-              disk->queue->backing_dev_info->capabilities |=
+-                      BDI_CAP_STABLE_WRITES;
+-      else
+-              disk->queue->backing_dev_info->capabilities &=
+-                      ~BDI_CAP_STABLE_WRITES;
+-}
+-
+ void blk_integrity_add(struct gendisk *disk)
+ {
+       if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 7afb9907821f..0171a2faad68 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -497,7 +497,6 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+ 
+       if (disk->fops->revalidate_disk)
+               disk->fops->revalidate_disk(disk);
+-      blk_integrity_revalidate(disk);
+       check_disk_size_change(disk, bdev);
+       bdev->bd_invalidated = 0;
+       if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
+diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
+index 86279f5022c2..88f16cdf6a4b 100644
+--- a/drivers/gpu/drm/sti/sti_gdp.c
++++ b/drivers/gpu/drm/sti/sti_gdp.c
+@@ -66,7 +66,9 @@ static struct gdp_format_to_str {
+ #define GAM_GDP_ALPHARANGE_255  BIT(5)
+ #define GAM_GDP_AGC_FULL_RANGE  0x00808080
+ #define GAM_GDP_PPT_IGNORE      (BIT(1) | BIT(0))
+-#define GAM_GDP_SIZE_MAX        0x7FF
++
++#define GAM_GDP_SIZE_MAX_WIDTH  3840
++#define GAM_GDP_SIZE_MAX_HEIGHT 2160
+ 
+ #define GDP_NODE_NB_BANK        2
+ #define GDP_NODE_PER_FIELD      2
+@@ -632,8 +634,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
+       /* src_x are in 16.16 format */
+       src_x = state->src_x >> 16;
+       src_y = state->src_y >> 16;
+-      src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+-      src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
++      src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
++      src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
+ 
+       format = sti_gdp_fourcc2format(fb->format->format);
+       if (format == -1) {
+@@ -741,8 +743,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+       /* src_x are in 16.16 format */
+       src_x = state->src_x >> 16;
+       src_y = state->src_y >> 16;
+-      src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+-      src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
++      src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
++      src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
+ 
+       list = sti_gdp_get_free_nodes(gdp);
+       top_field = list->top_field;
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 4da6fc6b1ffd..3d040f52539c 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1848,7 +1848,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
+       if (r)
+               goto out;
+ 
+-      param->data_size = sizeof(*param);
++      param->data_size = offsetof(struct dm_ioctl, data);
+       r = fn(param, input_param_size);
+ 
+       if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 1f1e54ba0ecb..2c02a4cebc24 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3000,7 +3000,8 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+               INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+ 
+       bp->ntp_fltr_count = 0;
+-      bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
++      bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
++                                  sizeof(long),
+                                   GFP_KERNEL);
+ 
+       if (!bp->ntp_fltr_bmap)
+diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
+index 30606b11b128..377fb0f22a5b 100644
+--- a/drivers/net/ethernet/cadence/macb.c
++++ b/drivers/net/ethernet/cadence/macb.c
+@@ -432,15 +432,17 @@ static int macb_mii_probe(struct net_device *dev)
+       }
+ 
+       pdata = dev_get_platdata(&bp->pdev->dev);
+-      if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+-              ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
+-                                      "phy int");
+-              if (!ret) {
+-                      phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+-                      phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
++      if (pdata) {
++              if (gpio_is_valid(pdata->phy_irq_pin)) {
++                      ret = devm_gpio_request(&bp->pdev->dev,
++                                              pdata->phy_irq_pin, "phy int");
++                      if (!ret) {
++                              phy_irq = gpio_to_irq(pdata->phy_irq_pin);
++                              phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
++                      }
++              } else {
++                      phydev->irq = PHY_POLL;
+               }
+-      } else {
+-              phydev->irq = PHY_POLL;
+       }
+ 
+       /* attach the mac to the phy */
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 7074b40ebd7f..dec5d563ab19 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -1244,7 +1244,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
+               metadata = true;
+ 
+       if (data[IFLA_GENEVE_UDP_CSUM] &&
+-          !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
++          nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
+               info.key.tun_flags |= TUNNEL_CSUM;
+ 
+       if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
+diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
+index 0a0412524cec..0a5f62e0efcc 100644
+--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
+@@ -203,11 +203,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
+                          &md->mux_handle, md, md->mii_bus);
+       if (rc) {
+               dev_info(md->dev, "mdiomux initialization failed\n");
+-              goto out;
++              goto out_register;
+       }
+ 
+       dev_info(md->dev, "iProc mdiomux registered\n");
+       return 0;
++
++out_register:
++      mdiobus_unregister(bus);
+ out:
+       mdiobus_free(bus);
+       return rc;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2474618404f5..4e34568db64f 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -907,6 +907,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
+       {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
++      {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+      {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 60da86a8d95b..f6b17fb58877 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -198,7 +198,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+       int ret;
+       struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_pub *drvr = ifp->drvr;
+-      struct ethhdr *eh = (struct ethhdr *)(skb->data);
++      struct ethhdr *eh;
+ 
+       brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
+ 
+@@ -211,22 +211,13 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+               goto done;
+       }
+ 
+-      /* Make sure there's enough room for any header */
+-      if (skb_headroom(skb) < drvr->hdrlen) {
+-              struct sk_buff *skb2;
+-
+-              brcmf_dbg(INFO, "%s: insufficient headroom\n",
++      /* Make sure there's enough writable headroom*/
++      ret = skb_cow_head(skb, drvr->hdrlen);
++      if (ret < 0) {
++              brcmf_err("%s: skb_cow_head failed\n",
+                         brcmf_ifname(ifp));
+-              drvr->bus_if->tx_realloc++;
+-              skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
+               dev_kfree_skb(skb);
+-              skb = skb2;
+-              if (skb == NULL) {
+-                      brcmf_err("%s: skb_realloc_headroom failed\n",
+-                                brcmf_ifname(ifp));
+-                      ret = -ENOMEM;
+-                      goto done;
+-              }
++              goto done;
+       }
+ 
+       /* validate length for ether packet */
+@@ -236,6 +227,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+               goto done;
+       }
+ 
++      eh = (struct ethhdr *)(skb->data);
++
+       if (eh->h_proto == htons(ETH_P_PAE))
+               atomic_inc(&ifp->pend_8021x_cnt);
+ 
+diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
+index 509e2b341bd6..677f7c40b25a 100644
+--- a/drivers/power/supply/lp8788-charger.c
++++ b/drivers/power/supply/lp8788-charger.c
+@@ -651,7 +651,7 @@ static ssize_t lp8788_show_eoc_time(struct device *dev,
+ {
+       struct lp8788_charger *pchg = dev_get_drvdata(dev);
+       char *stime[] = { "400ms", "5min", "10min", "15min",
+-                      "20min", "25min", "30min" "No timeout" };
++                      "20min", "25min", "30min", "No timeout" };
+       u8 val;
+ 
+       lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val);
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 6a53577772c9..42807ce11c42 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1312,6 +1312,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+       if (!VALID_EVTCHN(evtchn))
+               return -1;
+ 
++      if (!xen_support_evtchn_rebind())
++              return -1;
++
+       /* Send future instances of this interrupt to other vcpu. */
+       bind_vcpu.port = evtchn;
+       bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
+@@ -1645,15 +1648,20 @@ void xen_callback_vector(void)
+ {
+       int rc;
+       uint64_t callback_via;
+-
+-      callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
+-      rc = xen_set_callback_via(callback_via);
+-      BUG_ON(rc);
+-      pr_info("Xen HVM callback vector for event delivery is enabled\n");
+-      /* in the restore case the vector has already been allocated */
+-      if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+-              alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+-                              xen_hvm_callback_vector);
++      if (xen_have_vector_callback) {
++              callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
++              rc = xen_set_callback_via(callback_via);
++              if (rc) {
++                      pr_err("Request for Xen HVM callback vector failed\n");
++                      xen_have_vector_callback = 0;
++                      return;
++              }
++              pr_info("Xen HVM callback vector for event delivery is enabled\n");
++              /* in the restore case the vector has already been allocated */
++              if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
++                      alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
++                                      xen_hvm_callback_vector);
++      }
+ }
+ #else
+ void xen_callback_vector(void) {}
+diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
+index 2a165cc8a43c..1c4deac9b0f8 100644
+--- a/drivers/xen/platform-pci.c
++++ b/drivers/xen/platform-pci.c
+@@ -67,7 +67,7 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
+       pin = pdev->pin;
+ 
+       /* We don't know the GSI. Specify the PCI INTx line instead. */
+-      return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
++      return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
+               ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+               ((uint64_t)pdev->bus->number << 16) |
+               ((uint64_t)(pdev->devfn & 0xff) << 8) |
+@@ -90,7 +90,7 @@ static int xen_allocate_irq(struct pci_dev *pdev)
+ static int platform_pci_resume(struct pci_dev *pdev)
+ {
+       int err;
+-      if (!xen_pv_domain())
++      if (xen_have_vector_callback)
+               return 0;
+       err = xen_set_callback_via(callback_via);
+       if (err) {
+@@ -138,14 +138,7 @@ static int platform_pci_probe(struct pci_dev *pdev,
+       platform_mmio = mmio_addr;
+       platform_mmiolen = mmio_len;
+ 
+-      /* 
+-       * Xen HVM guests always use the vector callback mechanism.
+-       * L1 Dom0 in a nested Xen environment is a PV guest inside in an
+-       * HVM environment. It needs the platform-pci driver to get
+-       * notifications from L0 Xen, but it cannot use the vector callback
+-       * as it is not exported by L1 Xen.
+-       */
+-      if (xen_pv_domain()) {
++      if (!xen_have_vector_callback) {
+               ret = xen_allocate_irq(pdev);
+               if (ret) {
+                      dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 2eca00ec4370..56039dfbc674 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1451,7 +1451,6 @@ int revalidate_disk(struct gendisk *disk)
+ 
+       if (disk->fops->revalidate_disk)
+               ret = disk->fops->revalidate_disk(disk);
+-      blk_integrity_revalidate(disk);
+       bdev = bdget_disk(disk, 0);
+       if (!bdev)
+               return ret;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 96fe8ed73100..858aef564a58 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1483,6 +1483,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+               return 1;
+       }
+ 
++      if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
++              f2fs_msg(sb, KERN_INFO,
++                      "Invalid segment count (%u)",
++                      le32_to_cpu(raw_super->segment_count));
++              return 1;
++      }
++
+       /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
+       if (sanity_check_area_boundary(sbi, bh))
+               return 1;
+diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
+index e2d239ed4c60..661200e6d281 100644
+--- a/include/linux/f2fs_fs.h
++++ b/include/linux/f2fs_fs.h
+@@ -302,6 +302,12 @@ struct f2fs_nat_block {
+ #define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+ 
+ /*
++ * F2FS uses 4 bytes to represent block address. As a result, supported size of
++ * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
++ */
++#define F2FS_MAX_SEGMENT       ((16 * 1024 * 1024) / 2)
++
++/*
+  * Note that f2fs_sit_entry->vblocks has the following bit-field information.
+  * [15:10] : allocation type such as CURSEG_XXXX_TYPE
+  * [9:0] : valid block count
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index 76f39754e7b0..76d6a1cd4153 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -722,11 +722,9 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
+ #if defined(CONFIG_BLK_DEV_INTEGRITY)
+ extern void blk_integrity_add(struct gendisk *);
+ extern void blk_integrity_del(struct gendisk *);
+-extern void blk_integrity_revalidate(struct gendisk *);
+ #else /* CONFIG_BLK_DEV_INTEGRITY */
+ static inline void blk_integrity_add(struct gendisk *disk) { }
+ static inline void blk_integrity_del(struct gendisk *disk) { }
+-static inline void blk_integrity_revalidate(struct gendisk *disk) { }
+ #endif        /* CONFIG_BLK_DEV_INTEGRITY */
+ 
+ #else /* CONFIG_BLOCK */
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 17c6fd84e287..4d93c5ec9b12 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -20,6 +20,8 @@
+ #define ADDRCONF_TIMER_FUZZ           (HZ / 4)
+ #define ADDRCONF_TIMER_FUZZ_MAX               (HZ)
+ 
++#define ADDRCONF_NOTIFY_PRIORITY      0
++
+ #include <linux/in.h>
+ #include <linux/in6.h>
+ 
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 9dc2c182a263..f5e625f53367 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -84,6 +84,7 @@ struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
+                              int ifindex, struct flowi6 *fl6, int flags);
+ 
++void ip6_route_init_special_entries(void);
+ int ip6_route_init(void);
+ void ip6_route_cleanup(void);
+ 
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index 0caee631a836..b94006f6fbdd 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -6,10 +6,12 @@
+ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+                              __be16 dport);
+-u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+-                             __be16 sport, __be16 dport, u32 *tsoff);
+-u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+-                               __be16 sport, __be16 dport, u32 *tsoff);
++u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
++                 __be16 sport, __be16 dport);
++u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr);
++u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
++                   __be16 sport, __be16 dport);
++u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr);
+ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+                               __be16 sport, __be16 dport);
+ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 6ec4ea652f3f..6423b4698880 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -471,7 +471,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
+ /* From syncookies.c */
+ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+                                struct request_sock *req,
+-                               struct dst_entry *dst);
++                               struct dst_entry *dst, u32 tsoff);
+ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+                     u32 cookie);
+ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
+@@ -1816,7 +1816,8 @@ struct tcp_request_sock_ops {
+       struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
+                                      const struct request_sock *req,
+                                      bool *strict);
+-      __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
++      u32 (*init_seq)(const struct sk_buff *skb);
++      u32 (*init_ts_off)(const struct sk_buff *skb);
+       int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
+                          struct flowi *fl, struct request_sock *req,
+                          struct tcp_fastopen_cookie *foc,
+diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
+index 95ce6ac3a971..b0a2bfc8d647 100644
+--- a/include/xen/arm/page-coherent.h
++++ b/include/xen/arm/page-coherent.h
+@@ -2,8 +2,16 @@
+ #define _ASM_ARM_XEN_PAGE_COHERENT_H
+ 
+ #include <asm/page.h>
++#include <asm/dma-mapping.h>
+ #include <linux/dma-mapping.h>
+ 
++static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
++{
++      if (dev && dev->archdata.dev_dma_ops)
++              return dev->archdata.dev_dma_ops;
++      return get_arch_dma_ops(NULL);
++}
++
+ void __xen_dma_map_page(struct device *hwdev, struct page *page,
+            dma_addr_t dev_addr, unsigned long offset, size_t size,
+            enum dma_data_direction dir, unsigned long attrs);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a834068a400e..6fd78d4c4164 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -296,7 +296,8 @@ static const char *const bpf_jmp_string[16] = {
+       [BPF_EXIT >> 4] = "exit",
+ };
+ 
+-static void print_bpf_insn(struct bpf_insn *insn)
++static void print_bpf_insn(const struct bpf_verifier_env *env,
++                         const struct bpf_insn *insn)
+ {
+       u8 class = BPF_CLASS(insn->code);
+ 
+@@ -360,9 +361,19 @@ static void print_bpf_insn(struct bpf_insn *insn)
+                               insn->code,
+                               bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+                               insn->src_reg, insn->imm);
+-              } else if (BPF_MODE(insn->code) == BPF_IMM) {
+-                      verbose("(%02x) r%d = 0x%x\n",
+-                              insn->code, insn->dst_reg, insn->imm);
++              } else if (BPF_MODE(insn->code) == BPF_IMM &&
++                         BPF_SIZE(insn->code) == BPF_DW) {
++                      /* At this point, we already made sure that the second
++                       * part of the ldimm64 insn is accessible.
++                       */
++                      u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
++                      bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
++
++                      if (map_ptr && !env->allow_ptr_leaks)
++                              imm = 0;
++
++                      verbose("(%02x) r%d = 0x%llx\n", insn->code,
++                              insn->dst_reg, (unsigned long long)imm);
+               } else {
+                       verbose("BUG_ld_%02x\n", insn->code);
+                       return;
+@@ -1911,6 +1922,17 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+                       return 0;
+               } else if (opcode == BPF_ADD &&
+                          BPF_CLASS(insn->code) == BPF_ALU64 &&
++                         dst_reg->type == PTR_TO_STACK &&
++                         ((BPF_SRC(insn->code) == BPF_X &&
++                           regs[insn->src_reg].type == CONST_IMM) ||
++                          BPF_SRC(insn->code) == BPF_K)) {
++                      if (BPF_SRC(insn->code) == BPF_X)
++                              dst_reg->imm += regs[insn->src_reg].imm;
++                      else
++                              dst_reg->imm += insn->imm;
++                      return 0;
++              } else if (opcode == BPF_ADD &&
++                         BPF_CLASS(insn->code) == BPF_ALU64 &&
+                          (dst_reg->type == PTR_TO_PACKET ||
+                           (BPF_SRC(insn->code) == BPF_X &&
+                            regs[insn->src_reg].type == PTR_TO_PACKET))) {
+@@ -2824,7 +2846,7 @@ static int do_check(struct bpf_verifier_env *env)
+ 
+               if (log_level) {
+                       verbose("%d: ", insn_idx);
+-                      print_bpf_insn(insn);
++                      print_bpf_insn(env, insn);
+               }
+ 
+               err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
+diff --git a/lib/refcount.c b/lib/refcount.c
+index aa09ad3c30b0..26dffb7e4c04 100644
+--- a/lib/refcount.c
++++ b/lib/refcount.c
+@@ -62,13 +62,13 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+ 
+       return true;
+ }
+-EXPORT_SYMBOL_GPL(refcount_add_not_zero);
++EXPORT_SYMBOL(refcount_add_not_zero);
+ 
+ void refcount_add(unsigned int i, refcount_t *r)
+ {
+       WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; 
use-after-free.\n");
+ }
+-EXPORT_SYMBOL_GPL(refcount_add);
++EXPORT_SYMBOL(refcount_add);
+ 
+ /*
+  * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+@@ -101,7 +101,7 @@ bool refcount_inc_not_zero(refcount_t *r)
+ 
+       return true;
+ }
+-EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
++EXPORT_SYMBOL(refcount_inc_not_zero);
+ 
+ /*
+  * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+@@ -113,7 +113,7 @@ void refcount_inc(refcount_t *r)
+ {
+       WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; 
use-after-free.\n");
+ }
+-EXPORT_SYMBOL_GPL(refcount_inc);
++EXPORT_SYMBOL(refcount_inc);
+ 
+ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+ {
+@@ -138,7 +138,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+ 
+       return !new;
+ }
+-EXPORT_SYMBOL_GPL(refcount_sub_and_test);
++EXPORT_SYMBOL(refcount_sub_and_test);
+ 
+ /*
+  * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+@@ -152,7 +152,7 @@ bool refcount_dec_and_test(refcount_t *r)
+ {
+       return refcount_sub_and_test(1, r);
+ }
+-EXPORT_SYMBOL_GPL(refcount_dec_and_test);
++EXPORT_SYMBOL(refcount_dec_and_test);
+ 
+ /*
+  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+@@ -166,7 +166,7 @@ void refcount_dec(refcount_t *r)
+ {
+       WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; 
leaking memory.\n");
+ }
+-EXPORT_SYMBOL_GPL(refcount_dec);
++EXPORT_SYMBOL(refcount_dec);
+ 
+ /*
+  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+@@ -183,7 +183,7 @@ bool refcount_dec_if_one(refcount_t *r)
+ {
+       return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+ }
+-EXPORT_SYMBOL_GPL(refcount_dec_if_one);
++EXPORT_SYMBOL(refcount_dec_if_one);
+ 
+ /*
+  * No atomic_t counterpart, it decrements unless the value is 1, in which case
+@@ -217,7 +217,7 @@ bool refcount_dec_not_one(refcount_t *r)
+ 
+       return true;
+ }
+-EXPORT_SYMBOL_GPL(refcount_dec_not_one);
++EXPORT_SYMBOL(refcount_dec_not_one);
+ 
+ /*
+  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+@@ -240,7 +240,7 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct 
mutex *lock)
+ 
+       return true;
+ }
+-EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
++EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
+ 
+ /*
+  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+@@ -263,5 +263,5 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+ 
+       return true;
+ }
+-EXPORT_SYMBOL_GPL(refcount_dec_and_lock);
++EXPORT_SYMBOL(refcount_dec_and_lock);
+ 
+diff --git a/lib/test_bpf.c b/lib/test_bpf.c
+index 0362da0b66c3..2e385026915c 100644
+--- a/lib/test_bpf.c
++++ b/lib/test_bpf.c
+@@ -4656,6 +4656,51 @@ static struct bpf_test tests[] = {
+               { },
+               { { 0, 1 } },
+       },
++      {
++              /* Mainly testing JIT + imm64 here. */
++              "JMP_JGE_X: ldimm64 test 1",
++              .u.insns_int = {
++                      BPF_ALU32_IMM(BPF_MOV, R0, 0),
++                      BPF_LD_IMM64(R1, 3),
++                      BPF_LD_IMM64(R2, 2),
++                      BPF_JMP_REG(BPF_JGE, R1, R2, 2),
++                      BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
++                      BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
++                      BPF_EXIT_INSN(),
++              },
++              INTERNAL,
++              { },
++              { { 0, 0xeeeeeeeeU } },
++      },
++      {
++              "JMP_JGE_X: ldimm64 test 2",
++              .u.insns_int = {
++                      BPF_ALU32_IMM(BPF_MOV, R0, 0),
++                      BPF_LD_IMM64(R1, 3),
++                      BPF_LD_IMM64(R2, 2),
++                      BPF_JMP_REG(BPF_JGE, R1, R2, 0),
++                      BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
++                      BPF_EXIT_INSN(),
++              },
++              INTERNAL,
++              { },
++              { { 0, 0xffffffffU } },
++      },
++      {
++              "JMP_JGE_X: ldimm64 test 3",
++              .u.insns_int = {
++                      BPF_ALU32_IMM(BPF_MOV, R0, 1),
++                      BPF_LD_IMM64(R1, 3),
++                      BPF_LD_IMM64(R2, 2),
++                      BPF_JMP_REG(BPF_JGE, R1, R2, 4),
++                      BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
++                      BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
++                      BPF_EXIT_INSN(),
++              },
++              INTERNAL,
++              { },
++              { { 0, 1 } },
++      },
+       /* BPF_JMP | BPF_JNE | BPF_X */
+       {
+               "JMP_JNE_X: if (3 != 2) return 1",
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index c4e84c558240..69daf393cbe1 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1056,7 +1056,7 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, 
struct net_device *dev)
+               return err;
+       }
+ 
+-      if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
++      if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
+               return -EMSGSIZE;
+ 
+       return 0;
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index d28da7d363f1..ae35cce3a40d 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -24,9 +24,13 @@ static siphash_key_t ts_secret __read_mostly;
+ 
+ static __always_inline void net_secret_init(void)
+ {
+-      net_get_random_once(&ts_secret, sizeof(ts_secret));
+       net_get_random_once(&net_secret, sizeof(net_secret));
+ }
++
++static __always_inline void ts_secret_init(void)
++{
++      net_get_random_once(&ts_secret, sizeof(ts_secret));
++}
+ #endif
+ 
+ #ifdef CONFIG_INET
+@@ -47,7 +51,7 @@ static u32 seq_scale(u32 seq)
+ #endif
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+-static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
++u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+ {
+       const struct {
+               struct in6_addr saddr;
+@@ -60,12 +64,14 @@ static u32 secure_tcpv6_ts_off(const __be32 *saddr, const 
__be32 *daddr)
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
+ 
++      ts_secret_init();
+       return siphash(&combined, offsetofend(typeof(combined), daddr),
+                      &ts_secret);
+ }
++EXPORT_SYMBOL(secure_tcpv6_ts_off);
+ 
+-u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+-                               __be16 sport, __be16 dport, u32 *tsoff)
++u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
++                   __be16 sport, __be16 dport)
+ {
+       const struct {
+               struct in6_addr saddr;
+@@ -78,14 +84,14 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, 
const __be32 *daddr,
+               .sport = sport,
+               .dport = dport
+       };
+-      u64 hash;
++      u32 hash;
++
+       net_secret_init();
+       hash = siphash(&combined, offsetofend(typeof(combined), dport),
+                      &net_secret);
+-      *tsoff = secure_tcpv6_ts_off(saddr, daddr);
+       return seq_scale(hash);
+ }
+-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
++EXPORT_SYMBOL(secure_tcpv6_seq);
+ 
+ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+                              __be16 dport)
+@@ -107,30 +113,30 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
+ #endif
+ 
+ #ifdef CONFIG_INET
+-static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
++u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+ {
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
+ 
++      ts_secret_init();
+       return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+                           &ts_secret);
+ }
+ 
+-/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, 
d),
++/* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, 
d),
+  * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
+  * it would be easy enough to have the former function use siphash_4u32, 
passing
+  * the arguments as separate u32.
+  */
+-
+-u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+-                             __be16 sport, __be16 dport, u32 *tsoff)
++u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
++                 __be16 sport, __be16 dport)
+ {
+-      u64 hash;
++      u32 hash;
++
+       net_secret_init();
+       hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
+                           (__force u32)sport << 16 | (__force u32)dport,
+                           &net_secret);
+-      *tsoff = secure_tcp_ts_off(saddr, daddr);
+       return seq_scale(hash);
+ }
+ 
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 9d943974de2b..bdffad875691 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -358,6 +358,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 
*fl4,
+                              rt->dst.dev->mtu);
+               return -EMSGSIZE;
+       }
++      if (length < sizeof(struct iphdr))
++              return -EINVAL;
++
+       if (flags&MSG_PROBE)
+               goto out;
+ 
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 496b97e17aaf..0257d965f111 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -16,6 +16,7 @@
+ #include <linux/siphash.h>
+ #include <linux/kernel.h>
+ #include <linux/export.h>
++#include <net/secure_seq.h>
+ #include <net/tcp.h>
+ #include <net/route.h>
+ 
+@@ -203,7 +204,7 @@ EXPORT_SYMBOL_GPL(__cookie_v4_check);
+ 
+ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+                                struct request_sock *req,
+-                               struct dst_entry *dst)
++                               struct dst_entry *dst, u32 tsoff)
+ {
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       struct sock *child;
+@@ -213,6 +214,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct 
sk_buff *skb,
+                                                NULL, &own_req);
+       if (child) {
+               atomic_set(&req->rsk_refcnt, 1);
++              tcp_sk(child)->tsoffset = tsoff;
+               sock_rps_save_rxhash(child, skb);
+               inet_csk_reqsk_queue_add(sk, req, child);
+       } else {
+@@ -292,6 +294,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct 
sk_buff *skb)
+       struct rtable *rt;
+       __u8 rcv_wscale;
+       struct flowi4 fl4;
++      u32 tsoff = 0;
+ 
+       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
+               goto out;
+@@ -311,6 +314,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct 
sk_buff *skb)
+       memset(&tcp_opt, 0, sizeof(tcp_opt));
+       tcp_parse_options(skb, &tcp_opt, 0, NULL);
+ 
++      if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
++              tsoff = secure_tcp_ts_off(ip_hdr(skb)->daddr, 
ip_hdr(skb)->saddr);
++              tcp_opt.rcv_tsecr -= tsoff;
++      }
++
+       if (!cookie_timestamp_decode(&tcp_opt))
+               goto out;
+ 
+@@ -381,7 +389,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct 
sk_buff *skb)
+       ireq->rcv_wscale  = rcv_wscale;
+       ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
+ 
+-      ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
++      ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
+       /* ip_queue_xmit() depends on our flow being setup
+        * Normal sockets get it right from inet_csk_route_child_sock()
+        */
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 40ba4249a586..2dc7fcf60bf3 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -533,7 +533,7 @@ unsigned int tcp_poll(struct file *file, struct socket 
*sock, poll_table *wait)
+ 
+               if (tp->urg_data & TCP_URG_VALID)
+                       mask |= POLLPRI;
+-      } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
++      } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
+               /* Active TCP fastopen socket with defer_connect
+                * Return POLLOUT so application can call write()
+                * in order for kernel to generate SYN+data
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 659d1baefb2b..3c6c8787b42e 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -85,7 +85,6 @@ int sysctl_tcp_dsack __read_mostly = 1;
+ int sysctl_tcp_app_win __read_mostly = 31;
+ int sysctl_tcp_adv_win_scale __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+-EXPORT_SYMBOL(sysctl_tcp_timestamps);
+ 
+ /* rfc5961 challenge ack rate limiting */
+ int sysctl_tcp_challenge_ack_limit = 1000;
+@@ -6332,8 +6331,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_free;
+ 
+-      if (isn && tmp_opt.tstamp_ok)
+-              af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
++      if (tmp_opt.tstamp_ok)
++              tcp_rsk(req)->ts_off = af_ops->init_ts_off(skb);
+ 
+       if (!want_cookie && !isn) {
+               /* VJ's idea. We save last timestamp seen
+@@ -6375,7 +6374,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                       goto drop_and_release;
+               }
+ 
+-              isn = af_ops->init_seq(skb, &tcp_rsk(req)->ts_off);
++              isn = af_ops->init_seq(skb);
+       }
+       if (!dst) {
+               dst = af_ops->route_req(sk, &fl, req, NULL);
+@@ -6387,7 +6386,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ 
+       if (want_cookie) {
+               isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+-              tcp_rsk(req)->ts_off = 0;
+               req->cookie_ts = tmp_opt.tstamp_ok;
+               if (!tmp_opt.tstamp_ok)
+                       inet_rsk(req)->ecn_ok = 0;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 575e19dcc017..1a5fa95c981f 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -94,12 +94,18 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const 
struct tcp_md5sig_key *key,
+ struct inet_hashinfo tcp_hashinfo;
+ EXPORT_SYMBOL(tcp_hashinfo);
+ 
+-static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
++static u32 tcp_v4_init_seq(const struct sk_buff *skb)
+ {
+-      return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
+-                                        ip_hdr(skb)->saddr,
+-                                        tcp_hdr(skb)->dest,
+-                                        tcp_hdr(skb)->source, tsoff);
++      return secure_tcp_seq(ip_hdr(skb)->daddr,
++                            ip_hdr(skb)->saddr,
++                            tcp_hdr(skb)->dest,
++                            tcp_hdr(skb)->source);
++}
++
++static u32 tcp_v4_init_ts_off(const struct sk_buff *skb)
++{
++      return secure_tcp_ts_off(ip_hdr(skb)->daddr,
++                               ip_hdr(skb)->saddr);
+ }
+ 
+ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+@@ -145,7 +151,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+       struct flowi4 *fl4;
+       struct rtable *rt;
+       int err;
+-      u32 seq;
+       struct ip_options_rcu *inet_opt;
+       struct inet_timewait_death_row *tcp_death_row = 
&sock_net(sk)->ipv4.tcp_death_row;
+ 
+@@ -236,13 +241,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+       rt = NULL;
+ 
+       if (likely(!tp->repair)) {
+-              seq = secure_tcp_sequence_number(inet->inet_saddr,
+-                                               inet->inet_daddr,
+-                                               inet->inet_sport,
+-                                               usin->sin_port,
+-                                               &tp->tsoffset);
+               if (!tp->write_seq)
+-                      tp->write_seq = seq;
++                      tp->write_seq = secure_tcp_seq(inet->inet_saddr,
++                                                     inet->inet_daddr,
++                                                     inet->inet_sport,
++                                                     usin->sin_port);
++              tp->tsoffset = secure_tcp_ts_off(inet->inet_saddr,
++                                               inet->inet_daddr);
+       }
+ 
+       inet->inet_id = tp->write_seq ^ jiffies;
+@@ -1253,7 +1258,8 @@ static const struct tcp_request_sock_ops 
tcp_request_sock_ipv4_ops = {
+       .cookie_init_seq =      cookie_v4_init_sequence,
+ #endif
+       .route_req      =       tcp_v4_route_req,
+-      .init_seq       =       tcp_v4_init_sequence,
++      .init_seq       =       tcp_v4_init_seq,
++      .init_ts_off    =       tcp_v4_init_ts_off,
+       .send_synack    =       tcp_v4_send_synack,
+ };
+ 
+diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
+index 046fd3910873..d6fb6c067af4 100644
+--- a/net/ipv4/tcp_lp.c
++++ b/net/ipv4/tcp_lp.c
+@@ -264,13 +264,15 @@ static void tcp_lp_pkts_acked(struct sock *sk, const 
struct ack_sample *sample)
+ {
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct lp *lp = inet_csk_ca(sk);
++      u32 delta;
+ 
+       if (sample->rtt_us > 0)
+               tcp_lp_rtt_sample(sk, sample->rtt_us);
+ 
+       /* calc inference */
+-      if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
+-              lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
++      delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
++      if ((s32)delta > 0)
++              lp->inference = 3 * delta;
+ 
+       /* test if within inference */
+       if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 65c0f3d13eca..c1259ccc422f 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -536,6 +536,7 @@ struct sock *tcp_create_openreq_child(const struct sock 
*sk,
+                       newicsk->icsk_ack.last_seg_size = skb->len - 
newtp->tcp_header_len;
+               newtp->rx_opt.mss_clamp = req->mss;
+               tcp_ecn_openreq_child(newtp, req);
++              newtp->fastopen_req = NULL;
+               newtp->fastopen_rsk = NULL;
+               newtp->syn_data_acked = 0;
+               newtp->rack.mstamp.v64 = 0;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 0ea96c4d334d..311f45641673 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3296,7 +3296,8 @@ static int fixup_permanent_addr(struct inet6_dev *idev,
+                                     idev->dev, 0, 0);
+       }
+ 
+-      addrconf_dad_start(ifp);
++      if (ifp->state == INET6_IFADDR_STATE_PREDAD)
++              addrconf_dad_start(ifp);
+ 
+       return 0;
+ }
+@@ -3515,6 +3516,7 @@ static int addrconf_notify(struct notifier_block *this, 
unsigned long event,
+  */
+ static struct notifier_block ipv6_dev_notf = {
+       .notifier_call = addrconf_notify,
++      .priority = ADDRCONF_NOTIFY_PRIORITY,
+ };
+ 
+ static void addrconf_type_change(struct net_device *dev, unsigned long event)
+@@ -3651,7 +3653,7 @@ static int addrconf_ifdown(struct net_device *dev, int 
how)
+               if (keep) {
+                       /* set state to skip the notifier below */
+                       state = INET6_IFADDR_STATE_DEAD;
+-                      ifa->state = 0;
++                      ifa->state = INET6_IFADDR_STATE_PREDAD;
+                       if (!(ifa->flags & IFA_F_NODAD))
+                               ifa->flags |= IFA_F_TENTATIVE;
+ 
+@@ -6408,6 +6410,8 @@ int __init addrconf_init(void)
+               goto errlo;
+       }
+ 
++      ip6_route_init_special_entries();
++
+       for (i = 0; i < IN6_ADDR_HSIZE; i++)
+               INIT_HLIST_HEAD(&inet6_addr_lst[i]);
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 0da6a12b5472..1f992d9e261d 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -632,6 +632,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct 
msghdr *msg, int length,
+               ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
+               return -EMSGSIZE;
+       }
++      if (length < sizeof(struct ipv6hdr))
++              return -EINVAL;
+       if (flags&MSG_PROBE)
+               goto out;
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index fb174b590fd3..d316d00e11ab 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3704,7 +3704,10 @@ static int ip6_route_dev_notify(struct notifier_block 
*this,
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct net *net = dev_net(dev);
+ 
+-      if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
++      if (!(dev->flags & IFF_LOOPBACK))
++              return NOTIFY_OK;
++
++      if (event == NETDEV_REGISTER) {
+               net->ipv6.ip6_null_entry->dst.dev = dev;
+               net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+@@ -3713,6 +3716,12 @@ static int ip6_route_dev_notify(struct notifier_block 
*this,
+               net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
+               net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
+ #endif
++       } else if (event == NETDEV_UNREGISTER) {
++              in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
++#ifdef CONFIG_IPV6_MULTIPLE_TABLES
++              in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
++              in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
++#endif
+       }
+ 
+       return NOTIFY_OK;
+@@ -4019,9 +4028,24 @@ static struct pernet_operations ip6_route_net_late_ops 
= {
+ 
+ static struct notifier_block ip6_route_dev_notifier = {
+       .notifier_call = ip6_route_dev_notify,
+-      .priority = 0,
++      .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
+ };
+ 
++void __init ip6_route_init_special_entries(void)
++{
++      /* Registering of the loopback is done before this portion of code,
++       * the loopback reference in rt6_info will not be taken, do it
++       * manually for init_net */
++      init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
++      init_net.ipv6.ip6_null_entry->rt6i_idev = 
in6_dev_get(init_net.loopback_dev);
++  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
++      init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
++      init_net.ipv6.ip6_prohibit_entry->rt6i_idev = 
in6_dev_get(init_net.loopback_dev);
++      init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
++      init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = 
in6_dev_get(init_net.loopback_dev);
++  #endif
++}
++
+ int __init ip6_route_init(void)
+ {
+       int ret;
+@@ -4048,17 +4072,6 @@ int __init ip6_route_init(void)
+ 
+       ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
+ 
+-      /* Registering of the loopback is done before this portion of code,
+-       * the loopback reference in rt6_info will not be taken, do it
+-       * manually for init_net */
+-      init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
+-      init_net.ipv6.ip6_null_entry->rt6i_idev = 
in6_dev_get(init_net.loopback_dev);
+-  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+-      init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
+-      init_net.ipv6.ip6_prohibit_entry->rt6i_idev = 
in6_dev_get(init_net.loopback_dev);
+-      init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+-      init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = 
in6_dev_get(init_net.loopback_dev);
+-  #endif
+       ret = fib6_init();
+       if (ret)
+               goto out_register_subsys;
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 895ff650db43..5abc3692b901 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -18,6 +18,7 @@
+ #include <linux/random.h>
+ #include <linux/siphash.h>
+ #include <linux/kernel.h>
++#include <net/secure_seq.h>
+ #include <net/ipv6.h>
+ #include <net/tcp.h>
+ 
+@@ -143,6 +144,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct 
sk_buff *skb)
+       int mss;
+       struct dst_entry *dst;
+       __u8 rcv_wscale;
++      u32 tsoff = 0;
+ 
+       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
+               goto out;
+@@ -162,6 +164,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct 
sk_buff *skb)
+       memset(&tcp_opt, 0, sizeof(tcp_opt));
+       tcp_parse_options(skb, &tcp_opt, 0, NULL);
+ 
++      if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
++              tsoff = secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
++                                          ipv6_hdr(skb)->saddr.s6_addr32);
++              tcp_opt.rcv_tsecr -= tsoff;
++      }
++
+       if (!cookie_timestamp_decode(&tcp_opt))
+               goto out;
+ 
+@@ -242,7 +250,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct 
sk_buff *skb)
+       ireq->rcv_wscale = rcv_wscale;
+       ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
+ 
+-      ret = tcp_get_cookie_sock(sk, skb, req, dst);
++      ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
+ out:
+       return ret;
+ out_free:
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 49fa2e8c3fa9..4c4afdca41ff 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -101,12 +101,18 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const 
struct sk_buff *skb)
+       }
+ }
+ 
+-static u32 tcp_v6_init_sequence(const struct sk_buff *skb, u32 *tsoff)
++static u32 tcp_v6_init_seq(const struct sk_buff *skb)
+ {
+-      return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
+-                                          ipv6_hdr(skb)->saddr.s6_addr32,
+-                                          tcp_hdr(skb)->dest,
+-                                          tcp_hdr(skb)->source, tsoff);
++      return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
++                              ipv6_hdr(skb)->saddr.s6_addr32,
++                              tcp_hdr(skb)->dest,
++                              tcp_hdr(skb)->source);
++}
++
++static u32 tcp_v6_init_ts_off(const struct sk_buff *skb)
++{
++      return secure_tcpv6_ts_off(ipv6_hdr(skb)->daddr.s6_addr32,
++                                 ipv6_hdr(skb)->saddr.s6_addr32);
+ }
+ 
+ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+@@ -122,7 +128,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr 
*uaddr,
+       struct flowi6 fl6;
+       struct dst_entry *dst;
+       int addr_type;
+-      u32 seq;
+       int err;
+       struct inet_timewait_death_row *tcp_death_row = 
&sock_net(sk)->ipv4.tcp_death_row;
+ 
+@@ -287,13 +292,13 @@ static int tcp_v6_connect(struct sock *sk, struct 
sockaddr *uaddr,
+       sk_set_txhash(sk);
+ 
+       if (likely(!tp->repair)) {
+-              seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
+-                                                 sk->sk_v6_daddr.s6_addr32,
+-                                                 inet->inet_sport,
+-                                                 inet->inet_dport,
+-                                                 &tp->tsoffset);
+               if (!tp->write_seq)
+-                      tp->write_seq = seq;
++                      tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
++                                                       
sk->sk_v6_daddr.s6_addr32,
++                                                       inet->inet_sport,
++                                                       inet->inet_dport);
++              tp->tsoffset = secure_tcpv6_ts_off(np->saddr.s6_addr32,
++                                                 sk->sk_v6_daddr.s6_addr32);
+       }
+ 
+       if (tcp_fastopen_defer_connect(sk, &err))
+@@ -757,7 +762,8 @@ static const struct tcp_request_sock_ops 
tcp_request_sock_ipv6_ops = {
+       .cookie_init_seq =      cookie_v6_init_sequence,
+ #endif
+       .route_req      =       tcp_v6_route_req,
+-      .init_seq       =       tcp_v6_init_sequence,
++      .init_seq       =       tcp_v6_init_seq,
++      .init_ts_off    =       tcp_v6_init_ts_off,
+       .send_synack    =       tcp_v6_send_synack,
+ };
+ 
+diff --git a/tools/testing/selftests/bpf/test_verifier.c 
b/tools/testing/selftests/bpf/test_verifier.c
+index c848e90b6421..8b433bf3fdd7 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -1809,16 +1809,22 @@ static struct bpf_test tests[] = {
+               .result = ACCEPT,
+       },
+       {
+-              "unpriv: obfuscate stack pointer",
++              "stack pointer arithmetic",
+               .insns = {
+-                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+-                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
++                      BPF_MOV64_IMM(BPF_REG_1, 4),
++                      BPF_JMP_IMM(BPF_JA, 0, 0, 0),
++                      BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
++                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
++                      BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
++                      BPF_ST_MEM(0, BPF_REG_2, 4, 0),
++                      BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
++                      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
++                      BPF_ST_MEM(0, BPF_REG_2, 4, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+-              .errstr_unpriv = "R2 pointer arithmetic",
+-              .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {

Reply via email to