commit:     69c2259e3678793f2aa4beddaa1453a039d9ac43
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 31 13:27:39 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Jan 31 13:27:39 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=69c2259e

Linux kernel 4.9.79

 0000_README             |    4 +
 1078_linux-4.9.79.patch | 2352 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2356 insertions(+)

diff --git a/0000_README b/0000_README
index 9048086..d0865d5 100644
--- a/0000_README
+++ b/0000_README
@@ -355,6 +355,10 @@ Patch:  1077_linux-4.9.78.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.78
 
+Patch:  1078_linux-4.9.79.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.79
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1078_linux-4.9.79.patch b/1078_linux-4.9.79.patch
new file mode 100644
index 0000000..debc1a0
--- /dev/null
+++ b/1078_linux-4.9.79.patch
@@ -0,0 +1,2352 @@
+diff --git a/Makefile b/Makefile
+index 8a6f158a1176..4a7e6dff1c2e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 78
++SUBLEVEL = 79
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 2206e0e00934..2a35c1963f6d 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1284,7 +1284,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+               return -EFAULT;
+       }
+ 
+-      if (is_vm_hugetlb_page(vma) && !logging_active) {
++      if (vma_kernel_pagesize(vma) && !logging_active) {
+               hugetlb = true;
+               gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+       } else {
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 0ca46ededfc7..9c150ccb35d2 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -117,7 +117,7 @@ archheaders:
+ archprepare: include/generated/user_constants.h
+ 
+ LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
+-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
++LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
+ 
+ CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
+       $(call cc-option, -fno-stack-protector,) \
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 6bb7e92c6d50..0174290b2857 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
+ #else
+       EMULATE;
+ #endif
++unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
+ 
+ static int __init vsyscall_setup(char *str)
+ {
+@@ -336,11 +337,11 @@ void __init map_vsyscall(void)
+       extern char __vsyscall_page;
+       unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
+ 
++      if (vsyscall_mode != NATIVE)
++              vsyscall_pgprot = __PAGE_KERNEL_VVAR;
+       if (vsyscall_mode != NONE)
+               __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
+-                           vsyscall_mode == NATIVE
+-                           ? PAGE_KERNEL_VSYSCALL
+-                           : PAGE_KERNEL_VVAR);
++                           __pgprot(vsyscall_pgprot));
+ 
+       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+                    (unsigned long)VSYSCALL_ADDR);
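
Editorial aside, not part of the patch: the new vsyscall_pgprot variable exists so that map_vsyscall() above and the KAISER hunk in arch/x86/mm/kaiser.c further down agree on the vsyscall page protection; previously kaiser_init() hard-coded the executable __PAGE_KERNEL_VSYSCALL even when vsyscall_mode was EMULATE. A reduced user-space sketch of the shared-variable shape; the constants are placeholders, not the real pgprot bits:

    #include <stdio.h>

    enum mode { EMULATE, NATIVE, NONE_MODE };

    #define PROT_VSYSCALL 0x1UL   /* placeholder for __PAGE_KERNEL_VSYSCALL */
    #define PROT_VVAR     0x2UL   /* placeholder for __PAGE_KERNEL_VVAR */

    unsigned long vsyscall_pgprot = PROT_VSYSCALL;

    static void map_vsyscall(enum mode m)
    {
            if (m != NATIVE)                      /* as in the hunk above */
                    vsyscall_pgprot = PROT_VVAR;
            /* ... __set_fixmap(..., __pgprot(vsyscall_pgprot)) ... */
    }

    static void kaiser_init(void)
    {
            /* ... kaiser_add_user_map_early(..., vsyscall_pgprot) ...
             * uses whatever map_vsyscall() recorded, so the two
             * mappings can no longer disagree. */
    }

    int main(void)
    {
            map_vsyscall(EMULATE);
            kaiser_init();
            printf("pgprot = %lx\n", vsyscall_pgprot);   /* prints 2 */
            return 0;
    }
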
+diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
+index 9842270ed2f2..21a4e4127f43 100644
+--- a/arch/x86/events/amd/power.c
++++ b/arch/x86/events/amd/power.c
+@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
+       int ret;
+ 
+       if (!x86_match_cpu(cpu_match))
+-              return 0;
++              return -ENODEV;
+ 
+       if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
+               return -ENODEV;
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index e40b19ca486e..353f038ec645 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -596,7 +596,7 @@ static inline void sync_core(void)
+ {
+       int tmp;
+ 
+-#ifdef CONFIG_M486
++#ifdef CONFIG_X86_32
+       /*
+        * Do a CPUID if available, otherwise do a jump.  The jump
+        * can conveniently enough be the jump around CPUID.
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index 4865e10dbb55..9ee85066f407 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -13,6 +13,7 @@ extern void map_vsyscall(void);
+  */
+ extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
+ extern bool vsyscall_enabled(void);
++extern unsigned long vsyscall_pgprot;
+ #else
+ static inline void map_vsyscall(void) {}
+ static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index de6626c18e42..be6337156502 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
+               ci_leaf_init(this_leaf++, &id4_regs);
+               __cache_cpumap_setup(cpu, idx, &id4_regs);
+       }
++      this_cpu_ci->cpu_map_populated = true;
++
+       return 0;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index ac3e636ad586..f90f17610f62 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -40,6 +40,9 @@
+ #include <asm/setup.h>
+ #include <asm/msr.h>
+ 
++/* last level cache size per core */
++static int llc_size_per_core;
++
+ /*
+  * Temporary microcode blobs pointers storage. We note here during early load
+  * the pointers to microcode blobs we've got from whatever storage (detached
+@@ -1053,12 +1056,14 @@ static bool is_blacklisted(unsigned int cpu)
+ 
+       /*
+        * Late loading on model 79 with microcode revision less than 0x0b000021
+-       * may result in a system hang. This behavior is documented in item
+-       * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
++       * and LLC size per core bigger than 2.5MB may result in a system hang.
++       * This behavior is documented in item BDF90, #334165 (Intel Xeon
++       * Processor E7-8800/4800 v4 Product Family).
+        */
+       if (c->x86 == 6 &&
+           c->x86_model == INTEL_FAM6_BROADWELL_X &&
+           c->x86_mask == 0x01 &&
++          llc_size_per_core > 2621440 &&
+           c->microcode < 0x0b000021) {
+               pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+               pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+@@ -1125,6 +1130,15 @@ static struct microcode_ops microcode_intel_ops = {
+       .microcode_fini_cpu               = microcode_fini_cpu,
+ };
+ 
++static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
++{
++      u64 llc_size = c->x86_cache_size * 1024;
++
++      do_div(llc_size, c->x86_max_cores);
++
++      return (int)llc_size;
++}
++
+ struct microcode_ops * __init init_intel_microcode(void)
+ {
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+@@ -1135,6 +1149,8 @@ struct microcode_ops * __init init_intel_microcode(void)
+               return NULL;
+       }
+ 
++      llc_size_per_core = calc_llc_size_per_core(c);
++
+       return &microcode_intel_ops;
+ }
+ 
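
Editorial aside, not part of the patch: x86_cache_size is expressed in KB, so the 2621440-byte threshold in is_blacklisted() above is exactly 2.5 MB of last-level cache per core. A standalone sketch of the same arithmetic; the function and parameter names are stand-ins for the kernel's cpuinfo fields:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors calc_llc_size_per_core(): cache_size_kb stands in for
     * cpuinfo_x86.x86_cache_size (in KB), max_cores for x86_max_cores. */
    static int llc_size_per_core(unsigned int cache_size_kb,
                                 unsigned int max_cores)
    {
            uint64_t llc_size = (uint64_t)cache_size_kb * 1024;

            return (int)(llc_size / max_cores);   /* do_div() in the kernel */
    }

    int main(void)
    {
            /* A 24-core part with a 60 MB LLC lands exactly on the 2.5 MB
             * (2621440-byte) boundary the blacklist checks against. */
            printf("%d\n", llc_size_per_core(60 * 1024, 24));   /* 2621440 */
            return 0;
    }
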
+diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
+index 073d1f1a620b..9758524ee99f 100644
+--- a/arch/x86/lib/delay.c
++++ b/arch/x86/lib/delay.c
+@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
+ {
+       u64 start, end, delay, loops = __loops;
+ 
++      /*
++       * Timer value of 0 causes MWAITX to wait indefinitely, unless there
++       * is a store on the memory monitored by MONITORX.
++       */
++      if (loops == 0)
++              return;
++
+       start = rdtsc_ordered();
+ 
+       for (;;) {
+diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
+index a8ade08a9bf5..ec678aafa3f8 100644
+--- a/arch/x86/mm/kaiser.c
++++ b/arch/x86/mm/kaiser.c
+@@ -344,7 +344,7 @@ void __init kaiser_init(void)
+       if (vsyscall_enabled())
+               kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
+                                         PAGE_SIZE,
+-                                         __PAGE_KERNEL_VSYSCALL);
++                                        vsyscall_pgprot);
+ 
+       for_each_possible_cpu(cpu) {
+               void *percpu_vaddr = __per_cpu_user_mapped_start +
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 15f743615923..7840331d3056 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -278,10 +278,10 @@ static void emit_bpf_tail_call(u8 **pprog)
+       /* if (index >= array->map.max_entries)
+        *   goto out;
+        */
+-      EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
++      EMIT2(0x89, 0xD2);                        /* mov edx, edx */
++      EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
+             offsetof(struct bpf_array, map.max_entries));
+-      EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
+-#define OFFSET1 47 /* number of bytes to jump */
++#define OFFSET1 43 /* number of bytes to jump */
+       EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
+       label1 = cnt;
+ 
+@@ -290,21 +290,20 @@ static void emit_bpf_tail_call(u8 **pprog)
+        */
+       EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
+       EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
+-#define OFFSET2 36
++#define OFFSET2 32
+       EMIT2(X86_JA, OFFSET2);                   /* ja out */
+       label2 = cnt;
+       EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
+       EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
+ 
+       /* prog = array->ptrs[index]; */
+-      EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
++      EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
+                   offsetof(struct bpf_array, ptrs));
+-      EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
+ 
+       /* if (prog == NULL)
+        *   goto out;
+        */
+-      EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
++      EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
+ #define OFFSET3 10
+       EMIT2(X86_JE, OFFSET3);                   /* je out */
+       label3 = cnt;
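
Editorial aside, not part of the patch: the rewritten sequence truncates the tail-call index to 32 bits first (mov edx, edx zero-extends edx into rdx), so the bounds check and the later array indexing see the same u32 value; this matches the interpreter change in the kernel/bpf/core.c hunk below, where the u64 index becomes u32. A C sketch of the semantics the emitted instructions implement, using a hypothetical stand-in for struct bpf_array:

    #include <stdint.h>
    #include <stdio.h>

    struct fake_bpf_array {                 /* stand-in for struct bpf_array */
            uint32_t max_entries;
            void *ptrs[4];
    };

    static void *tail_call_target(const struct fake_bpf_array *arr, uint64_t r3)
    {
            uint32_t index = (uint32_t)r3;          /* mov edx, edx */

            if (index >= arr->max_entries)          /* cmp [rsi+16], edx; jbe */
                    return NULL;                    /* goto out */
            return arr->ptrs[index];                /* mov rax, [rsi+rdx*8+...] */
    }

    int main(void)
    {
            struct fake_bpf_array arr = { .max_entries = 4, .ptrs = { &arr } };

            /* The check and the indexing now use the same 32-bit value. */
            printf("%p\n", tail_call_target(&arr, 0xffffffff00000000ULL));
            return 0;
    }
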
+diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
+index 691814dfed31..943702dd9517 100644
+--- a/drivers/acpi/acpica/nsutils.c
++++ b/drivers/acpi/acpica/nsutils.c
+@@ -594,25 +594,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
+ void acpi_ns_terminate(void)
+ {
+       acpi_status status;
++      union acpi_operand_object *prev;
++      union acpi_operand_object *next;
+ 
+       ACPI_FUNCTION_TRACE(ns_terminate);
+ 
+-#ifdef ACPI_EXEC_APP
+-      {
+-              union acpi_operand_object *prev;
+-              union acpi_operand_object *next;
++      /* Delete any module-level code blocks */
+ 
+-              /* Delete any module-level code blocks */
+-
+-              next = acpi_gbl_module_code_list;
+-              while (next) {
+-                      prev = next;
+-                      next = next->method.mutex;
+-                      prev->method.mutex = NULL;      /* Clear the Mutex (cheated) field */
+-                      acpi_ut_remove_reference(prev);
+-              }
++      next = acpi_gbl_module_code_list;
++      while (next) {
++              prev = next;
++              next = next->method.mutex;
++              prev->method.mutex = NULL;      /* Clear the Mutex (cheated) field */
++              acpi_ut_remove_reference(prev);
+       }
+-#endif
+ 
+       /*
+        * Free the entire namespace -- all nodes and all objects
+diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
+index 73c9c7fa9001..f06317d6fc38 100644
+--- a/drivers/acpi/glue.c
++++ b/drivers/acpi/glue.c
+@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
+               return -ENODEV;
+ 
+       /*
+-       * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+-       * device ID, it is better to make it look less attractive here, so that
+-       * the other device with the same _ADR value (that may not have a valid
+-       * device ID) can be matched going forward.  [This means a second spec
+-       * violation in a row, so whatever we do here is best effort anyway.]
++       * If the device has a _HID returning a valid ACPI/PNP device ID, it is
++       * better to make it look less attractive here, so that the other device
++       * with the same _ADR value (that may not have a valid device ID) can be
++       * matched going forward.  [This means a second spec violation in a row,
++       * so whatever we do here is best effort anyway.]
+        */
+-      return sta_present && list_empty(&adev->pnp.ids) ?
++      return sta_present && !adev->pnp.type.platform_id ?
+                       FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ }
+ 
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index e9fd32e91668..70e13cf06ed0 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -16,6 +16,7 @@
+  * You should have received a copy of the GNU General Public License
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
++#include <linux/acpi.h>
+ #include <linux/bitops.h>
+ #include <linux/cacheinfo.h>
+ #include <linux/compiler.h>
+@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       struct cacheinfo *this_leaf, *sib_leaf;
+       unsigned int index;
+-      int ret;
++      int ret = 0;
++
++      if (this_cpu_ci->cpu_map_populated)
++              return 0;
+ 
+-      ret = cache_setup_of_node(cpu);
++      if (of_have_populated_dt())
++              ret = cache_setup_of_node(cpu);
++      else if (!acpi_disabled)
++              /* No cache property/hierarchy support yet in ACPI */
++              ret = -ENOTSUPP;
+       if (ret)
+               return ret;
+ 
+@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu)
+        */
+       ret = cache_shared_cpu_map_setup(cpu);
+       if (ret) {
+-              pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
+-                      cpu);
++              pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
+               goto free_ci;
+       }
+       return 0;
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 7e2dc5e56632..0b49f29bf0da 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+       if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
+               psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+               button_info = 0x33;
++      } else if (!button_info) {
++              psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
++              button_info = 0x33;
+       }
+ 
+       psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 1644896568c4..b2eeecb26939 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -4733,6 +4733,15 @@ int be_update_queues(struct be_adapter *adapter)
+ 
+       be_schedule_worker(adapter);
+ 
++      /*
++       * The IF was destroyed and re-created. We need to clear
++       * all promiscuous flags valid for the destroyed IF.
++       * Without this promisc mode is not restored during
++       * be_open() because the driver thinks that it is
++       * already enabled in HW.
++       */
++      adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
++
+       if (netif_running(netdev))
+               status = be_open(netdev);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 8aa91ddff287..16556011d571 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -765,11 +765,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+       dipn = htonl(dip);
+       dev = mlxsw_sp->rifs[rif]->dev;
+       n = neigh_lookup(&arp_tbl, &dipn, dev);
+-      if (!n) {
+-              netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
+-                         &dip);
++      if (!n)
+               return;
+-      }
+ 
+       netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
+       neigh_event_send(n, NULL);
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 2c4350a1c629..298b74ebc1e9 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -2222,19 +2222,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
+       void __iomem *ioaddr = tp->mmio_addr;
+       dma_addr_t paddr = tp->counters_phys_addr;
+       u32 cmd;
+-      bool ret;
+ 
+       RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
++      RTL_R32(CounterAddrHigh);
+       cmd = (u64)paddr & DMA_BIT_MASK(32);
+       RTL_W32(CounterAddrLow, cmd);
+       RTL_W32(CounterAddrLow, cmd | counter_cmd);
+ 
+-      ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+-
+-      RTL_W32(CounterAddrLow, 0);
+-      RTL_W32(CounterAddrHigh, 0);
+-
+-      return ret;
++      return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
+ }
+ 
+ static bool rtl8169_reset_counters(struct net_device *dev)
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index b883af93929c..fc4c2ccc3d22 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1002,17 +1002,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
+       if (!ifname_is_set)
+               snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+ 
++      mutex_unlock(&pn->all_ppp_mutex);
++
+       ret = register_netdevice(ppp->dev);
+       if (ret < 0)
+               goto err_unit;
+ 
+       atomic_inc(&ppp_unit_count);
+ 
+-      mutex_unlock(&pn->all_ppp_mutex);
+-
+       return 0;
+ 
+ err_unit:
++      mutex_lock(&pn->all_ppp_mutex);
+       unit_put(&pn->units_idr, ppp->file.index);
+ err:
+       mutex_unlock(&pn->all_ppp_mutex);
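
Editorial aside, not part of the patch: the reordering keeps register_netdevice() out of the all_ppp_mutex critical section. The upstream rationale, as best this editor can reconstruct it, is that a failing register_netdevice() can call back into the device's uninit path, which itself takes all_ppp_mutex, so holding the mutex across the call risks a self-deadlock; it is therefore dropped first and re-acquired only on the error path. A minimal pthreads sketch of the pattern, all names hypothetical:

    #include <pthread.h>

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

    static void dev_uninit(void)            /* akin to the ppp uninit path */
    {
            pthread_mutex_lock(&registry_lock);   /* self-deadlocks if the   */
            /* ... drop registry entry ... */     /* caller already holds it */
            pthread_mutex_unlock(&registry_lock);
    }

    static int register_device(int fail)    /* akin to register_netdevice() */
    {
            if (fail) {
                    dev_uninit();           /* failure path re-enters the lock */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            pthread_mutex_lock(&registry_lock);
            /* ... allocate a unit id under the lock ... */
            pthread_mutex_unlock(&registry_lock);   /* drop BEFORE registering */

            if (register_device(1) < 0) {
                    pthread_mutex_lock(&registry_lock);   /* re-take on error */
                    /* ... release the unit id ... */
                    pthread_mutex_unlock(&registry_lock);
            }
            return 0;
    }
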
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 4ddae8118c85..dc36c2ec1d10 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+       struct pppoe_hdr *ph;
+       struct net_device *dev;
+       char *start;
++      int hlen;
+ 
+       lock_sock(sk);
+       if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
+@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+       if (total_len > (dev->mtu + dev->hard_header_len))
+               goto end;
+ 
+-
+-      skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
+-                         0, GFP_KERNEL);
++      hlen = LL_RESERVED_SPACE(dev);
++      skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
++                         dev->needed_tailroom, 0, GFP_KERNEL);
+       if (!skb) {
+               error = -ENOMEM;
+               goto end;
+       }
+ 
+       /* Reserve space for headers. */
+-      skb_reserve(skb, dev->hard_header_len);
++      skb_reserve(skb, hlen);
+       skb_reset_network_header(skb);
+ 
+       skb->dev = dev;
+@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
+       /* Copy the data if there is no space for the header or if it's
+        * read-only.
+        */
+-      if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
++      if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
+               goto abort;
+ 
+       __skb_push(skb, sizeof(*ph));
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 518cbfbc8b65..eb6dc28e5e52 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -525,6 +525,14 @@ static void tun_queue_purge(struct tun_file *tfile)
+       skb_queue_purge(&tfile->sk.sk_error_queue);
+ }
+ 
++static void tun_cleanup_tx_array(struct tun_file *tfile)
++{
++      if (tfile->tx_array.ring.queue) {
++              skb_array_cleanup(&tfile->tx_array);
++              memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
++      }
++}
++
+ static void __tun_detach(struct tun_file *tfile, bool clean)
+ {
+       struct tun_file *ntfile;
+@@ -566,8 +574,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+                           tun->dev->reg_state == NETREG_REGISTERED)
+                               unregister_netdevice(tun->dev);
+               }
+-              if (tun)
+-                      skb_array_cleanup(&tfile->tx_array);
++              tun_cleanup_tx_array(tfile);
+               sock_put(&tfile->sk);
+       }
+ }
+@@ -606,11 +613,13 @@ static void tun_detach_all(struct net_device *dev)
+               /* Drop read queue */
+               tun_queue_purge(tfile);
+               sock_put(&tfile->sk);
++              tun_cleanup_tx_array(tfile);
+       }
+       list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+               tun_enable_queue(tfile);
+               tun_queue_purge(tfile);
+               sock_put(&tfile->sk);
++              tun_cleanup_tx_array(tfile);
+       }
+       BUG_ON(tun->numdisabled != 0);
+ 
+@@ -2363,6 +2372,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ 
+       sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+ 
++      memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
++
+       return 0;
+ }
+ 
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 9c257ffedb15..c53385a0052f 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2197,6 +2197,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
+               buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+               dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+               dev->rx_qlen = 4;
++              dev->tx_qlen = 4;
+       }
+ 
+       ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index ef83ae3b0a44..4afba17e2403 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+                                         rq->rx_ring[i].basePA);
+                       rq->rx_ring[i].base = NULL;
+               }
+-              rq->buf_info[i] = NULL;
+       }
+ 
+       if (rq->data_ring.base) {
+@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+                       (rq->rx_ring[0].size + rq->rx_ring[1].size);
+               dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
+                                 rq->buf_info_pa);
++              rq->buf_info[0] = rq->buf_info[1] = NULL;
+       }
+ }
+ 
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index a530f08592cd..4abd3fce5ab6 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ 
+       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+               reason = FAILURE_SESSION_IN_RECOVERY;
+-              sc->result = DID_REQUEUE;
++              sc->result = DID_REQUEUE << 16;
+               goto fault;
+       }
+ 
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index 9f490375ac92..f0b955f8504e 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -271,6 +271,7 @@ struct usbip_device {
+       /* lock for status */
+       spinlock_t lock;
+ 
++      int sockfd;
+       struct socket *tcp_socket;
+ 
+       struct task_struct *tcp_rx;
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index b96e5b189269..c287ccc78fde 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -49,13 +49,17 @@ static ssize_t status_show_vhci(int pdev_nr, char *out)
+ 
+       /*
+        * output example:
+-       * port sta spd dev      socket           local_busid
+-       * 0000 004 000 00000000         c5a7bb80 1-2.3
+-       * 0001 004 000 00000000         d8cee980 2-3.4
++       * port sta spd dev      sockfd local_busid
++       * 0000 004 000 00000000 000003 1-2.3
++       * 0001 004 000 00000000 000004 2-3.4
+        *
+-       * IP address can be retrieved from a socket pointer address by looking
+-       * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
+-       * port number and its peer IP address.
++       * Output includes socket fd instead of socket pointer address to
++       * avoid leaking kernel memory address in:
++       *      /sys/devices/platform/vhci_hcd.0/status and in debug output.
++       * The socket pointer address is not used at the moment and it was
++       * made visible as a convenient way to find IP address from socket
++       * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
++       * a security hole, the change is made to use sockfd instead.
+        */
+       for (i = 0; i < VHCI_HC_PORTS; i++) {
+               struct vhci_device *vdev = &vhci->vdev[i];
+@@ -68,13 +72,13 @@ static ssize_t status_show_vhci(int pdev_nr, char *out)
+               if (vdev->ud.status == VDEV_ST_USED) {
+                       out += sprintf(out, "%03u %08x ",
+                                           vdev->speed, vdev->devid);
+-                      out += sprintf(out, "%16p %s",
+-                                          vdev->ud.tcp_socket,
++                      out += sprintf(out, "%06u %s",
++                                          vdev->ud.sockfd,
+                                           dev_name(&vdev->udev->dev));
+ 
+               } else {
+                       out += sprintf(out, "000 00000000 ");
+-                      out += sprintf(out, "0000000000000000 0-0");
++                      out += sprintf(out, "000000 0-0");
+               }
+ 
+               out += sprintf(out, "\n");
+@@ -125,7 +129,7 @@ static ssize_t status_show(struct device *dev,
+       int pdev_nr;
+ 
+       out += sprintf(out,
+-                     "port sta spd dev      socket           local_busid\n");
++                     "port sta spd dev      sockfd local_busid\n");
+ 
+       pdev_nr = status_name_to_id(attr->attr.name);
+       if (pdev_nr < 0)
+@@ -324,6 +328,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
+ 
+       vdev->devid         = devid;
+       vdev->speed         = speed;
++      vdev->ud.sockfd     = sockfd;
+       vdev->ud.tcp_socket = socket;
+       vdev->ud.status     = VDEV_ST_NOTASSIGNED;
+ 
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 1493ceb0477d..ec03cf620fd7 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -114,6 +114,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
+       int who = arg;
+       type = PIDTYPE_PID;
+       if (who < 0) {
++              /* avoid overflow below */
++              if (who == INT_MIN)
++                      return;
++
+               type = PIDTYPE_PGID;
+               who = -who;
+       }
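
Editorial aside, not part of the patch: the new guard closes a two's-complement corner case; negating INT_MIN is undefined behaviour in C because +2147483648 does not fit in a 32-bit int, and on common hardware the negation wraps back to INT_MIN, leaving a negative value where a process-group id is expected. A standalone demonstration:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            int who = INT_MIN;

            /* Without the check, `who = -who` is undefined behaviour;
             * in practice it wraps back to INT_MIN. */
            if (who == INT_MIN) {
                    puts("rejected, as the patched f_setown() now does");
                    return 0;
            }
            who = -who;
            printf("%d\n", who);
            return 0;
    }
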
+diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
+index 75f942ae5176..81c018e5c31e 100644
+--- a/fs/nfsd/auth.c
++++ b/fs/nfsd/auth.c
+@@ -59,10 +59,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+                               gi->gid[i] = exp->ex_anon_gid;
+                       else
+                               gi->gid[i] = rqgi->gid[i];
+-
+-                      /* Each thread allocates its own gi, no race */
+-                      groups_sort(gi);
+               }
++
++              /* Each thread allocates its own gi, no race */
++              groups_sort(gi);
+       } else {
+               gi = get_group_info(rqgi);
+       }
+diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
+index fe2cbeb90772..939aa066e1ca 100644
+--- a/fs/orangefs/devorangefs-req.c
++++ b/fs/orangefs/devorangefs-req.c
+@@ -161,7 +161,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
+       struct orangefs_kernel_op_s *op, *temp;
+       __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
+       static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
+-      struct orangefs_kernel_op_s *cur_op = NULL;
++      struct orangefs_kernel_op_s *cur_op;
+       unsigned long ret;
+ 
+       /* We do not support blocking IO. */
+@@ -181,6 +181,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
+       }
+ 
+ restart:
++      cur_op = NULL;
+       /* Get next op (if any) from top of list. */
+       spin_lock(&orangefs_request_list_lock);
+       list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
+diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
+index 02cc6139ec90..5b2cbe567365 100644
+--- a/fs/orangefs/file.c
++++ b/fs/orangefs/file.c
+@@ -446,7 +446,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
+ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ {
+       struct file *file = iocb->ki_filp;
+-      loff_t pos = *(&iocb->ki_pos);
++      loff_t pos = iocb->ki_pos;
+       ssize_t rc = 0;
+ 
+       BUG_ON(iocb->private);
+@@ -485,9 +485,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
+               }
+       }
+ 
+-      if (file->f_pos > i_size_read(file->f_mapping->host))
+-              orangefs_i_size_write(file->f_mapping->host, file->f_pos);
+-
+       rc = generic_write_checks(iocb, iter);
+ 
+       if (rc <= 0) {
+@@ -501,7 +498,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
+        * pos to the end of the file, so we will wait till now to set
+        * pos...
+        */
+-      pos = *(&iocb->ki_pos);
++      pos = iocb->ki_pos;
+ 
+       rc = do_readv_writev(ORANGEFS_IO_WRITE,
+                            file,
+diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
+index 45dd8f27b2ac..f28381a7cd12 100644
+--- a/fs/orangefs/orangefs-kernel.h
++++ b/fs/orangefs/orangefs-kernel.h
+@@ -570,17 +570,6 @@ do {                                                                        \
+       sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE;                  \
+ } while (0)
+ 
+-static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
+-{
+-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+-      inode_lock(inode);
+-#endif
+-      i_size_write(inode, i_size);
+-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+-      inode_unlock(inode);
+-#endif
+-}
+-
+ static inline void orangefs_set_timeout(struct dentry *dentry)
+ {
+       unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
+index abcfa3fa9992..f61b00887481 100644
+--- a/fs/orangefs/waitqueue.c
++++ b/fs/orangefs/waitqueue.c
+@@ -28,10 +28,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
+  */
+ void purge_waiting_ops(void)
+ {
+-      struct orangefs_kernel_op_s *op;
++      struct orangefs_kernel_op_s *op, *tmp;
+ 
+       spin_lock(&orangefs_request_list_lock);
+-      list_for_each_entry(op, &orangefs_request_list, list) {
++      list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "pvfs2-client-core: purging op tag %llu %s\n",
+                            llu(op->tag),
+diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
+index dc198bc64c61..edc8ef78b63f 100644
+--- a/fs/reiserfs/bitmap.c
++++ b/fs/reiserfs/bitmap.c
+@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
+                              "inode has negative prealloc blocks count.");
+ #endif
+       while (ei->i_prealloc_count > 0) {
+-              reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
+-              ei->i_prealloc_block++;
++              b_blocknr_t block_to_free;
++
++              /*
++               * reiserfs_free_prealloc_block can drop the write lock,
++               * which could allow another caller to free the same block.
++               * We can protect against it by modifying the prealloc
++               * state before calling it.
++               */
++              block_to_free = ei->i_prealloc_block++;
+               ei->i_prealloc_count--;
++              reiserfs_free_prealloc_block(th, inode, block_to_free);
+               dirty = 1;
+       }
+       if (dirty)
+@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
+       hint->prealloc_size = 0;
+ 
+       if (!hint->formatted_node && hint->preallocate) {
+-              if (S_ISREG(hint->inode->i_mode)
++              if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
+                   && hint->inode->i_size >=
+                   REISERFS_SB(hint->th->t_super)->s_alloc_options.
+                   preallocmin * hint->inode->i_sb->s_blocksize)
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 75ffd3b2149e..7995940d4187 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -36,7 +36,10 @@ struct bpf_map_ops {
+ };
+ 
+ struct bpf_map {
+-      atomic_t refcnt;
++      /* 1st cacheline with read-mostly members of which some
++       * are also accessed in fast-path (e.g. ops, max_entries).
++       */
++      const struct bpf_map_ops *ops ____cacheline_aligned;
+       enum bpf_map_type map_type;
+       u32 key_size;
+       u32 value_size;
+@@ -44,10 +47,15 @@ struct bpf_map {
+       u32 map_flags;
+       u32 pages;
+       bool unpriv_array;
+-      struct user_struct *user;
+-      const struct bpf_map_ops *ops;
+-      struct work_struct work;
++      /* 7 bytes hole */
++
++      /* 2nd cacheline with misc members to avoid false sharing
++       * particularly with refcounting.
++       */
++      struct user_struct *user ____cacheline_aligned;
++      atomic_t refcnt;
+       atomic_t usercnt;
++      struct work_struct work;
+ };
+ 
+ struct bpf_map_type_list {
+diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
+index 2189935075b4..a951fd10aaaa 100644
+--- a/include/linux/cacheinfo.h
++++ b/include/linux/cacheinfo.h
+@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
+       struct cacheinfo *info_list;
+       unsigned int num_levels;
+       unsigned int num_leaves;
++      bool cpu_map_populated;
+ };
+ 
+ /*
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 490f5a83f947..e3d7754f25f0 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -633,6 +633,8 @@ typedef struct pglist_data {
+       int kswapd_order;
+       enum zone_type kswapd_classzone_idx;
+ 
++      int kswapd_failures;            /* Number of 'reclaimed == 0' runs */
++
+ #ifdef CONFIG_COMPACTION
+       int kcompactd_max_order;
+       enum zone_type kcompactd_classzone_idx;
+diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
+index a3d04934aa96..6f8fbcf10dfb 100644
+--- a/include/linux/vermagic.h
++++ b/include/linux/vermagic.h
+@@ -24,16 +24,10 @@
+ #ifndef MODULE_ARCH_VERMAGIC
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
+-#ifdef RETPOLINE
+-#define MODULE_VERMAGIC_RETPOLINE "retpoline "
+-#else
+-#define MODULE_VERMAGIC_RETPOLINE ""
+-#endif
+ 
+ #define VERMAGIC_STRING                                               \
+       UTS_RELEASE " "                                                 \
+       MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT                     \
+       MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS       \
+-      MODULE_ARCH_VERMAGIC                                            \
+-      MODULE_VERMAGIC_RETPOLINE
++      MODULE_ARCH_VERMAGIC
+ 
+diff --git a/include/net/arp.h b/include/net/arp.h
+index 5e0f891d476c..1b3f86981757 100644
+--- a/include/net/arp.h
++++ b/include/net/arp.h
+@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
+ 
+ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+ {
++      if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
++              key = INADDR_ANY;
++
+       return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
+ }
+ 
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 615ce0abba9c..e64210c98c2b 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -290,6 +290,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
+                          int flags);
+ int ip6_flowlabel_init(void);
+ void ip6_flowlabel_cleanup(void);
++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
+ 
+ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
+ {
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 0940598c002f..23102da24dd9 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -213,6 +213,11 @@ int net_eq(const struct net *net1, const struct net *net2)
+       return net1 == net2;
+ }
+ 
++static inline int check_net(const struct net *net)
++{
++      return atomic_read(&net->count) != 0;
++}
++
+ void net_drop_ns(void *);
+ 
+ #else
+@@ -237,6 +242,11 @@ int net_eq(const struct net *net1, const struct net *net2)
+       return 1;
+ }
+ 
++static inline int check_net(const struct net *net)
++{
++      return 1;
++}
++
+ #define net_drop_ns NULL
+ #endif
+ 
+diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
+index 1c3154913a39..bc96b14dfb2c 100644
+--- a/include/uapi/linux/eventpoll.h
++++ b/include/uapi/linux/eventpoll.h
+@@ -26,6 +26,19 @@
+ #define EPOLL_CTL_DEL 2
+ #define EPOLL_CTL_MOD 3
+ 
++/* Epoll event masks */
++#define EPOLLIN               0x00000001
++#define EPOLLPRI      0x00000002
++#define EPOLLOUT      0x00000004
++#define EPOLLERR      0x00000008
++#define EPOLLHUP      0x00000010
++#define EPOLLRDNORM   0x00000040
++#define EPOLLRDBAND   0x00000080
++#define EPOLLWRNORM   0x00000100
++#define EPOLLWRBAND   0x00000200
++#define EPOLLMSG      0x00000400
++#define EPOLLRDHUP    0x00002000
++
+ /* Set exclusive wakeup mode for the target file descriptor */
+ #define EPOLLEXCLUSIVE (1 << 28)
+ 
+diff --git a/init/Kconfig b/init/Kconfig
+index 34407f15e6d3..b331feeabda4 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1609,6 +1609,13 @@ config BPF_SYSCALL
+         Enable the bpf() system call that allows to manipulate eBPF
+         programs and maps via file descriptors.
+ 
++config BPF_JIT_ALWAYS_ON
++      bool "Permanently enable BPF JIT and remove BPF interpreter"
++      depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
++      help
++        Enables BPF JIT and removes BPF interpreter to avoid
++        speculative execution of BPF instructions by the interpreter
++
+ config SHMEM
+       bool "Use full shmem filesystem" if EXPERT
+       default y
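
Usage note, editorial: with this patch applied the mitigation above is opted into at build time. A hypothetical .config fragment follows; HAVE_EBPF_JIT is selected by the architecture (e.g. x86_64) rather than set by hand, so only the lines below are user-visible:

    CONFIG_BPF_SYSCALL=y
    CONFIG_BPF_JIT=y
    CONFIG_BPF_JIT_ALWAYS_ON=y
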
+diff --git a/ipc/msg.c b/ipc/msg.c
+index e12307d0c920..ff10d43b5184 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -763,7 +763,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
+       if (*msgtyp == 0)
+               return SEARCH_ANY;
+       if (*msgtyp < 0) {
+-              *msgtyp = -*msgtyp;
++              if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
++                      *msgtyp = LONG_MAX;
++              else
++                      *msgtyp = -*msgtyp;
+               return SEARCH_LESSEQUAL;
+       }
+       if (msgflg & MSG_EXCEPT)
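
Editorial aside, not part of the patch: this is the same negation corner case as the fs/fcntl.c hunk above, but for long. Substituting LONG_MAX is safe because a negative *msgtyp selects SEARCH_LESSEQUAL, valid message types are positive longs, and every positive long satisfies type <= LONG_MAX, which is exactly the set the unrepresentable -LONG_MIN would have matched. A small sketch of the sanitised conversion:

    #include <limits.h>
    #include <stdio.h>

    static long sanitize_msgtyp(long msgtyp)    /* negative inputs only */
    {
            if (msgtyp == LONG_MIN)     /* -LONG_MIN is undefined */
                    return LONG_MAX;    /* still matches all positive types */
            return -msgtyp;
    }

    int main(void)
    {
            printf("%ld %ld\n", sanitize_msgtyp(-42), sanitize_msgtyp(LONG_MIN));
            return 0;
    }
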
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index aa6d98154106..879ca844ba1d 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -458,6 +458,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+ }
+ EXPORT_SYMBOL_GPL(__bpf_call_base);
+ 
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ /**
+  *    __bpf_prog_run - run eBPF program on a given context
+  *    @ctx: is the data we are operating on
+@@ -641,7 +642,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
+               DST = tmp;
+               CONT;
+       ALU_MOD_X:
+-              if (unlikely(SRC == 0))
++              if (unlikely((u32)SRC == 0))
+                       return 0;
+               tmp = (u32) DST;
+               DST = do_div(tmp, (u32) SRC);
+@@ -660,7 +661,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
+               DST = div64_u64(DST, SRC);
+               CONT;
+       ALU_DIV_X:
+-              if (unlikely(SRC == 0))
++              if (unlikely((u32)SRC == 0))
+                       return 0;
+               tmp = (u32) DST;
+               do_div(tmp, (u32) SRC);
+@@ -715,7 +716,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
+               struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+               struct bpf_array *array = container_of(map, struct bpf_array, map);
+               struct bpf_prog *prog;
+-              u64 index = BPF_R3;
++              u32 index = BPF_R3;
+ 
+               if (unlikely(index >= array->map.max_entries))
+                       goto out;
+@@ -923,6 +924,13 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
+ }
+ STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
+ 
++#else
++static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
++{
++      return 0;
++}
++#endif
++
+ bool bpf_prog_array_compatible(struct bpf_array *array,
+                              const struct bpf_prog *fp)
+ {
+@@ -970,7 +978,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
+  */
+ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+ {
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+       fp->bpf_func = (void *) __bpf_prog_run;
++#else
++      fp->bpf_func = (void *) __bpf_prog_ret0;
++#endif
+ 
+       /* eBPF JITs can rewrite the program in case constant
+        * blinding is active. However, in case of error during
+@@ -979,6 +991,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
+        * be JITed, but falls back to the interpreter.
+        */
+       fp = bpf_int_jit_compile(fp);
++#ifdef CONFIG_BPF_JIT_ALWAYS_ON
++      if (!fp->jited) {
++              *err = -ENOTSUPP;
++              return fp;
++      }
++#endif
+       bpf_prog_lock_ro(fp);
+ 
+       /* The tail call compatibility check can only be done at
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 19c44cf59bb2..076e4a0ff95e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -702,6 +702,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+       return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
+ }
+ 
++static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
++{
++      const struct bpf_reg_state *reg = &env->cur_state.regs[regno];
++
++      return reg->type == PTR_TO_CTX;
++}
++
+ static int check_ptr_alignment(struct bpf_verifier_env *env,
+                              struct bpf_reg_state *reg, int off, int size)
+ {
+@@ -896,6 +903,12 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
+               return -EACCES;
+       }
+ 
++      if (is_ctx_reg(env, insn->dst_reg)) {
++              verbose("BPF_XADD stores into R%d context is not allowed\n",
++                      insn->dst_reg);
++              return -EACCES;
++      }
++
+       /* check whether atomic_add can read the memory */
+       err = check_mem_access(env, insn->dst_reg, insn->off,
+                              BPF_SIZE(insn->code), BPF_READ, -1);
+@@ -1843,6 +1856,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+                       return -EINVAL;
+               }
+ 
++              if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
++                      verbose("BPF_ARSH not supported for 32 bit ALU\n");
++                      return -EINVAL;
++              }
++
+               if ((opcode == BPF_LSH || opcode == BPF_RSH ||
+                    opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
+                       int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
+@@ -3007,6 +3025,12 @@ static int do_check(struct bpf_verifier_env *env)
+                       if (err)
+                               return err;
+ 
++                      if (is_ctx_reg(env, insn->dst_reg)) {
++                              verbose("BPF_ST stores into R%d context is not allowed\n",
++                                      insn->dst_reg);
++                              return -EACCES;
++                      }
++
+                       /* check that memory (dst_reg + off) is writeable */
+                       err = check_mem_access(env, insn->dst_reg, insn->off,
+                                              BPF_SIZE(insn->code), BPF_WRITE,
+@@ -3386,6 +3410,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 
+ 
+       for (i = 0; i < insn_cnt; i++, insn++) {
++              if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
++                  insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
++                      /* due to JIT bugs clear upper 32-bits of src register
++                       * before div/mod operation
++                       */
++                      insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
++                      insn_buf[1] = *insn;
++                      cnt = 2;
++                      new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
++                      if (!new_prog)
++                              return -ENOMEM;
++
++                      delta    += cnt - 1;
++                      env->prog = prog = new_prog;
++                      insn      = new_prog->insnsi + i + delta;
++                      continue;
++              }
++
+               if (insn->code != (BPF_JMP | BPF_CALL))
+                       continue;
+ 
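
Editorial aside, not part of the patch: BPF_MOV32_REG(src, src) is a 32-bit register move onto itself, which zero-extends the upper 32 bits; patching it in front of each 32-bit BPF_DIV/BPF_MOD by register means the JIT always sees a clean u32 divisor, mirroring the (u32)SRC casts added to the interpreter in the kernel/bpf/core.c hunk above. In C terms the patched pair computes:

    #include <stdint.h>
    #include <stdio.h>

    /* What the two patched eBPF instructions compute for a 32-bit ALU div;
     * a zero divisor aborts the program (the interpreter's `return 0`). */
    static uint64_t alu32_div(uint64_t dst, uint64_t src)
    {
            src = (uint32_t)src;                /* BPF_MOV32_REG(src, src) */
            if ((uint32_t)src == 0)
                    return 0;
            return (uint32_t)dst / (uint32_t)src;
    }

    int main(void)
    {
            /* Garbage in the upper half of src no longer changes the result. */
            printf("%llu\n", (unsigned long long)
                   alu32_div(100, 0xdeadbeef00000007ULL));   /* prints 14 */
            return 0;
    }
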
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index eeb7f2f5698d..54fd2fed36e9 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -652,7 +652,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+ {
+       base->expires_next.tv64 = KTIME_MAX;
++      base->hang_detected = 0;
+       base->hres_active = 0;
++      base->next_timer = NULL;
+ }
+ 
+ /*
+@@ -1610,6 +1612,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+               timerqueue_init_head(&cpu_base->clock_base[i].active);
+       }
+ 
++      cpu_base->active_bases = 0;
+       cpu_base->cpu = cpu;
+       hrtimer_init_hres(cpu_base);
+       return 0;
+diff --git a/lib/test_bpf.c b/lib/test_bpf.c
+index 2e385026915c..98da7520a6aa 100644
+--- a/lib/test_bpf.c
++++ b/lib/test_bpf.c
+@@ -5646,9 +5646,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
+                               return NULL;
+                       }
+               }
+-              /* We don't expect to fail. */
+               if (*err) {
+-                      pr_cont("FAIL to attach err=%d len=%d\n",
++                      pr_cont("FAIL to prog_create err=%d len=%d\n",
+                               *err, fprog.len);
+                       return NULL;
+               }
+@@ -5671,6 +5670,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
+                * checks.
+                */
+               fp = bpf_prog_select_runtime(fp, err);
++              if (*err) {
++                      pr_cont("FAIL to select_runtime err=%d\n", *err);
++                      return NULL;
++              }
+               break;
+       }
+ 
+@@ -5856,8 +5859,8 @@ static __init int test_bpf(void)
+                               pass_cnt++;
+                               continue;
+                       }
+-
+-                      return err;
++                      err_cnt++;
++                      continue;
+               }
+ 
+               pr_cont("jited:%u ", fp->jited);
+diff --git a/mm/cma.c b/mm/cma.c
+index c960459eda7e..397687fc51f9 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma)
+ }
+ 
+ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+-                                           int align_order)
++                                           unsigned int align_order)
+ {
+       if (align_order <= cma->order_per_bit)
+               return 0;
+@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+ }
+ 
+ /*
+- * Find a PFN aligned to the specified order and return an offset represented in
+- * order_per_bits.
++ * Find the offset of the base PFN from the specified align_order.
++ * The value returned is represented in order_per_bits.
+  */
+ static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
+-                                             int align_order)
++                                             unsigned int align_order)
+ {
+-      if (align_order <= cma->order_per_bit)
+-              return 0;
+-
+-      return (ALIGN(cma->base_pfn, (1UL << align_order))
+-              - cma->base_pfn) >> cma->order_per_bit;
++      return (cma->base_pfn & ((1UL << align_order) - 1))
++              >> cma->order_per_bit;
+ }
+ 
+ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
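
Editorial aside, not part of the patch: the old formula measured the distance from base_pfn up to the next aligned PFN, while the bitmap offset needs the distance of base_pfn past the previous aligned boundary; the masked form also behaves correctly when align_order <= order_per_bit (CMA bases are aligned at order_per_bit granularity), so the early return could go. A worked example with hypothetical values:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long base_pfn = (380UL << 9) + 100;  /* 100 PFNs past a
                                                             512-PFN boundary */
            unsigned int align_order = 9, order_per_bit = 0;

            unsigned long old = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
                                >> order_per_bit;  /* distance to NEXT boundary */
            unsigned long new = (base_pfn & ((1UL << align_order) - 1))
                                >> order_per_bit;  /* distance PAST boundary */

            printf("old=%lu new=%lu\n", old, new);  /* old=412 new=100 */
            return 0;
    }
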
+diff --git a/mm/internal.h b/mm/internal.h
+index 34a5459e5989..3e2d01694747 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -73,6 +73,12 @@ static inline void set_page_refcounted(struct page *page)
+ 
+ extern unsigned long highest_memmap_pfn;
+ 
++/*
++ * Maximum number of reclaim retries without progress before the OOM
++ * killer is consider the only way forward.
++ */
++#define MAX_RECLAIM_RETRIES 16
++
+ /*
+  * in mm/vmscan.c:
+  */
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 2a800c4a39bd..50088150fc17 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5531,7 +5531,7 @@ static void uncharge_list(struct list_head *page_list)
+               next = page->lru.next;
+ 
+               VM_BUG_ON_PAGE(PageLRU(page), page);
+-              VM_BUG_ON_PAGE(page_count(page), page);
++              VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
+ 
+               if (!page->mem_cgroup)
+                       continue;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index ce7d416edab7..5aa71a82ca73 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -535,6 +535,13 @@ static int delete_from_lru_cache(struct page *p)
+                */
+               ClearPageActive(p);
+               ClearPageUnevictable(p);
++
++              /*
++               * Poisoned page might never drop its ref count to 0 so we have
++               * to uncharge it manually from its memcg.
++               */
++              mem_cgroup_uncharge(p);
++
+               /*
+                * drop the page count elevated by isolate_lru_page()
+                */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 5b48adb4aa56..45ac5b973459 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2240,7 +2240,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+               gap_addr = TASK_SIZE;
+ 
+       next = vma->vm_next;
+-      if (next && next->vm_start < gap_addr) {
++      if (next && next->vm_start < gap_addr &&
++                      (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+@@ -2324,7 +2325,8 @@ int expand_downwards(struct vm_area_struct *vma,
+       if (gap_addr > address)
+               return -ENOMEM;
+       prev = vma->vm_prev;
+-      if (prev && prev->vm_end > gap_addr) {
++      if (prev && prev->vm_end > gap_addr &&
++                      (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index fbc38888252b..94018ea5f935 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2821,9 +2821,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
+               if (!area->nr_free)
+                       continue;
+ 
+-              if (alloc_harder)
+-                      return true;
+-
+               for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+                       if (!list_empty(&area->free_list[mt]))
+                               return true;
+@@ -2835,6 +2832,9 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
+                       return true;
+               }
+ #endif
++              if (alloc_harder &&
++                      !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
++                      return true;
+       }
+       return false;
+ }
+@@ -3421,12 +3421,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
+       return false;
+ }
+ 
+-/*
+- * Maximum number of reclaim retries without any progress before OOM killer
+- * is consider as the only way to move forward.
+- */
+-#define MAX_RECLAIM_RETRIES 16
+-
+ /*
+  * Checks whether it makes sense to retry the reclaim to make a forward progress
+  * for the given allocation request.
+@@ -4385,7 +4379,8 @@ void show_free_areas(unsigned int filter)
+                       K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+                       K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
+                       node_page_state(pgdat, NR_PAGES_SCANNED),
+-                      !pgdat_reclaimable(pgdat) ? "yes" : "no");
++                      pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
++                              "yes" : "no");
+       }
+ 
+       for_each_populated_zone(zone) {
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 30a88b945a44..f118dc23f662 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2606,6 +2606,15 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+       } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
+                                        sc->nr_scanned - nr_scanned, sc));
+ 
++      /*
++       * Kswapd gives up on balancing particular nodes after too
++       * many failures to reclaim anything from them and goes to
++       * sleep. On reclaim progress, reset the failure counter. A
++       * successful direct reclaim run will revive a dormant kswapd.
++       */
++      if (reclaimable)
++              pgdat->kswapd_failures = 0;
++
+       return reclaimable;
+ }
+ 
+@@ -2680,10 +2689,6 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+                                                GFP_KERNEL | __GFP_HARDWALL))
+                               continue;
+ 
+-                      if (sc->priority != DEF_PRIORITY &&
+-                          !pgdat_reclaimable(zone->zone_pgdat))
+-                              continue;       /* Let kswapd poll it */
+-
+                       /*
+                        * If we already have plenty of memory free for
+                        * compaction in this zone, don't free any more.
+@@ -2820,7 +2825,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+       return 0;
+ }
+ 
+-static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
++static bool allow_direct_reclaim(pg_data_t *pgdat)
+ {
+       struct zone *zone;
+       unsigned long pfmemalloc_reserve = 0;
+@@ -2828,6 +2833,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
+       int i;
+       bool wmark_ok;
+ 
++      if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
++              return true;
++
+       for (i = 0; i <= ZONE_NORMAL; i++) {
+               zone = &pgdat->node_zones[i];
+               if (!managed_zone(zone) ||
+@@ -2908,7 +2916,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+ 
+               /* Throttle based on the first usable node */
+               pgdat = zone->zone_pgdat;
+-              if (pfmemalloc_watermark_ok(pgdat))
++              if (allow_direct_reclaim(pgdat))
+                       goto out;
+               break;
+       }
+@@ -2930,14 +2938,14 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+        */
+       if (!(gfp_mask & __GFP_FS)) {
+               wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
+-                      pfmemalloc_watermark_ok(pgdat), HZ);
++                      allow_direct_reclaim(pgdat), HZ);
+ 
+               goto check_pending;
+       }
+ 
+       /* Throttle until kswapd wakes the process */
+       wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
+-              pfmemalloc_watermark_ok(pgdat));
++              allow_direct_reclaim(pgdat));
+ 
+ check_pending:
+       if (fatal_signal_pending(current))
+@@ -3116,7 +3124,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+ 
+       /*
+        * The throttled processes are normally woken up in balance_pgdat() as
+-       * soon as pfmemalloc_watermark_ok() is true. But there is a potential
++       * soon as allow_direct_reclaim() is true. But there is a potential
+        * race between when kswapd checks the watermarks and a process gets
+        * throttled. There is also a potential race if processes get
+        * throttled, kswapd wakes, a large process exits thereby balancing the
+@@ -3130,6 +3138,10 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+       if (waitqueue_active(&pgdat->pfmemalloc_wait))
+               wake_up_all(&pgdat->pfmemalloc_wait);
+ 
++      /* Hopeless node, leave it to direct reclaim */
++      if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
++              return true;
++
+       for (i = 0; i <= classzone_idx; i++) {
+               struct zone *zone = pgdat->node_zones + i;
+ 
+@@ -3216,9 +3228,9 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
+       count_vm_event(PAGEOUTRUN);
+ 
+       do {
++              unsigned long nr_reclaimed = sc.nr_reclaimed;
+               bool raise_priority = true;
+ 
+-              sc.nr_reclaimed = 0;
+               sc.reclaim_idx = classzone_idx;
+ 
+               /*
+@@ -3297,7 +3309,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
+                * able to safely make forward progress. Wake them
+                */
+               if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
+-                              pfmemalloc_watermark_ok(pgdat))
++                              allow_direct_reclaim(pgdat))
+                       wake_up_all(&pgdat->pfmemalloc_wait);
+ 
+               /* Check if kswapd should be suspending */
+@@ -3308,10 +3320,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
+                * Raise priority if scanning rate is too low or there was no
+                * progress in reclaiming pages
+                */
+-              if (raise_priority || !sc.nr_reclaimed)
++              nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
++              if (raise_priority || !nr_reclaimed)
+                       sc.priority--;
+       } while (sc.priority >= 1);
+ 
++      if (!sc.nr_reclaimed)
++              pgdat->kswapd_failures++;
++
+ out:
+       /*
+        * Return the order kswapd stopped reclaiming at as
+@@ -3511,6 +3527,10 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
+       if (!waitqueue_active(&pgdat->kswapd_wait))
+               return;
+ 
++      /* Hopeless node, leave it to direct reclaim */
++      if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
++              return;
++
+       /* Only wake kswapd if all zones are unbalanced */
+       for (z = 0; z <= classzone_idx; z++) {
+               zone = pgdat->node_zones + z;
+@@ -3781,9 +3801,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
+           sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
+               return NODE_RECLAIM_FULL;
+ 
+-      if (!pgdat_reclaimable(pgdat))
+-              return NODE_RECLAIM_FULL;
+-
+       /*
+        * Do not scan if the allocation should not be delayed.
+        */
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 6a088df04b29..3863b5d6d598 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1421,7 +1421,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
+                  "\n  node_unreclaimable:  %u"
+                  "\n  start_pfn:           %lu"
+                  "\n  node_inactive_ratio: %u",
+-                 !pgdat_reclaimable(zone->zone_pgdat),
++                 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
+                  zone->zone_start_pfn,
+                  zone->zone_pgdat->inactive_ratio);
+       seq_putc(m, '\n');
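
Taken together, the mm/ hunks above retire the pgdat_reclaimable() heuristic in favour of an explicit counter: balance_pgdat() increments pgdat->kswapd_failures after a pass that reclaims nothing, shrink_node() resets it on any progress, and once the counter reaches MAX_RECLAIM_RETRIES the node is treated as hopeless, so wakeup_kswapd() stays silent and allow_direct_reclaim() lets callers through unthrottled. A single-threaded sketch of that protocol, assuming a simplified pgdat (the kernel versions run concurrently on the real structure):

#define MAX_RECLAIM_RETRIES 16	/* shared from a header after this patch */

struct pgdat_model {
	int kswapd_failures;
};

static int node_hopeless(const struct pgdat_model *p)
{
	return p->kswapd_failures >= MAX_RECLAIM_RETRIES;
}

static void record_balance_result(struct pgdat_model *p,
				  unsigned long nr_reclaimed)
{
	if (nr_reclaimed)
		p->kswapd_failures = 0;	/* progress revives the node */
	else
		p->kswapd_failures++;	/* another fruitless pass */
}

The reset lives in shrink_node(), which kswapd and direct reclaim share, so a successful direct-reclaim run revives a dormant kswapd.
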
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 5488e4a6ccd0..ac1552d8b4ad 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -722,13 +722,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+       if (unlikely(!net_eq(dev_net(dev), &init_net)))
+               goto drop;
+ 
+-      if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-                    skb->len != CAN_MTU ||
+-                    cfd->len > CAN_MAX_DLEN,
+-                    "PF_CAN: dropped non conform CAN skbuf: "
+-                    "dev type %d, len %d, datalen %d\n",
+-                    dev->type, skb->len, cfd->len))
++      if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
++                   cfd->len > CAN_MAX_DLEN)) {
++              pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
++                           dev->type, skb->len, cfd->len);
+               goto drop;
++      }
+ 
+       can_receive(skb, dev);
+       return NET_RX_SUCCESS;
+@@ -746,13 +745,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+       if (unlikely(!net_eq(dev_net(dev), &init_net)))
+               goto drop;
+ 
+-      if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-                    skb->len != CANFD_MTU ||
+-                    cfd->len > CANFD_MAX_DLEN,
+-                    "PF_CAN: dropped non conform CAN FD skbuf: "
+-                    "dev type %d, len %d, datalen %d\n",
+-                    dev->type, skb->len, cfd->len))
++      if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
++                   cfd->len > CANFD_MAX_DLEN)) {
++              pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
++                           dev->type, skb->len, cfd->len);
+               goto drop;
++      }
+ 
+       can_receive(skb, dev);
+       return NET_RX_SUCCESS;
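
Both CAN receive paths above now drop malformed frames with pr_warn_once() instead of WARN_ONCE(), since a bad frame from a buggy or hostile driver should produce one log line rather than a kernel warning (which is fatal on systems booted with panic_on_warn). The same report-once idiom as standalone C, with illustrative parameter names:

#include <stdbool.h>
#include <stdio.h>

static bool can_frame_conforms(int dev_type, int frame_len, int data_len,
			       int want_type, int want_mtu, int max_dlen)
{
	if (dev_type != want_type || frame_len != want_mtu ||
	    data_len > max_dlen) {
		static bool warned;	/* report the first offender only */

		if (!warned) {
			warned = true;
			fprintf(stderr,
				"dropped non conform frame: type %d, len %d, datalen %d\n",
				dev_type, frame_len, data_len);
		}
		return false;
	}
	return true;
}
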
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 09007a71c8dd..67b5d4d8acb1 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3083,10 +3083,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
+               hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+ 
+               /* + transport layer */
+-              if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+-                      hdr_len += tcp_hdrlen(skb);
+-              else
+-                      hdr_len += sizeof(struct udphdr);
++              if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
++                      const struct tcphdr *th;
++                      struct tcphdr _tcphdr;
++
++                      th = skb_header_pointer(skb, skb_transport_offset(skb),
++                                              sizeof(_tcphdr), &_tcphdr);
++                      if (likely(th))
++                              hdr_len += __tcp_hdrlen(th);
++              } else {
++                      struct udphdr _udphdr;
++
++                      if (skb_header_pointer(skb, skb_transport_offset(skb),
++                                             sizeof(_udphdr), &_udphdr))
++                              hdr_len += sizeof(struct udphdr);
++              }
+ 
+               if (shinfo->gso_type & SKB_GSO_DODGY)
+                       gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
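
qdisc_pkt_len_init() used to call tcp_hdrlen() on the assumption that a complete TCP header was present in the GSO packet. The hunk above copies the candidate header out via skb_header_pointer() first and only adds its length when the packet really contains one. The guarded-read idiom, modeled over a flat buffer (the real skb_header_pointer() also handles non-linear skbs; names here are illustrative):

#include <stddef.h>
#include <string.h>

/* Return a pointer to hdr_len bytes at offset, or NULL if the packet
 * is too short; buf receives a copy so the caller never reads past
 * the end of pkt. */
static const void *header_pointer(const unsigned char *pkt, size_t pkt_len,
				  size_t offset, size_t hdr_len, void *buf)
{
	if (offset > pkt_len || pkt_len - offset < hdr_len)
		return NULL;
	memcpy(buf, pkt + offset, hdr_len);
	return buf;
}
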
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 4eb4ce0aeef4..e8c89d2d2bc0 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -441,6 +441,10 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
+                           convert_bpf_extensions(fp, &insn))
+                               break;
+ 
++                      if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
++                          fp->code == (BPF_ALU | BPF_MOD | BPF_X))
++                              *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
++
+                      *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
+                       break;
+ 
+@@ -1005,11 +1009,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
+                */
+               goto out_err_free;
+ 
+-      /* We are guaranteed to never error here with cBPF to eBPF
+-       * transitions, since there's no issue with type compatibility
+-       * checks on program arrays.
+-       */
+       fp = bpf_prog_select_runtime(fp, &err);
++      if (err)
++              goto out_err_free;
+ 
+       kfree(old_prog);
+       return fp;
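
The bpf_convert_filter() hunk inserts a BPF_MOV32_REG(X, X) ahead of each translated DIV_X/MOD_X instruction. cBPF divides 32-bit values while eBPF registers are 64 bits wide, so a divisor whose low 32 bits are zero could still look non-zero to a 64-bit check; truncating X first keeps the divide-by-zero handling and the arithmetic consistently 32-bit. The intended semantics, sketched in plain C:

#include <stdint.h>

static uint32_t cbpf_div_x(uint32_t a, uint64_t x_reg)
{
	/* The inserted BPF_MOV32_REG zero-extends the low 32 bits,
	 * discarding whatever sat in the upper half of the register. */
	uint32_t x = (uint32_t)x_reg;

	/* cBPF defines division by zero as aborting with result 0. */
	return x ? a / x : 0;
}
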
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 32e4e0158846..862d63ec56e4 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -550,8 +550,8 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
+ out_good:
+       ret = true;
+ 
+-      key_control->thoff = (u16)nhoff;
+ out:
++      key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+       key_basic->n_proto = proto;
+       key_basic->ip_proto = ip_proto;
+ 
+@@ -559,7 +559,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
+ 
+ out_bad:
+       ret = false;
+-      key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
+       goto out;
+ }
+ EXPORT_SYMBOL(__skb_flow_dissect);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index f45f6198851f..7b315663f840 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -496,7 +496,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+       if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
+               nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
+ 
+-      hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
++      hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ 
+       if (n->parms->dead) {
+               rc = ERR_PTR(-EINVAL);
+@@ -508,7 +508,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+            n1 != NULL;
+            n1 = rcu_dereference_protected(n1->next,
+                       lockdep_is_held(&tbl->lock))) {
+-              if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
++              if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
+                       if (want_ref)
+                               neigh_hold(n1);
+                       rc = n1;
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index a7f05f0130e8..1b4619008c4e 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -292,7 +292,13 @@ static struct ctl_table net_core_table[] = {
+               .data           = &bpf_jit_enable,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+               .proc_handler   = proc_dointvec
++#else
++              .proc_handler   = proc_dointvec_minmax,
++              .extra1         = &one,
++              .extra2         = &one,
++#endif
+       },
+ # ifdef CONFIG_HAVE_EBPF_JIT
+       {
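
Under CONFIG_BPF_JIT_ALWAYS_ON the bpf_jit_enable entry above switches to proc_dointvec_minmax with extra1 and extra2 both pointing at one, so the only value a write can set is 1 and the JIT can no longer be disabled at runtime (the jit_init() initcall added to net/socket.c further down forces the boot-time value to 1 as well). A sketch of how a min == max bound pins a sysctl, under simplified assumptions:

/* With min == max == 1 the only accepted write is the value already
 * in force; everything else is rejected (-EINVAL in the kernel). */
static int sysctl_int_minmax(int *target, int requested, int min, int max)
{
	if (requested < min || requested > max)
		return -1;
	*target = requested;
	return 0;
}
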
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index 5e3a7302f774..7753681195c1 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
+ 
+       ccid2_pr_debug("RTO_EXPIRE\n");
+ 
++      if (sk->sk_state == DCCP_CLOSED)
++              goto out;
++
+       /* back-off timer */
+       hc->tx_rto <<= 1;
+       if (hc->tx_rto > DCCP_RTO_MAX)
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 51b27ae09fbd..e60517eb1c3a 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
+ 
+ static int arp_constructor(struct neighbour *neigh)
+ {
+-      __be32 addr = *(__be32 *)neigh->primary_key;
++      __be32 addr;
+       struct net_device *dev = neigh->dev;
+       struct in_device *in_dev;
+       struct neigh_parms *parms;
++      u32 inaddr_any = INADDR_ANY;
+ 
++      if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
++              memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
++
++      addr = *(__be32 *)neigh->primary_key;
+       rcu_read_lock();
+       in_dev = __in_dev_get_rcu(dev);
+       if (!in_dev) {
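
This arp_constructor() change pairs with the __neigh_create() hunks in net/core/neighbour.c above: for loopback and point-to-point devices the constructor now canonicalizes the key to INADDR_ANY, so the table has to hash and compare the entry's own (possibly rewritten) primary_key rather than the caller-supplied pkey, or a lookup could miss the entry an insert just created. A condensed model of that ordering, using hypothetical simplified types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct neigh_model {
	uint32_t primary_key;	/* set from pkey, then canonicalized */
	bool     loopback;
};

static void constructor(struct neigh_model *n)
{
	if (n->loopback)
		n->primary_key = 0;	/* INADDR_ANY */
}

/* The duplicate check at insert time must use n->primary_key:
 * comparing the original pkey against stored (canonicalized) keys
 * would never match a rewritten entry. */
static bool same_entry(const struct neigh_model *stored,
		       const struct neigh_model *n)
{
	return !memcmp(&stored->primary_key, &n->primary_key,
		       sizeof(n->primary_key));
}
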
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 7bff0c65046f..9c7a4cea1628 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
+               return htonl(INADDR_ANY);
+ 
+       for_ifa(in_dev) {
+-              if (inet_ifa_match(fl4->saddr, ifa))
++              if (fl4->saddr == ifa->ifa_local)
+                       return fl4->saddr;
+       } endfor_ifa(in_dev);
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 05d2bde00864..7efa6b062049 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2215,6 +2215,9 @@ void tcp_close(struct sock *sk, long timeout)
+                       tcp_send_active_reset(sk, GFP_ATOMIC);
+                       __NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_TCPABORTONMEMORY);
++              } else if (!check_net(sock_net(sk))) {
++                      /* Not possible to send reset; just close */
++                      tcp_set_state(sk, TCP_CLOSE);
+               }
+       }
+ 
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index bc68da38ea86..366b1becff9d 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
+ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
+                                       netdev_features_t features)
+ {
++      if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
++              return ERR_PTR(-EINVAL);
++
+       if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
+               return ERR_PTR(-EINVAL);
+ 
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 74db43b47917..69523389f067 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -50,11 +50,19 @@ static void tcp_write_err(struct sock *sk)
+  *  to prevent DoS attacks. It is called when a retransmission timeout
+  *  or zero probe timeout occurs on orphaned socket.
+  *
++ *  Also close if our net namespace is exiting; in that case there is no
++ *  hope of ever communicating again since all netns interfaces are already
++ *  down (or about to be down), and we need to release our dst references,
++ *  which have been moved to the netns loopback interface, so the namespace
++ *  can finish exiting.  This condition is only possible if we are a kernel
++ *  socket, as those do not hold references to the namespace.
++ *
+  *  Criteria is still not confirmed experimentally and may change.
+  *  We kill the socket, if:
+  *  1. If number of orphaned sockets exceeds an administratively configured
+  *     limit.
+  *  2. If we have strong memory pressure.
++ *  3. If our net namespace is exiting.
+  */
+ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+ {
+@@ -83,6 +91,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
+               return 1;
+       }
++
++      if (!check_net(sock_net(sk))) {
++              /* Not possible to send reset; just close */
++              tcp_done(sk);
++              return 1;
++      }
++
+       return 0;
+ }
+ 
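
The new check_net() tests (here and in tcp_close() above) cover kernel TCP sockets that outlive their exiting network namespace: once the netns interfaces are gone no packet, not even a reset, can be sent, so the socket is closed quietly, releasing the dst references that would otherwise keep the namespace from finishing its teardown. The resulting decision order of tcp_out_of_resources(), as a condensed model (the real function also manages timers and statistics):

#include <stdbool.h>

static int out_of_resources_model(bool too_many_orphans, bool memory_pressure,
				  bool net_alive)
{
	if (too_many_orphans || memory_pressure)
		return 1;	/* kill, sending a reset where requested */
	if (!net_alive)
		return 1;	/* kill quietly: no reset can be sent */
	return 0;		/* keep the socket alive */
}
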
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 6401574cd638..f4f616eaaeb8 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -205,6 +205,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+               goto out;
+       }
+ 
++      if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
++              goto out;
++
+       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+               goto out;
+ 
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index c46066c5dc27..db2613b4a049 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+ 
+       nt->dev = dev;
+       nt->net = dev_net(dev);
+-      ip6gre_tnl_link_config(nt, 1);
+ 
+       if (register_netdevice(dev) < 0)
+               goto failed_free;
+ 
++      ip6gre_tnl_link_config(nt, 1);
++
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & TUNNEL_SEQ))
+               dev->features |= NETIF_F_LLTX;
+@@ -1263,7 +1264,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
+ 
+ static int ip6gre_tap_init(struct net_device *dev)
+ {
+-      struct ip6_tnl *tunnel;
+       int ret;
+ 
+       ret = ip6gre_tunnel_init_common(dev);
+@@ -1272,10 +1272,6 @@ static int ip6gre_tap_init(struct net_device *dev)
+ 
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 
+-      tunnel = netdev_priv(dev);
+-
+-      ip6gre_tnl_link_config(tunnel, 1);
+-
+       return 0;
+ }
+ 
+@@ -1370,7 +1366,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+ 
+       nt->dev = dev;
+       nt->net = dev_net(dev);
+-      ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+ 
+       dev->features           |= GRE6_FEATURES;
+       dev->hw_features        |= GRE6_FEATURES;
+@@ -1396,6 +1391,11 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+       if (err)
+               goto out;
+ 
++      ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
++
++      if (tb[IFLA_MTU])
++              ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
++
+       dev_hold(dev);
+       ip6gre_tunnel_link(ign, nt);
+ 
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 388584b8ff31..2e3db3619858 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -156,7 +156,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+                           !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+ }
+ 
+-static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+ {
+       if (!np->autoflowlabel_set)
+               return ip6_default_np_autolabel(net);
+@@ -1260,14 +1260,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+       v6_cork->tclass = ipc6->tclass;
+       if (rt->dst.flags & DST_XFRM_TUNNEL)
+               mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+-                    rt->dst.dev->mtu : dst_mtu(&rt->dst);
++                    READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
+       else
+               mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+-                    rt->dst.dev->mtu : dst_mtu(rt->dst.path);
++                    READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
+       if (np->frag_size < mtu) {
+               if (np->frag_size)
+                       mtu = np->frag_size;
+       }
++      if (mtu < IPV6_MIN_MTU)
++              return -EINVAL;
+       cork->base.fragsize = mtu;
+       if (dst_allfrag(rt->dst.path))
+               cork->base.flags |= IPCORK_ALLFRAG;
+@@ -1798,6 +1800,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
+       cork.base.flags = 0;
+       cork.base.addr = 0;
+       cork.base.opt = NULL;
++      cork.base.dst = NULL;
+       v6_cork.opt = NULL;
+       err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
+       if (err) {
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 6e3871c7f8f7..bcea985dd76b 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -1316,7 +1316,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+               break;
+ 
+       case IPV6_AUTOFLOWLABEL:
+-              val = np->autoflowlabel;
++              val = ip6_autoflowlabel(sock_net(sk), np);
+               break;
+ 
+       default:
+diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
+index d883c9204c01..278e49cd67d4 100644
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
+ {
+       struct tcphdr *th;
+ 
++      if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
++              return ERR_PTR(-EINVAL);
++
+       if (!pskb_may_pull(skb, sizeof(*th)))
+               return ERR_PTR(-EINVAL);
+ 
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index e7d378c032cb..2bd2087bd105 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -55,6 +55,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+               const struct ipv6hdr *ipv6h;
+               struct udphdr *uh;
+ 
++              if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
++                      goto out;
++
+               if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+                       goto out;
+ 
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 28d065394c09..3f499126727c 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/errno.h>
++#include <linux/capability.h>
+ #include <net/netlink.h>
+ #include <net/sock.h>
+ 
+@@ -392,6 +393,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
+       struct nfnl_cthelper *nlcth;
+       int ret = 0;
+ 
++      if (!capable(CAP_NET_ADMIN))
++              return -EPERM;
++
+       if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
+               return -EINVAL;
+ 
+@@ -595,6 +599,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
+       struct nfnl_cthelper *nlcth;
+       bool tuple_set = false;
+ 
++      if (!capable(CAP_NET_ADMIN))
++              return -EPERM;
++
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nfnl_cthelper_dump_table,
+@@ -661,6 +668,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
+       struct nfnl_cthelper *nlcth, *n;
+       int j = 0, ret;
+ 
++      if (!capable(CAP_NET_ADMIN))
++              return -EPERM;
++
+       if (tb[NFCTH_NAME])
+               helper_name = nla_data(tb[NFCTH_NAME]);
+ 
+diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
+index 2455b69b5810..b589a62e68a2 100644
+--- a/net/netfilter/xt_osf.c
++++ b/net/netfilter/xt_osf.c
+@@ -19,6 +19,7 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ 
++#include <linux/capability.h>
+ #include <linux/if.h>
+ #include <linux/inetdevice.h>
+ #include <linux/ip.h>
+@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
+       struct xt_osf_finger *kf = NULL, *sf;
+       int err = 0;
+ 
++      if (!capable(CAP_NET_ADMIN))
++              return -EPERM;
++
+       if (!osf_attrs[OSF_ATTR_FINGER])
+               return -EINVAL;
+ 
+@@ -113,6 +117,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
+       struct xt_osf_finger *sf;
+       int err = -ENOENT;
+ 
++      if (!capable(CAP_NET_ADMIN))
++              return -EPERM;
++
+       if (!osf_attrs[OSF_ATTR_FINGER])
+               return -EINVAL;
+ 
+diff --git a/net/sctp/offload.c b/net/sctp/offload.c
+index 4f5a2b580aa5..6300f28c9588 100644
+--- a/net/sctp/offload.c
++++ b/net/sctp/offload.c
+@@ -44,6 +44,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       struct sctphdr *sh;
+ 
++      if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
++              goto out;
++
+       sh = sctp_hdr(skb);
+       if (!pskb_may_pull(skb, sizeof(*sh)))
+               goto out;
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 7181ce6c62bf..c472b8391dde 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -83,7 +83,7 @@
+ static int sctp_writeable(struct sock *sk);
+ static void sctp_wfree(struct sk_buff *skb);
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-                              size_t msg_len, struct sock **orig_sk);
++                              size_t msg_len);
+ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
+ static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
+ static int sctp_wait_for_accept(struct sock *sk, long timeo);
+@@ -332,16 +332,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+       if (len < sizeof (struct sockaddr))
+               return NULL;
+ 
++      if (!opt->pf->af_supported(addr->sa.sa_family, opt))
++              return NULL;
++
+       /* V4 mapped address are really of AF_INET family */
+       if (addr->sa.sa_family == AF_INET6 &&
+-          ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
+-              if (!opt->pf->af_supported(AF_INET, opt))
+-                      return NULL;
+-      } else {
+-              /* Does this PF support this AF? */
+-              if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+-                      return NULL;
+-      }
++          ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
++          !opt->pf->af_supported(AF_INET, opt))
++              return NULL;
+ 
+       /* If we get this far, af is valid. */
+       af = sctp_get_af_specific(addr->sa.sa_family);
+@@ -1958,7 +1956,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
+       timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+       if (!sctp_wspace(asoc)) {
+               /* sk can be changed by peel off when waiting for buf. */
+-              err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
++              err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+               if (err) {
+                       if (err == -ESRCH) {
+                               /* asoc is already dead. */
+@@ -7441,12 +7439,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
+ 
+ /* Helper function to wait for space in the sndbuf.  */
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-                              size_t msg_len, struct sock **orig_sk)
++                              size_t msg_len)
+ {
+       struct sock *sk = asoc->base.sk;
+-      int err = 0;
+       long current_timeo = *timeo_p;
+       DEFINE_WAIT(wait);
++      int err = 0;
+ 
+       pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
+                *timeo_p, msg_len);
+@@ -7475,17 +7473,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+               release_sock(sk);
+               current_timeo = schedule_timeout(current_timeo);
+               lock_sock(sk);
+-              if (sk != asoc->base.sk) {
+-                      release_sock(sk);
+-                      sk = asoc->base.sk;
+-                      lock_sock(sk);
+-              }
++              if (sk != asoc->base.sk)
++                      goto do_error;
+ 
+               *timeo_p = current_timeo;
+       }
+ 
+ out:
+-      *orig_sk = sk;
+       finish_wait(&asoc->wait, &wait);
+ 
+       /* Release the association's refcnt.  */
+diff --git a/net/socket.c b/net/socket.c
+index 05f13b24572c..bd3b33988ee0 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2548,6 +2548,15 @@ static int __init sock_init(void)
+ 
+ core_initcall(sock_init);     /* early initcall */
+ 
++static int __init jit_init(void)
++{
++#ifdef CONFIG_BPF_JIT_ALWAYS_ON
++      bpf_jit_enable = 1;
++#endif
++      return 0;
++}
++pure_initcall(jit_init);
++
+ #ifdef CONFIG_PROC_FS
+ void socket_seq_show(struct seq_file *seq)
+ {
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 27753325e06e..5b3e1ea37b6d 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1848,36 +1848,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
+ 
+       if (strcmp(name, tipc_bclink_name) == 0) {
+               err = tipc_nl_add_bc_link(net, &msg);
+-              if (err) {
+-                      nlmsg_free(msg.skb);
+-                      return err;
+-              }
++              if (err)
++                      goto err_free;
+       } else {
+               int bearer_id;
+               struct tipc_node *node;
+               struct tipc_link *link;
+ 
+               node = tipc_node_find_by_name(net, name, &bearer_id);
+-              if (!node)
+-                      return -EINVAL;
++              if (!node) {
++                      err = -EINVAL;
++                      goto err_free;
++              }
+ 
+               tipc_node_read_lock(node);
+               link = node->links[bearer_id].link;
+               if (!link) {
+                       tipc_node_read_unlock(node);
+-                      nlmsg_free(msg.skb);
+-                      return -EINVAL;
++                      err = -EINVAL;
++                      goto err_free;
+               }
+ 
+               err = __tipc_nl_add_link(net, &msg, link, 0);
+               tipc_node_read_unlock(node);
+-              if (err) {
+-                      nlmsg_free(msg.skb);
+-                      return err;
+-              }
++              if (err)
++                      goto err_free;
+       }
+ 
+       return genlmsg_reply(msg.skb, info);
++
++err_free:
++      nlmsg_free(msg.skb);
++      return err;
+ }
+ 
+ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
+diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c
+index ac73710473de..1517a232ab18 100644
+--- a/tools/usb/usbip/libsrc/usbip_common.c
++++ b/tools/usb/usbip/libsrc/usbip_common.c
+@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
+                      struct usbip_usb_interface *uinf)
+ {
+       char busid[SYSFS_BUS_ID_SIZE];
++      int size;
+       struct udev_device *sif;
+ 
+-      sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
++      size = snprintf(busid, sizeof(busid), "%s:%d.%d",
++                      udev->busid, udev->bConfigurationValue, i);
++      if (size < 0 || (unsigned int)size >= sizeof(busid)) {
++              err("busid length %i >= %lu or < 0", size,
++                  (long unsigned)sizeof(busid));
++              return -1;
++      }
+ 
+       sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
+       if (!sif) {
+diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
+index 9d415228883d..6ff7b601f854 100644
+--- a/tools/usb/usbip/libsrc/usbip_host_common.c
++++ b/tools/usb/usbip/libsrc/usbip_host_common.c
+@@ -40,13 +40,20 @@ struct udev *udev_context;
+ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
+ {
+       char status_attr_path[SYSFS_PATH_MAX];
++      int size;
+       int fd;
+       int length;
+       char status;
+       int value = 0;
+ 
+-      snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
+-               udev->path);
++      size = snprintf(status_attr_path, sizeof(status_attr_path),
++                      "%s/usbip_status", udev->path);
++      if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
++              err("usbip_status path length %i >= %lu or < 0", size,
++                  (long unsigned)sizeof(status_attr_path));
++              return -1;
++      }
++
+ 
+       fd = open(status_attr_path, O_RDONLY);
+       if (fd < 0) {
+@@ -218,6 +225,7 @@ int usbip_export_device(struct usbip_exported_device *edev, int sockfd)
+ {
+       char attr_name[] = "usbip_sockfd";
+       char sockfd_attr_path[SYSFS_PATH_MAX];
++      int size;
+       char sockfd_buff[30];
+       int ret;
+ 
+@@ -237,10 +245,20 @@ int usbip_export_device(struct usbip_exported_device *edev, int sockfd)
+       }
+ 
+       /* only the first interface is true */
+-      snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
+-               edev->udev.path, attr_name);
++      size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
++                      edev->udev.path, attr_name);
++      if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
++              err("exported device path length %i >= %lu or < 0", size,
++                  (long unsigned)sizeof(sockfd_attr_path));
++              return -1;
++      }
+ 
+-      snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
++      size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
++      if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
++              err("socket length %i >= %lu or < 0", size,
++                  (long unsigned)sizeof(sockfd_buff));
++              return -1;
++      }
+ 
+       ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
+                                   strlen(sockfd_buff));
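
All three snprintf() calls above gain the same truncation check: snprintf() returns the length it would have written, so a negative return or one at least as large as the buffer means the result is not the intended path and must be rejected instead of being handed to sysfs. The idiom as standalone C (build_status_path() is an illustrative name):

#include <stdio.h>

static int build_status_path(char *buf, size_t bufsz, const char *dev_path)
{
	int n = snprintf(buf, bufsz, "%s/usbip_status", dev_path);

	if (n < 0 || (size_t)n >= bufsz)
		return -1;	/* encoding error or truncated: reject */
	return 0;
}
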
+diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
+index ad9204773533..1274f326242c 100644
+--- a/tools/usb/usbip/libsrc/vhci_driver.c
++++ b/tools/usb/usbip/libsrc/vhci_driver.c
+@@ -55,12 +55,12 @@ static int parse_status(const char *value)
+ 
+       while (*c != '\0') {
+               int port, status, speed, devid;
+-              unsigned long socket;
++              int sockfd;
+               char lbusid[SYSFS_BUS_ID_SIZE];
+ 
+-              ret = sscanf(c, "%d %d %d %x %lx %31s\n",
++              ret = sscanf(c, "%d %d %d %x %u %31s\n",
+                               &port, &status, &speed,
+-                              &devid, &socket, lbusid);
++                              &devid, &sockfd, lbusid);
+ 
+               if (ret < 5) {
+                       dbg("sscanf failed: %d", ret);
+@@ -69,7 +69,7 @@ static int parse_status(const char *value)
+ 
+               dbg("port %d status %d speed %d devid %x",
+                               port, status, speed, devid);
+-              dbg("socket %lx lbusid %s", socket, lbusid);
++              dbg("sockfd %u lbusid %s", sockfd, lbusid);
+ 
+ 
+               /* if a device is connected, look at it */
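
The vhci status attribute now exports the peer socket as a plain integer file descriptor rather than a kernel pointer, so parse_status() reads it with %u into an int-sized variable instead of %lx into an unsigned long. The corrected parse in isolation, with types chosen to match the conversions exactly (parse_status_line() is an illustrative name):

#include <stdio.h>

static int parse_status_line(const char *line)
{
	int port, status, speed, devid;
	unsigned int sockfd;
	char lbusid[32];

	/* lbusid is optional on unused ports, hence the < 5 tolerance. */
	if (sscanf(line, "%d %d %d %x %u %31s",
		   &port, &status, &speed, &devid, &sockfd, lbusid) < 5)
		return -1;	/* malformed line */
	return 0;
}
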
+diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
+index d7599d943529..73d8eee8130b 100644
+--- a/tools/usb/usbip/src/usbip.c
++++ b/tools/usb/usbip/src/usbip.c
+@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
+                       break;
+               case '?':
+                       printf("usbip: invalid option\n");
++                      /* Terminate after printing error */
++                      /* FALLTHRU */
+               default:
+                       usbip_usage();
+                       goto out;
