commit:     a65e0548d27ba07612232f979aa1a902a46535f8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb  6 12:49:34 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb  6 12:49:34 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a65e0548

Linux patch 4.19.272

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1271_linux-4.19.272.patch | 2942 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2946 insertions(+)

diff --git a/0000_README b/0000_README
index 9dab2ece..e471309d 100644
--- a/0000_README
+++ b/0000_README
@@ -1127,6 +1127,10 @@ Patch:  1270_linux-4.19.271.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.271
 
+Patch:  1271_linux-4.19.272.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.272
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1271_linux-4.19.272.patch b/1271_linux-4.19.272.patch
new file mode 100644
index 00000000..575bc67d
--- /dev/null
+++ b/1271_linux-4.19.272.patch
@@ -0,0 +1,2942 @@
+diff --git a/Documentation/ABI/testing/sysfs-kernel-oops_count b/Documentation/ABI/testing/sysfs-kernel-oops_count
+new file mode 100644
+index 0000000000000..156cca9dbc960
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-kernel-oops_count
+@@ -0,0 +1,6 @@
++What:         /sys/kernel/oops_count
++Date:         November 2022
++KernelVersion:        6.2.0
++Contact:      Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
++Description:
++              Shows how many times the system has Oopsed since last boot.
+diff --git a/Documentation/ABI/testing/sysfs-kernel-warn_count b/Documentation/ABI/testing/sysfs-kernel-warn_count
+new file mode 100644
+index 0000000000000..90a029813717d
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-kernel-warn_count
+@@ -0,0 +1,6 @@
++What:         /sys/kernel/warn_count
++Date:         November 2022
++KernelVersion:        6.2.0
++Contact:      Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
++Description:
++              Shows how many times the system has Warned since last boot.
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index db1676525ca35..c8d3dbda3c1e2 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -51,6 +51,7 @@ show up in /proc/sys/kernel:
+ - msgmnb
+ - msgmni
+ - nmi_watchdog
++- oops_limit
+ - osrelease
+ - ostype
+ - overflowgid
+@@ -96,6 +97,7 @@ show up in /proc/sys/kernel:
+ - threads-max
+ - unprivileged_bpf_disabled
+ - unknown_nmi_panic
++- warn_limit
+ - watchdog
+ - watchdog_thresh
+ - version
+@@ -555,6 +557,15 @@ scanned for a given scan.
+ 
+ ==============================================================
+ 
++oops_limit:
++
++Number of kernel oopses after which the kernel should panic when
++``panic_on_oops`` is not set. Setting this to 0 disables checking
++the count. Setting this to  1 has the same effect as setting
++``panic_on_oops=1``. The default value is 10000.
++
++==============================================================
++
+ osrelease, ostype & version:
+ 
+ # cat osrelease
+@@ -1104,6 +1115,15 @@ example.  If a system hangs up, try pressing the NMI switch.
+ 
+ ==============================================================
+ 
++warn_limit:
++
++Number of kernel warnings after which the kernel should panic when
++``panic_on_warn`` is not set. Setting this to 0 disables checking
++the warning count. Setting this to 1 has the same effect as setting
++``panic_on_warn=1``. The default value is 0.
++
++==============================================================
++
+ watchdog:
+ 
+ This parameter can be used to disable or enable the soft lockup detector
+diff --git a/Makefile b/Makefile
+index 560507d1f7a10..e3822e492543a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 271
++SUBLEVEL = 272
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index bc9627698796e..22f5c27b96b7c 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -192,7 +192,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
+               local_irq_enable();
+               while (1);
+       }
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ #ifndef CONFIG_MATHEMU
+@@ -577,7 +577,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
+ 
+       printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
+               pc, va, opcode, reg);
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ 
+ got_exception:
+       /* Ok, we caught the exception, but we don't want it.  Is there
+@@ -632,7 +632,7 @@ got_exception:
+               local_irq_enable();
+               while (1);
+       }
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ /*
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 188fc9256baf1..6ce67b05412ec 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -206,7 +206,7 @@ retry:
+       printk(KERN_ALERT "Unable to handle kernel paging request at "
+              "virtual address %016lx\n", address);
+       die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
+-      do_exit(SIGKILL);
++      make_task_dead(SIGKILL);
+ 
+       /* We ran out of memory, or some other thing happened to us that
+          made us unable to handle the page fault gracefully.  */
+diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts
+index f346673d34ead..0cb5f01f02d15 100644
+--- a/arch/arm/boot/dts/imx53-ppd.dts
++++ b/arch/arm/boot/dts/imx53-ppd.dts
+@@ -462,7 +462,7 @@
+       scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+       status = "okay";
+ 
+-      i2c-switch@70 {
++      i2c-mux@70 {
+               compatible = "nxp,pca9547";
+               #address-cells = <1>;
+               #size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+index b5986efe1090c..143d249b821eb 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+@@ -463,7 +463,6 @@
+ &uart1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart1>;
+-      uart-has-rtscts;
+       rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+       status = "okay";
+ };
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index d49fafd2b865f..02a4aea52fb05 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -344,7 +344,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+       if (panic_on_oops)
+               panic("Fatal exception");
+       if (signr)
+-              do_exit(signr);
++              make_task_dead(signr);
+ }
+ 
+ /*
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index a9ee0d9dc740a..8457384139cb8 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -149,7 +149,7 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+       show_pte(mm, addr);
+       die("Oops", regs, fsr);
+       bust_spinlocks(0);
+-      do_exit(SIGKILL);
++      make_task_dead(SIGKILL);
+ }
+ 
+ /*
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index e803fd16248bc..46d2142ebd14c 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -160,7 +160,7 @@ void __init paging_init(const struct machine_desc *mdesc)
+       mpu_setup();
+ 
+       /* allocate the zero page. */
+-      zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++      zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+       if (!zero_page)
+               panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+                     __func__, PAGE_SIZE, PAGE_SIZE);
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 965595fe68045..20f896b27a447 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -224,7 +224,7 @@ void die(const char *str, struct pt_regs *regs, int err)
+       raw_spin_unlock_irqrestore(&die_lock, flags);
+ 
+       if (ret != NOTIFY_STOP)
+-              do_exit(SIGSEGV);
++              make_task_dead(SIGSEGV);
+ }
+ 
+ static bool show_unhandled_signals_ratelimited(void)
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index b046006a387ff..0d2be8eb87ec8 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -268,7 +268,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
+       show_pte(addr);
+       die("Oops", regs, esr);
+       bust_spinlocks(0);
+-      do_exit(SIGKILL);
++      make_task_dead(SIGKILL);
+ }
+ 
+ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
+diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
+index e47a9e0dc278f..090adaee4b84c 100644
+--- a/arch/h8300/kernel/traps.c
++++ b/arch/h8300/kernel/traps.c
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
++#include <linux/sched/task.h>
+ #include <linux/mm_types.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+@@ -110,7 +111,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
+       dump(fp);
+ 
+       spin_unlock_irq(&die_lock);
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ static int kstack_depth_to_print = 24;
+diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
+index fabffb83930af..573825c3cb708 100644
+--- a/arch/h8300/mm/fault.c
++++ b/arch/h8300/mm/fault.c
+@@ -52,7 +52,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+       printk(" at virtual address %08lx\n", address);
+       if (!user_mode(regs))
+               die("Oops", regs, error_code);
+-      do_exit(SIGKILL);
++      make_task_dead(SIGKILL);
+ 
+       return 1;
+ }
+diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
+index 91ee04842c22c..34a74f73f1690 100644
+--- a/arch/hexagon/kernel/traps.c
++++ b/arch/hexagon/kernel/traps.c
+@@ -234,7 +234,7 @@ int die(const char *str, struct pt_regs *regs, long err)
+               panic("Fatal exception");
+ 
+       oops_exit();
+-      do_exit(err);
++      make_task_dead(err);
+       return 0;
+ }
+ 
+diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+index 8b4a0c1748c03..0d56b19b7511a 100644
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -445,7 +445,7 @@ config ARCH_PROC_KCORE_TEXT
+       depends on PROC_KCORE
+ 
+ config IA64_MCA_RECOVERY
+-      tristate "MCA recovery from errors other than TLB."
++      bool "MCA recovery from errors other than TLB."
+ 
+ config PERFMON
+       bool "Performance monitor support"
+diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
+index 06419a95af309..07353029b003a 100644
+--- a/arch/ia64/kernel/mca_drv.c
++++ b/arch/ia64/kernel/mca_drv.c
+@@ -11,6 +11,7 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/sched.h>
++#include <linux/sched/task.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
+ #include <linux/kallsyms.h>
+@@ -176,7 +177,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
+       spin_unlock(&mca_bh_lock);
+ 
+       /* This process is about to be killed itself */
+-      do_exit(SIGKILL);
++      make_task_dead(SIGKILL);
+ }
+ 
+ /**
+diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
+index c6f4932073a18..32c2877b3c0ad 100644
+--- a/arch/ia64/kernel/traps.c
++++ b/arch/ia64/kernel/traps.c
+@@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err)
+       if (panic_on_oops)
+               panic("Fatal exception");
+ 
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+       return 0;
+ }
+ 
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index a9d55ad8d67be..0b072af7e20d8 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -302,7 +302,7 @@ retry:
+               regs = NULL;
+       bust_spinlocks(0);
+       if (regs)
+-              do_exit(SIGKILL);
++              make_task_dead(SIGKILL);
+       return;
+ 
+   out_of_memory:
+diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
+index b2fd000b92857..9b70a7f5e7058 100644
+--- a/arch/m68k/kernel/traps.c
++++ b/arch/m68k/kernel/traps.c
+@@ -1139,7 +1139,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
+       pr_crit("%s: %08x\n", str, nr);
+       show_registers(fp);
+       add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ asmlinkage void set_esp0(unsigned long ssp)
+diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
+index 9b6163c05a754..fc3aea6dcaa77 100644
+--- a/arch/m68k/mm/fault.c
++++ b/arch/m68k/mm/fault.c
+@@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs)
+                       pr_alert("Unable to handle kernel access");
+               pr_cont(" at virtual address %p\n", addr);
+               die_if_kernel("Oops", regs, 0 /*error_code*/);
+-              do_exit(SIGKILL);
++              make_task_dead(SIGKILL);
+       }
+ 
+       return 1;
+diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
+index eafff21fcb0e6..182402db6b043 100644
+--- a/arch/microblaze/kernel/exceptions.c
++++ b/arch/microblaze/kernel/exceptions.c
+@@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err)
+       pr_warn("Oops: %s, sig: %ld\n", str, err);
+       show_regs(fp);
+       spin_unlock_irq(&die_lock);
+-      /* do_exit() should take care of panic'ing from an interrupt
++      /* make_task_dead() should take care of panic'ing from an interrupt
+        * context so we don't handle it here
+        */
+-      do_exit(err);
++      make_task_dead(err);
+ }
+ 
+ /* for user application debugging */
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 0ca4185cc5e38..01c85d37c47ab 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -412,7 +412,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
+       if (regs && kexec_should_crash(current))
+               crash_kexec(regs);
+ 
+-      do_exit(sig);
++      make_task_dead(sig);
+ }
+ 
+ extern struct exception_table_entry __start___dbe_table[];
+diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
+index 1496aab489988..a811f286bc85c 100644
+--- a/arch/nds32/kernel/traps.c
++++ b/arch/nds32/kernel/traps.c
+@@ -183,7 +183,7 @@ void die(const char *str, struct pt_regs *regs, int err)
+ 
+       bust_spinlocks(0);
+       spin_unlock_irq(&die_lock);
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ EXPORT_SYMBOL(die);
+@@ -286,7 +286,7 @@ void unhandled_interruption(struct pt_regs *regs)
+       pr_emerg("unhandled_interruption\n");
+       show_regs(regs);
+       if (!user_mode(regs))
+-              do_exit(SIGKILL);
++              make_task_dead(SIGKILL);
+       force_sig(SIGKILL, current);
+ }
+ 
+@@ -297,7 +297,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
+                addr, type);
+       show_regs(regs);
+       if (!user_mode(regs))
+-              do_exit(SIGKILL);
++              make_task_dead(SIGKILL);
+       force_sig(SIGKILL, current);
+ }
+ 
+@@ -324,7 +324,7 @@ void do_revinsn(struct pt_regs *regs)
+       pr_emerg("Reserved Instruction\n");
+       show_regs(regs);
+       if (!user_mode(regs))
+-              do_exit(SIGILL);
++              make_task_dead(SIGILL);
+       force_sig(SIGILL, current);
+ }
+ 
+diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
+index 3bc3cd22b750e..dc6c270a355b6 100644
+--- a/arch/nios2/kernel/traps.c
++++ b/arch/nios2/kernel/traps.c
+@@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err)
+       show_regs(regs);
+       spin_unlock_irq(&die_lock);
+       /*
+-       * do_exit() should take care of panic'ing from an interrupt
++       * make_task_dead() should take care of panic'ing from an interrupt
+        * context so we don't handle it here
+        */
+-      do_exit(err);
++      make_task_dead(err);
+ }
+ 
+ void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
+diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
+index d8981cbb852a5..dfa3db1bccef4 100644
+--- a/arch/openrisc/kernel/traps.c
++++ b/arch/openrisc/kernel/traps.c
+@@ -224,7 +224,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+       __asm__ __volatile__("l.nop   1");
+       do {} while (1);
+ #endif
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ /* This is normally the 'Oops' routine */
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index d7a66d8525091..5d9cf726e4ecd 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -265,7 +265,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
+               panic("Fatal exception");
+ 
+       oops_exit();
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ /* gdb uses break 4,8 */
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 2379c4bf3979e..63c751ce130a5 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -251,7 +251,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
+               panic("Fatal exception in interrupt");
+       if (panic_on_oops)
+               panic("Fatal exception");
+-      do_exit(signr);
++      make_task_dead(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
+ 
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 7c65750508f25..9b736e616131d 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -64,7 +64,7 @@ void die(struct pt_regs *regs, const char *str)
+       if (panic_on_oops)
+               panic("Fatal exception");
+       if (ret != NOTIFY_STOP)
+-              do_exit(SIGSEGV);
++              make_task_dead(SIGSEGV);
+ }
+ 
+ void do_trap(struct pt_regs *regs, int signo, int code,
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index 523dbfbac03dd..7b5f110f8191a 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -200,7 +200,7 @@ no_context:
+               (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+               "paging request", addr);
+       die(regs, "Oops");
+-      do_exit(SIGKILL);
++      make_task_dead(SIGKILL);
+ 
+       /*
+        * We ran out of memory, call the OOM killer, and return the userspace
+diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
+index 5b23c4f6e50cd..7b21019096f44 100644
+--- a/arch/s390/kernel/dumpstack.c
++++ b/arch/s390/kernel/dumpstack.c
+@@ -187,5 +187,5 @@ void die(struct pt_regs *regs, const char *str)
+       if (panic_on_oops)
+               panic("Fatal exception: panic_on_oops");
+       oops_exit();
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
+index 8c867b43c8ebc..471a965ae75a4 100644
+--- a/arch/s390/kernel/nmi.c
++++ b/arch/s390/kernel/nmi.c
+@@ -179,7 +179,7 @@ void s390_handle_mcck(void)
+                      "malfunction (code 0x%016lx).\n", mcck.mcck_code);
+               printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
+                      current->comm, current->pid);
+-              do_exit(SIGSEGV);
++              make_task_dead(SIGSEGV);
+       }
+ }
+ EXPORT_SYMBOL_GPL(s390_handle_mcck);
+diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
+index 8b49cced663dc..5fafbef7849b1 100644
+--- a/arch/sh/kernel/traps.c
++++ b/arch/sh/kernel/traps.c
+@@ -57,7 +57,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+       if (panic_on_oops)
+               panic("Fatal exception");
+ 
+-      do_exit(SIGSEGV);
++      make_task_dead(SIGSEGV);
+ }
+ 
+ void die_if_kernel(const char *str, struct pt_regs *regs, long err)
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index bcdfc6168dd58..ec7fcfbad06c0 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+       }
+       printk("Instruction DUMP:");
+       instruction_dump ((unsigned long *) regs->pc);
+-      if(regs->psr & PSR_PS)
+-              do_exit(SIGKILL);
+-      do_exit(SIGSEGV);
++      make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
+ }
+ 
+ void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 86879c28910b7..1a338784a37c8 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2565,9 +2565,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+       }
+       if (panic_on_oops)
+               panic("Fatal exception");
+-      if (regs->tstate & TSTATE_PRIV)
+-              do_exit(SIGKILL);
+-      do_exit(SIGSEGV);
++      make_task_dead((regs->tstate & TSTATE_PRIV)? SIGKILL : SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+ 
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 78b308f2f2ea6..e6c258bf95116 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -1500,13 +1500,13 @@ ENTRY(async_page_fault)
+ END(async_page_fault)
+ #endif
+ 
+-ENTRY(rewind_stack_do_exit)
++ENTRY(rewind_stack_and_make_dead)
+       /* Prevent any naive code from trying to unwind to our caller. */
+       xorl    %ebp, %ebp
+ 
+       movl    PER_CPU_VAR(cpu_current_top_of_stack), %esi
+       leal    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
+ 
+-      call    do_exit
++      call    make_task_dead
+ 1:    jmp 1b
+-END(rewind_stack_do_exit)
++END(rewind_stack_and_make_dead)
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 3f418aedef8d7..ef693d3689aa7 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -1759,10 +1759,10 @@ END(nmi)
+ ENTRY(ignore_sysret)
+       UNWIND_HINT_EMPTY
+       mov     $-ENOSYS, %eax
+-      sysret
++      sysretl
+ END(ignore_sysret)
+ 
+-ENTRY(rewind_stack_do_exit)
++ENTRY(rewind_stack_and_make_dead)
+       UNWIND_HINT_FUNC
+       /* Prevent any naive code from trying to unwind to our caller. */
+       xorl    %ebp, %ebp
+@@ -1771,5 +1771,5 @@ ENTRY(rewind_stack_do_exit)
+       leaq    -PTREGS_SIZE(%rax), %rsp
+       UNWIND_HINT_REGS
+ 
+-      call    do_exit
+-END(rewind_stack_do_exit)
++      call    make_task_dead
++END(rewind_stack_and_make_dead)
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 2b5886401e5f4..2b17a5cec0997 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -326,7 +326,7 @@ unsigned long oops_begin(void)
+ }
+ NOKPROBE_SYMBOL(oops_begin);
+ 
+-void __noreturn rewind_stack_do_exit(int signr);
++void __noreturn rewind_stack_and_make_dead(int signr);
+ 
+ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+@@ -361,7 +361,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+        * reuse the task stack and that existing poisons are invalid.
+        */
+       kasan_unpoison_task_stack(current);
+-      rewind_stack_do_exit(signr);
++      rewind_stack_and_make_dead(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
+ 
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index fe522691ac717..8821d0ab0a08c 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
+       disable_irq_nosync(irq);
+       io_apic_irqs &= ~(1<<irq);
+       irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
++      irq_set_status_flags(irq, IRQ_LEVEL);
+       enable_irq(irq);
+       lapic_assign_legacy_vector(irq, true);
+ }
+diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
+index a0693b71cfc1c..f2c215e1f64c5 100644
+--- a/arch/x86/kernel/irqinit.c
++++ b/arch/x86/kernel/irqinit.c
+@@ -72,8 +72,10 @@ void __init init_ISA_irqs(void)
+ 
+       legacy_pic->init(0);
+ 
+-      for (i = 0; i < nr_legacy_irqs(); i++)
++      for (i = 0; i < nr_legacy_irqs(); i++) {
+               irq_set_chip_and_handler(i, chip, handle_level_irq);
++              irq_set_status_flags(i, IRQ_LEVEL);
++      }
+ }
+ 
+ void __init init_IRQ(void)
+diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
+index 33147fef3452c..f1024b51cfee1 100644
+--- a/arch/x86/lib/iomap_copy_64.S
++++ b/arch/x86/lib/iomap_copy_64.S
+@@ -22,6 +22,6 @@
+  */
+ ENTRY(__iowrite32_copy)
+       movl %edx,%ecx
+-      rep movsd
++      rep movsl
+       ret
+ ENDPROC(__iowrite32_copy)
+diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
+index 86507fa7c2d7c..2ae5c505894bd 100644
+--- a/arch/xtensa/kernel/traps.c
++++ b/arch/xtensa/kernel/traps.c
+@@ -542,5 +542,5 @@ void die(const char * str, struct pt_regs * regs, long err)
+       if (panic_on_oops)
+               panic("Fatal exception");
+ 
+-      do_exit(err);
++      make_task_dead(err);
+ }
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 80f3e729fdd4d..4fbf915d9cb01 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2179,10 +2179,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+ 
+               if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+                       return false;
+-
+-              WARN_ONCE(1,
+-                     "generic_make_request: Trying to write "
+-                      "to read-only block-device %s (partno %d)\n",
++              pr_warn("Trying to write to read-only block-device %s (partno %d)\n",
+                       bio_devname(bio, b), part->partno);
+               /* Older lvm-tools actually trigger this */
+               return false;
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index 8a52a5efee4f5..e1cf7c250c33a 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -223,7 +223,8 @@ static int dma_chan_get(struct dma_chan *chan)
+       /* The channel is already in use, update client count */
+       if (chan->client_count) {
+               __module_get(owner);
+-              goto out;
++              chan->client_count++;
++              return 0;
+       }
+ 
+       if (!try_module_get(owner))
+@@ -236,11 +237,11 @@ static int dma_chan_get(struct dma_chan *chan)
+                       goto err_out;
+       }
+ 
++      chan->client_count++;
++
+       if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+               balance_ref_count(chan);
+ 
+-out:
+-      chan->client_count++;
+       return 0;
+ 
+ err_out:
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 709ead443fc5f..5794d3120bb86 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1347,10 +1347,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+               sdma_config_ownership(sdmac, false, true, false);
+ 
+       if (sdma_load_context(sdmac))
+-              goto err_desc_out;
++              goto err_bd_out;
+ 
+       return desc;
+ 
++err_bd_out:
++      sdma_free_bd(desc);
+ err_desc_out:
+       kfree(desc);
+ err_out:
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 0ba70be4ea85d..5f9945651e95e 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -164,7 +164,9 @@
+ #define XILINX_DMA_REG_BTT            0x28
+ 
+ /* AXI DMA Specific Masks/Bit fields */
+-#define XILINX_DMA_MAX_TRANS_LEN      GENMASK(22, 0)
++#define XILINX_DMA_MAX_TRANS_LEN_MIN  8
++#define XILINX_DMA_MAX_TRANS_LEN_MAX  23
++#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX       26
+ #define XILINX_DMA_CR_COALESCE_MAX    GENMASK(23, 16)
+ #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK       BIT(4)
+ #define XILINX_DMA_CR_COALESCE_SHIFT  16
+@@ -428,6 +430,7 @@ struct xilinx_dma_config {
+  * @rxs_clk: DMA s2mm stream clock
+  * @nr_channels: Number of channels DMA device supports
+  * @chan_id: DMA channel identifier
++ * @max_buffer_len: Max buffer length
+  */
+ struct xilinx_dma_device {
+       void __iomem *regs;
+@@ -447,6 +450,7 @@ struct xilinx_dma_device {
+       struct clk *rxs_clk;
+       u32 nr_channels;
+       u32 chan_id;
++      u32 max_buffer_len;
+ };
+ 
+ /* Macros */
+@@ -969,6 +973,25 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+       return 0;
+ }
+ 
++/**
++ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
++ * @chan: Driver specific DMA channel
++ * @size: Total data that needs to be copied
++ * @done: Amount of data that has been already copied
++ *
++ * Return: Amount of data that has to be copied
++ */
++static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
++                                  int size, int done)
++{
++      size_t copy;
++
++      copy = min_t(size_t, size - done,
++                   chan->xdev->max_buffer_len);
++
++      return copy;
++}
++
+ /**
+  * xilinx_dma_tx_status - Get DMA transaction status
+  * @dchan: DMA channel
+@@ -1002,7 +1025,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
+                       list_for_each_entry(segment, &desc->segments, node) {
+                               hw = &segment->hw;
+                               residue += (hw->control - hw->status) &
+-                                         XILINX_DMA_MAX_TRANS_LEN;
++                                         chan->xdev->max_buffer_len;
+                       }
+               }
+               spin_unlock_irqrestore(&chan->lock, flags);
+@@ -1262,7 +1285,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
+ 
+               /* Start the transfer */
+               dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+-                              hw->control & XILINX_DMA_MAX_TRANS_LEN);
++                              hw->control & chan->xdev->max_buffer_len);
+       }
+ 
+       list_splice_tail_init(&chan->pending_list, &chan->active_list);
+@@ -1365,7 +1388,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+ 
+               /* Start the transfer */
+               dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+-                             hw->control & XILINX_DMA_MAX_TRANS_LEN);
++                             hw->control & chan->xdev->max_buffer_len);
+       }
+ 
+       list_splice_tail_init(&chan->pending_list, &chan->active_list);
+@@ -1729,7 +1752,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+       struct xilinx_cdma_tx_segment *segment;
+       struct xilinx_cdma_desc_hw *hw;
+ 
+-      if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
++      if (!len || len > chan->xdev->max_buffer_len)
+               return NULL;
+ 
+       desc = xilinx_dma_alloc_tx_descriptor(chan);
+@@ -1819,8 +1842,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
+                        * Calculate the maximum number of bytes to transfer,
+                        * making sure it is less than the hw limit
+                        */
+-                      copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+-                                   XILINX_DMA_MAX_TRANS_LEN);
++                      copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
++                                                      sg_used);
+                       hw = &segment->hw;
+ 
+                       /* Fill in the descriptor */
+@@ -1924,8 +1947,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+                        * Calculate the maximum number of bytes to transfer,
+                        * making sure it is less than the hw limit
+                        */
+-                      copy = min_t(size_t, period_len - sg_used,
+-                                   XILINX_DMA_MAX_TRANS_LEN);
++                      copy = xilinx_dma_calc_copysize(chan, period_len,
++                                                      sg_used);
+                       hw = &segment->hw;
+                       xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
+                                         period_len * i);
+@@ -2613,7 +2636,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+       struct xilinx_dma_device *xdev;
+       struct device_node *child, *np = pdev->dev.of_node;
+       struct resource *io;
+-      u32 num_frames, addr_width;
++      u32 num_frames, addr_width, len_width;
+       int i, err;
+ 
+       /* Allocate and initialize the DMA engine structure */
+@@ -2640,13 +2663,30 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+       /* Request and map I/O memory */
+       io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+-      if (IS_ERR(xdev->regs))
+-              return PTR_ERR(xdev->regs);
+-
++      if (IS_ERR(xdev->regs)) {
++              err = PTR_ERR(xdev->regs);
++              goto disable_clks;
++      }
+       /* Retrieve the DMA engine properties from the device tree */
+       xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+-      if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
++      xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
++
++      if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+               xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
++              if (!of_property_read_u32(node, "xlnx,sg-length-width",
++                                        &len_width)) {
++                      if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
++                          len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
++                              dev_warn(xdev->dev,
++                                       "invalid xlnx,sg-length-width property value. Using default width\n");
++                      } else {
++                              if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
++                                      dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
++                              xdev->max_buffer_len =
++                                      GENMASK(len_width - 1, 0);
++                      }
++              }
++      }
+ 
+       if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+               err = of_property_read_u32(node, "xlnx,num-fstores",
+@@ -2719,8 +2759,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+       /* Initialize the channels */
+       for_each_child_of_node(node, child) {
+               err = xilinx_dma_child_probe(xdev, child);
+-              if (err < 0)
+-                      goto disable_clks;
++              if (err < 0) {
++                      of_node_put(child);
++                      goto error;
++              }
+       }
+ 
+       if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+@@ -2753,12 +2795,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+ 
+       return 0;
+ 
+-disable_clks:
+-      xdma_disable_allclks(xdev);
+ error:
+       for (i = 0; i < xdev->nr_channels; i++)
+               if (xdev->chan[i])
+                       xilinx_dma_chan_remove(xdev->chan[i]);
++disable_clks:
++      xdma_disable_allclks(xdev);
+ 
+       return err;
+ }
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 93d6e6319b3cc..0ac6c49ecdbf4 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -34,6 +34,9 @@
+ static DEFINE_MUTEX(device_ctls_mutex);
+ static LIST_HEAD(edac_device_list);
+ 
++/* Default workqueue processing interval on this instance, in msecs */
++#define DEFAULT_POLL_INTERVAL 1000
++
+ #ifdef CONFIG_EDAC_DEBUG
+ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
+ {
+@@ -366,7 +369,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
+        * whole one second to save timers firing all over the period
+        * between integral seconds
+        */
+-      if (edac_dev->poll_msec == 1000)
++      if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+               edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+       else
+               edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -396,7 +399,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+        * timers firing on sub-second basis, while they are happy
+        * to fire together on the 1 second exactly
+        */
+-      if (edac_dev->poll_msec == 1000)
++      if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+               edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+       else
+               edac_queue_work(&edac_dev->work, edac_dev->delay);
+@@ -430,7 +433,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
+       edac_dev->delay     = msecs_to_jiffies(msec);
+ 
+       /* See comment in edac_device_workq_setup() above */
+-      if (edac_dev->poll_msec == 1000)
++      if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
+               edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+       else
+               edac_mod_work(&edac_dev->work, edac_dev->delay);
+@@ -472,11 +475,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
+               /* This instance is NOW RUNNING */
+               edac_dev->op_state = OP_RUNNING_POLL;
+ 
+-              /*
+-               * enable workq processing on this instance,
+-               * default = 1000 msec
+-               */
+-              edac_device_workq_setup(edac_dev, 1000);
++              edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
+       } else {
+               edac_dev->op_state = OP_RUNNING_INTERRUPT;
+       }
+diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
+index 6092e61be6050..bcf41601a9778 100644
+--- a/drivers/edac/highbank_mc_edac.c
++++ b/drivers/edac/highbank_mc_edac.c
+@@ -185,8 +185,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
+       drvdata = mci->pvt_info;
+       platform_set_drvdata(pdev, mci);
+ 
+-      if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+-              return -ENOMEM;
++      if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
++              res = -ENOMEM;
++              goto free;
++      }
+ 
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!r) {
+@@ -254,6 +256,7 @@ err2:
+       edac_mc_del_mc(&pdev->dev);
+ err:
+       devres_release_group(&pdev->dev, NULL);
++free:
+       edac_mc_free(mci);
+       return res;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 77a2f7fc2b370..dfa6e6c2f808b 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4116,7 +4116,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
+       bool bret;
+ 
+       if (intel_dp->is_mst) {
+-              u8 esi[DP_DPRX_ESI_LEN] = { 0 };
++              /*
++               * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
++               * pass in "esi+10" to drm_dp_channel_eq_ok(), which
++               * takes a 6-byte array. So we actually need 16 bytes
++               * here.
++               *
++               * Somebody who knows what the limits actually are
++               * should check this, but for now this is at least
++               * harmless and avoids a valid compiler warning about
++               * using more of the array than we have allocated.
++               */
++              u8 esi[DP_DPRX_ESI_LEN+2] = { 0 };
+               int ret = 0;
+               int retry;
+               bool handled;
+diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
+index 9b60efe6ec441..ba386e5aa0557 100644
+--- a/drivers/hid/hid-betopff.c
++++ b/drivers/hid/hid-betopff.c
+@@ -63,7 +63,6 @@ static int betopff_init(struct hid_device *hid)
+       struct list_head *report_list =
+                       &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+       struct input_dev *dev;
+-      int field_count = 0;
+       int error;
+       int i, j;
+ 
+@@ -89,19 +88,21 @@ static int betopff_init(struct hid_device *hid)
+        * -----------------------------------------
+        * Do init them with default value.
+        */
++      if (report->maxfield < 4) {
++              hid_err(hid, "not enough fields in the report: %d\n",
++                              report->maxfield);
++              return -ENODEV;
++      }
+       for (i = 0; i < report->maxfield; i++) {
++              if (report->field[i]->report_count < 1) {
++                      hid_err(hid, "no values in the field\n");
++                      return -ENODEV;
++              }
+               for (j = 0; j < report->field[i]->report_count; j++) {
+                       report->field[i]->value[j] = 0x00;
+-                      field_count++;
+               }
+       }
+ 
+-      if (field_count < 4) {
+-              hid_err(hid, "not enough fields in the report: %d\n",
+-                              field_count);
+-              return -ENODEV;
+-      }
+-
+       betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
+       if (!betopff)
+               return -ENOMEM;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 62656636d30c6..8cc79d0d11fb2 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -980,8 +980,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+                * Validating on id 0 means we should examine the first
+                * report in the list.
+                */
+-              report = list_entry(
+-                              hid->report_enum[type].report_list.next,
++              report = list_first_entry_or_null(
++                              &hid->report_enum[type].report_list,
+                               struct hid_report, list);
+       } else {
+               report = hid->report_enum[type].report_id_hash[id];
+diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+index 2783f36661149..ff4419c8ed4f6 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/dma-if.c
++++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
+@@ -113,6 +113,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+       int required_slots = (size / DMA_SLOT_SIZE)
+               + 1 * (size % DMA_SLOT_SIZE != 0);
+ 
++      if (!dev->ishtp_dma_tx_map) {
++              dev_err(dev->devc, "Fail to allocate Tx map\n");
++              return NULL;
++      }
++
+       spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+       for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+               free = 1;
+@@ -159,6 +164,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+               return;
+       }
+ 
++      if (!dev->ishtp_dma_tx_map) {
++              dev_err(dev->devc, "Fail to allocate Tx map\n");
++              return;
++      }
++
+       i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+       spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+       for (j = 0; j < acked_slots; j++) {
+diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+index 4e417ed08b099..dab823aac95e1 100644
+--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+@@ -325,6 +325,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+ 
+       if (!PAGE_ALIGNED(tinfo->vaddr))
+               return -EINVAL;
++      if (tinfo->length == 0)
++              return -EINVAL;
+ 
+       tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
+       if (!tidbuf)
+@@ -335,40 +337,38 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+       tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
+                               GFP_KERNEL);
+       if (!tidbuf->psets) {
+-              kfree(tidbuf);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto fail_release_mem;
+       }
+ 
+       pinned = pin_rcv_pages(fd, tidbuf);
+       if (pinned <= 0) {
+-              kfree(tidbuf->psets);
+-              kfree(tidbuf);
+-              return pinned;
++              ret = (pinned < 0) ? pinned : -ENOSPC;
++              goto fail_unpin;
+       }
+ 
+       /* Find sets of physically contiguous pages */
+       tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
+ 
+-      /*
+-       * We don't need to access this under a lock since tid_used is per
+-       * process and the same process cannot be in hfi1_user_exp_rcv_clear()
+-       * and hfi1_user_exp_rcv_setup() at the same time.
+-       */
++      /* Reserve the number of expected tids to be used. */
+       spin_lock(&fd->tid_lock);
+       if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
+               pageset_count = fd->tid_limit - fd->tid_used;
+       else
+               pageset_count = tidbuf->n_psets;
++      fd->tid_used += pageset_count;
+       spin_unlock(&fd->tid_lock);
+ 
+-      if (!pageset_count)
+-              goto bail;
++      if (!pageset_count) {
++              ret = -ENOSPC;
++              goto fail_unreserve;
++      }
+ 
+       ngroups = pageset_count / dd->rcv_entries.group_size;
+       tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
+       if (!tidlist) {
+               ret = -ENOMEM;
+-              goto nomem;
++              goto fail_unreserve;
+       }
+ 
+       tididx = 0;
+@@ -464,43 +464,60 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
+       }
+ unlock:
+       mutex_unlock(&uctxt->exp_mutex);
+-nomem:
+       hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
+                 mapped_pages, ret);
+-      if (tididx) {
+-              spin_lock(&fd->tid_lock);
+-              fd->tid_used += tididx;
+-              spin_unlock(&fd->tid_lock);
+-              tinfo->tidcnt = tididx;
+-              tinfo->length = mapped_pages * PAGE_SIZE;
+-
+-              if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+-                               tidlist, sizeof(tidlist[0]) * tididx)) {
+-                      /*
+-                       * On failure to copy to the user level, we need to undo
+-                       * everything done so far so we don't leak resources.
+-                       */
+-                      tinfo->tidlist = (unsigned long)&tidlist;
+-                      hfi1_user_exp_rcv_clear(fd, tinfo);
+-                      tinfo->tidlist = 0;
+-                      ret = -EFAULT;
+-                      goto bail;
+-              }
++
++      /* fail if nothing was programmed, set error if none provided */
++      if (tididx == 0) {
++              if (ret >= 0)
++                      ret = -ENOSPC;
++              goto fail_unreserve;
+       }
+ 
+-      /*
+-       * If not everything was mapped (due to insufficient RcvArray entries,
+-       * for example), unpin all unmapped pages so we can pin them nex time.
+-       */
+-      if (mapped_pages != pinned)
+-              unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
+-                              (pinned - mapped_pages), false);
+-bail:
++      /* adjust reserved tid_used to actual count */
++      spin_lock(&fd->tid_lock);
++      fd->tid_used -= pageset_count - tididx;
++      spin_unlock(&fd->tid_lock);
++
++      /* unpin all pages not covered by a TID */
++      unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
++                      false);
++
++      tinfo->tidcnt = tididx;
++      tinfo->length = mapped_pages * PAGE_SIZE;
++
++      if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
++                       tidlist, sizeof(tidlist[0]) * tididx)) {
++              ret = -EFAULT;
++              goto fail_unprogram;
++      }
++
++      kfree(tidbuf->pages);
+       kfree(tidbuf->psets);
++      kfree(tidbuf);
+       kfree(tidlist);
++      return 0;
++
++fail_unprogram:
++      /* unprogram, unmap, and unpin all allocated TIDs */
++      tinfo->tidlist = (unsigned long)tidlist;
++      hfi1_user_exp_rcv_clear(fd, tinfo);
++      tinfo->tidlist = 0;
++      pinned = 0;             /* nothing left to unpin */
++      pageset_count = 0;      /* nothing left reserved */
++fail_unreserve:
++      spin_lock(&fd->tid_lock);
++      fd->tid_used -= pageset_count;
++      spin_unlock(&fd->tid_lock);
++fail_unpin:
++      if (pinned > 0)
++              unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
++fail_release_mem:
+       kfree(tidbuf->pages);
++      kfree(tidbuf->psets);
+       kfree(tidbuf);
+-      return ret > 0 ? 0 : ret;
++      kfree(tidlist);
++      return ret;
+ }
+ 
+ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 7dc8ca5fd75f2..c6d393114502d 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -192,7 +192,6 @@ static const char * const smbus_pnp_ids[] = {
+       "SYN3221", /* HP 15-ay000 */
+       "SYN323d", /* HP Spectre X360 13-w013dx */
+       "SYN3257", /* HP Envy 13-ad105ng */
+-      "SYN3286", /* HP Laptop 15-da3001TU */
+       NULL
+ };
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 4666084eda16a..9d6fe5a892d9c 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
+       netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
+ }
+ 
++static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
++{
++      unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
++
++      /* From MAC ver 30H the TFCR is per priority, instead of per queue */
++      if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
++              return max_q_count;
++      else
++              return min_t(unsigned int, pdata->tx_q_count, max_q_count);
++}
++
+ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+-      unsigned int max_q_count, q_count;
+       unsigned int reg, reg_val;
+-      unsigned int i;
++      unsigned int i, q_count;
+ 
+       /* Clear MTL flow control */
+       for (i = 0; i < pdata->rx_q_count; i++)
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+ 
+       /* Clear MAC flow control */
+-      max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+-      q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++      q_count = xgbe_get_fc_queue_count(pdata);
+       reg = MAC_Q0TFCR;
+       for (i = 0; i < q_count; i++) {
+               reg_val = XGMAC_IOREAD(pdata, reg);
+@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+       struct ieee_pfc *pfc = pdata->pfc;
+       struct ieee_ets *ets = pdata->ets;
+-      unsigned int max_q_count, q_count;
+       unsigned int reg, reg_val;
+-      unsigned int i;
++      unsigned int i, q_count;
+ 
+       /* Set MTL flow control */
+       for (i = 0; i < pdata->rx_q_count; i++) {
+@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+       }
+ 
+       /* Set MAC flow control */
+-      max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+-      q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
++      q_count = xgbe_get_fc_queue_count(pdata);
+       reg = MAC_Q0TFCR;
+       for (i = 0; i < q_count; i++) {
+               reg_val = XGMAC_IOREAD(pdata, reg);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 97167fc9bebe7..7840eb4cdb8da 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+       reg |= XGBE_KR_TRAINING_ENABLE;
+       reg |= XGBE_KR_TRAINING_START;
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++      pdata->kr_start_time = jiffies;
+ 
+       netif_dbg(pdata, link, pdata->netdev,
+                 "KR training initiated\n");
+@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+ 
+       xgbe_switch_mode(pdata);
+ 
++      pdata->an_result = XGBE_AN_READY;
++
+       xgbe_an_restart(pdata);
+ 
+       return XGBE_AN_INCOMPAT_LINK;
+@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+ {
+       unsigned long link_timeout;
++      unsigned long kr_time;
++      int wait;
+ 
+       link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+       if (time_after(jiffies, link_timeout)) {
++              if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
++                  pdata->phy.autoneg == AUTONEG_ENABLE) {
++                      /* AN restart should not happen while KR training is in progress.
++                       * The while loop ensures no AN restart during KR training,
++                       * waits up to 500ms and AN restart is triggered only if KR
++                       * training is failed.
++                       */
++                      wait = XGBE_KR_TRAINING_WAIT_ITER;
++                      while (wait--) {
++                              kr_time = pdata->kr_start_time +
++                                        msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
++                              if (time_after(jiffies, kr_time))
++                                      break;
++                              /* AN restart is not required, if AN result is COMPLETE */
++                              if (pdata->an_result == XGBE_AN_COMPLETE)
++                                      return;
++                              usleep_range(10000, 11000);
++                      }
++              }
+               netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+               xgbe_phy_config_aneg(pdata);
+       }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 0c93a552b921d..729307a96c50d 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -290,6 +290,7 @@
+ /* Auto-negotiation */
+ #define XGBE_AN_MS_TIMEOUT            500
+ #define XGBE_LINK_TIMEOUT             5
++#define XGBE_KR_TRAINING_WAIT_ITER    50
+ 
+ #define XGBE_SGMII_AN_LINK_STATUS     BIT(1)
+ #define XGBE_SGMII_AN_LINK_SPEED      (BIT(2) | BIT(3))
+@@ -1266,6 +1267,7 @@ struct xgbe_prv_data {
+       unsigned int parallel_detect;
+       unsigned int fec_ability;
+       unsigned long an_start;
++      unsigned long kr_start_time;
+       enum xgbe_an_mode an_mode;
+ 
+       /* I2C support */
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index d1ca3d3f51a7a..2cf144bbef3ee 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -11189,7 +11189,7 @@ static void tg3_reset_task(struct work_struct *work)
+       rtnl_lock();
+       tg3_full_lock(tp, 0);
+ 
+-      if (!netif_running(tp->dev)) {
++      if (tp->pcierr_recovery || !netif_running(tp->dev)) {
+               tg3_flag_clear(tp, RESET_TASK_PENDING);
+               tg3_full_unlock(tp);
+               rtnl_unlock();
+@@ -18240,6 +18240,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ 
+       netdev_info(netdev, "PCI I/O error detected\n");
+ 
++      /* Want to make sure that the reset task doesn't run */
++      tg3_reset_task_cancel(tp);
++
+       rtnl_lock();
+ 
+       /* Could be second call or maybe we don't have netdev yet */
+@@ -18256,9 +18259,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ 
+       tg3_timer_stop(tp);
+ 
+-      /* Want to make sure that the reset task doesn't run */
+-      tg3_reset_task_cancel(tp);
+-
+       netif_device_detach(netdev);
+ 
+       /* Clean up software state, even if MMIO is blocked */
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 50331b202f73b..324d81516832c 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1738,7 +1738,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+       bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+                     skb_is_nonlinear(*skb);
+       int padlen = ETH_ZLEN - (*skb)->len;
+-      int headroom = skb_headroom(*skb);
+       int tailroom = skb_tailroom(*skb);
+       struct sk_buff *nskb;
+       u32 fcs;
+@@ -1752,9 +1751,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+               /* FCS could be appeded to tailroom. */
+               if (tailroom >= ETH_FCS_LEN)
+                       goto add_fcs;
+-              /* FCS could be appeded by moving data to headroom. */
+-              else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
+-                      padlen = 0;
+               /* No room for FCS, need to reallocate skb. */
+               else
+                       padlen = ETH_FCS_LEN;
+@@ -1763,10 +1759,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+               padlen += ETH_FCS_LEN;
+       }
+ 
+-      if (!cloned && headroom + tailroom >= padlen) {
+-              (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
+-              skb_set_tail_pointer(*skb, (*skb)->len);
+-      } else {
++      if (cloned || tailroom < padlen) {
+               nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
+               if (!nskb)
+                       return -ENOMEM;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index a2b25afa24722..e09bd059984e7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1683,7 +1683,7 @@ static void mlx5_core_verify_params(void)
+       }
+ }
+ 
+-static int __init init(void)
++static int __init mlx5_init(void)
+ {
+       int err;
+ 
+@@ -1708,7 +1708,7 @@ err_debug:
+       return err;
+ }
+ 
+-static void __exit cleanup(void)
++static void __exit mlx5_cleanup(void)
+ {
+ #ifdef CONFIG_MLX5_CORE_EN
+       mlx5e_cleanup();
+@@ -1717,5 +1717,5 @@ static void __exit cleanup(void)
+       mlx5_unregister_debugfs();
+ }
+ 
+-module_init(init);
+-module_exit(cleanup);
++module_init(mlx5_init);
++module_exit(mlx5_cleanup);
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index ff374d0d80a7d..a1906804c139e 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -738,14 +738,14 @@ static void ravb_error_interrupt(struct net_device *ndev)
+       ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
+       if (eis & EIS_QFS) {
+               ris2 = ravb_read(ndev, RIS2);
+-              ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
++              ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED),
+                          RIS2);
+ 
+               /* Receive Descriptor Empty int */
+               if (ris2 & RIS2_QFF0)
+                       priv->stats[RAVB_BE].rx_over_errors++;
+ 
+-                  /* Receive Descriptor Empty int */
++              /* Receive Descriptor Empty int */
+               if (ris2 & RIS2_QFF1)
+                       priv->stats[RAVB_NC].rx_over_errors++;
+ 
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index 1d1fbd7bd6fc1..550806351049d 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -103,7 +103,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
+ 
+ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
+ {
+-      struct mdio_device *mdiodev = bus->mdio_map[addr];
++      struct mdio_device *mdiodev;
++
++      if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
++              return NULL;
++
++      mdiodev = bus->mdio_map[addr];
+ 
+       if (!mdiodev)
+               return NULL;
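
The mdio_bus.c hunk above rejects an out-of-range address before indexing bus->mdio_map. A minimal userspace sketch of the same validate-before-index pattern follows; the map array and lookup helper are illustrative stand-ins, not kernel API:

#include <stdio.h>
#include <stddef.h>

#define MAP_SIZE 32                    /* stand-in for the fixed-size mdio_map */

static const char *phy_map[MAP_SIZE];

/* Reject an out-of-range address before touching the array, as the
 * patched mdiobus_get_phy() now does with ARRAY_SIZE(bus->mdio_map). */
static const char *map_get(int addr)
{
	if (addr < 0 || addr >= (int)MAP_SIZE)
		return NULL;
	return phy_map[addr];
}

int main(void)
{
	phy_map[3] = "phy@3";
	printf("%s\n", map_get(3)  ? map_get(3)  : "(none)");
	printf("%s\n", map_get(-1) ? map_get(-1) : "(none)"); /* safely rejected */
	return 0;
}
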
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 83640628c47dd..8bee8286e41a1 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+               /* ignore the CRC length */
+               len = (skb->data[1] | (skb->data[2] << 8)) - 4;
+ 
+-              if (len > ETH_FRAME_LEN || len > skb->len)
++              if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
+                       return 0;
+ 
+               /* the last packet of current skb */
+diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
+index 51e4e92d95a0d..0bbeb61ec3a38 100644
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -712,8 +712,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+               struct rndis_query      *get;
+               struct rndis_query_c    *get_c;
+       } u;
+-      int ret, buflen;
+-      int resplen, respoffs, copylen;
++      int ret;
++      size_t buflen, resplen, respoffs, copylen;
+ 
+       buflen = *len + sizeof(*u.get);
+       if (buflen < CONTROL_BUFFER_SIZE)
+@@ -748,22 +748,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
+ 
+               if (respoffs > buflen) {
+                       /* Device returned data offset outside buffer, error. */
+-                      netdev_dbg(dev->net, "%s(%s): received invalid "
+-                              "data offset: %d > %d\n", __func__,
+-                              oid_to_string(oid), respoffs, buflen);
++                      netdev_dbg(dev->net,
++                                 "%s(%s): received invalid data offset: %zu > 
%zu\n",
++                                 __func__, oid_to_string(oid), respoffs, 
buflen);
+ 
+                       ret = -EINVAL;
+                       goto exit_unlock;
+               }
+ 
+-              if ((resplen + respoffs) > buflen) {
+-                      /* Device would have returned more data if buffer would
+-                       * have been big enough. Copy just the bits that we got.
+-                       */
+-                      copylen = buflen - respoffs;
+-              } else {
+-                      copylen = resplen;
+-              }
++              copylen = min(resplen, buflen - respoffs);
+ 
+               if (copylen > *len)
+                       copylen = *len;
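
The rndis_query_oid() fix above moves the length bookkeeping to size_t and collapses the hand-rolled partial-copy branch into min(resplen, buflen - respoffs), after first rejecting respoffs > buflen outright. A small sketch of that clamping logic; the sample values are made up:

#include <stdio.h>
#include <stddef.h>

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Validate the device-supplied offset first, then clamp the copy length
 * so it can never run past the local buffer. */
static int clamp_copy(size_t resplen, size_t respoffs, size_t buflen,
		      size_t *copylen)
{
	if (respoffs > buflen)
		return -1;                      /* bogus offset from device */
	*copylen = min_size(resplen, buflen - respoffs);
	return 0;
}

int main(void)
{
	size_t copylen;

	if (clamp_copy(4096, 64, 256, &copylen) == 0)
		printf("copylen = %zu\n", copylen);     /* 192, not 4096 */
	return 0;
}
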
+diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+index 5049dac79bd0f..77c1c3ffaed7e 100644
+--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
++++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+@@ -477,8 +477,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
+               return ret;
+ 
+       ret = property_enable(base, &rport->port_cfg->phy_sus, false);
+-      if (ret)
++      if (ret) {
++              clk_disable_unprepare(rphy->clk480m);
+               return ret;
++      }
+ 
+       /* waiting for the utmi_clk to become stable */
+       usleep_range(1500, 2000);
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 13931c5c0eff2..25d9bdd4bc698 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -5771,7 +5771,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
+ {
+       struct Scsi_Host *sh;
+ 
+-      sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
++      sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
+       if (sh == NULL) {
+               dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
+               return -ENOMEM;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 49eb4e3c760f4..48bdb2a3972bc 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -271,6 +271,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+       struct usb_request *req = ffs->ep0req;
+       int ret;
+ 
++      if (!req)
++              return -EINVAL;
++
+       req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
+ 
+       spin_unlock_irq(&ffs->ev.waitq.lock);
+@@ -1807,10 +1810,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
+       ENTER();
+ 
+       if (!WARN_ON(!ffs->gadget)) {
++              /* dequeue before freeing ep0req */
++              usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
++              mutex_lock(&ffs->mutex);
+               usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+               ffs->ep0req = NULL;
+               ffs->gadget = NULL;
+               clear_bit(FFS_FL_BOUND, &ffs->flags);
++              mutex_unlock(&ffs->mutex);
+               ffs_data_put(ffs);
+       }
+ }
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index adc437ca83b88..cb3ba2adae642 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -261,7 +261,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
+                       *priv = *priv_match;
+       }
+ 
+-      device_wakeup_enable(hcd->self.controller);
++      device_set_wakeup_capable(&pdev->dev, true);
+ 
+       xhci->clk = clk;
+       xhci->reg_clk = reg_clk;
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index 890c038c25f87..cb3650efc29cd 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -1140,6 +1140,8 @@ int w1_process(void *data)
+       /* remainder if it woke up early */
+       unsigned long jremain = 0;
+ 
++      atomic_inc(&dev->refcnt);
++
+       for (;;) {
+ 
+               if (!jremain && dev->search_count) {
+@@ -1167,8 +1169,10 @@ int w1_process(void *data)
+                */
+               mutex_unlock(&dev->list_mutex);
+ 
+-              if (kthread_should_stop())
++              if (kthread_should_stop()) {
++                      __set_current_state(TASK_RUNNING);
+                       break;
++              }
+ 
+               /* Only sleep when the search is active. */
+               if (dev->search_count) {
+diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
+index 1c776178f598d..eb851eb443005 100644
+--- a/drivers/w1/w1_int.c
++++ b/drivers/w1/w1_int.c
+@@ -60,10 +60,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
+       dev->search_count       = w1_search_count;
+       dev->enable_pullup      = w1_enable_pullup;
+ 
+-      /* 1 for w1_process to decrement
+-       * 1 for __w1_remove_master_device to decrement
++      /* For __w1_remove_master_device to decrement
+        */
+-      atomic_set(&dev->refcnt, 2);
++      atomic_set(&dev->refcnt, 1);
+ 
+       INIT_LIST_HEAD(&dev->slist);
+       INIT_LIST_HEAD(&dev->async_list);
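
Taken together, the two w1 hunks move one of the two initial references out of w1_alloc_dev() and into w1_process() itself, so that reference exists only once the kthread actually runs. A rough userspace analog of the take-your-own-reference pattern; the struct and flow are simplified stand-ins:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct master {
	atomic_int refcnt;
};

/* Allocation takes one reference for the eventual remover only; the
 * worker grabs its own reference when (and if) it starts, which is the
 * shape of the w1 fix above. */
static struct master *master_alloc(void)
{
	struct master *m = malloc(sizeof(*m));
	if (m)
		atomic_init(&m->refcnt, 1);
	return m;
}

static void master_put(struct master *m)
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1) {
		puts("freeing master");
		free(m);
	}
}

static void worker(struct master *m)
{
	atomic_fetch_add(&m->refcnt, 1);  /* worker owns a reference */
	/* ... do work ... */
	master_put(m);                    /* drop it on exit */
}

int main(void)
{
	struct master *m = master_alloc();
	if (!m)
		return 1;
	worker(m);       /* may or may not ever run in the real driver */
	master_put(m);   /* remover's reference */
	return 0;
}
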
+diff --git a/fs/affs/file.c b/fs/affs/file.c
+index ba084b0b214b9..82bb38370aa9a 100644
+--- a/fs/affs/file.c
++++ b/fs/affs/file.c
+@@ -878,7 +878,7 @@ affs_truncate(struct inode *inode)
+       if (inode->i_size > AFFS_I(inode)->mmu_private) {
+               struct address_space *mapping = inode->i_mapping;
+               struct page *page;
+-              void *fsdata;
++              void *fsdata = NULL;
+               loff_t isize = inode->i_size;
+               int res;
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index a7ef847f285d3..37e91f27f49ba 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -429,7 +429,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
+                        server->ssocket->state, server->ssocket->flags);
+               sock_release(server->ssocket);
+               server->ssocket = NULL;
+-      }
++      } else if (cifs_rdma_enabled(server))
++              smbd_destroy(server);
+       server->sequence_number = 0;
+       server->session_estab = false;
+       kfree(server->session_key.response);
+@@ -799,10 +800,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
+       wake_up_all(&server->request_q);
+       /* give those requests time to exit */
+       msleep(125);
+-      if (cifs_rdma_enabled(server) && server->smbd_conn) {
+-              smbd_destroy(server->smbd_conn);
+-              server->smbd_conn = NULL;
+-      }
++      if (cifs_rdma_enabled(server))
++              smbd_destroy(server);
+       if (server->ssocket) {
+               sock_release(server->ssocket);
+               server->ssocket = NULL;
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 784628ec4bc40..591cd5c704323 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -320,6 +320,9 @@ static int smbd_conn_upcall(
+ 
+               info->transport_status = SMBD_DISCONNECTED;
+               smbd_process_disconnected(info);
++              wake_up(&info->disconn_wait);
++              wake_up_interruptible(&info->wait_reassembly_queue);
++              wake_up_interruptible_all(&info->wait_send_queue);
+               break;
+ 
+       default:
+@@ -1478,21 +1481,102 @@ static void idle_connection_timer(struct work_struct *work)
+                       info->keep_alive_interval*HZ);
+ }
+ 
+-/* Destroy this SMBD connection, called from upper layer */
+-void smbd_destroy(struct smbd_connection *info)
++/*
++ * Destroy the transport and related RDMA and memory resources
++ * Need to go through all the pending counters and make sure no one is using
++ * the transport while it is destroyed
++ */
++void smbd_destroy(struct TCP_Server_Info *server)
+ {
++      struct smbd_connection *info = server->smbd_conn;
++      struct smbd_response *response;
++      unsigned long flags;
++
++      if (!info) {
++              log_rdma_event(INFO, "rdma session already destroyed\n");
++              return;
++      }
++
+       log_rdma_event(INFO, "destroying rdma session\n");
++      if (info->transport_status != SMBD_DISCONNECTED) {
++              rdma_disconnect(server->smbd_conn->id);
++              log_rdma_event(INFO, "wait for transport being disconnected\n");
++              wait_event(
++                      info->disconn_wait,
++                      info->transport_status == SMBD_DISCONNECTED);
++      }
+ 
+-      /* Kick off the disconnection process */
+-      smbd_disconnect_rdma_connection(info);
++      log_rdma_event(INFO, "destroying qp\n");
++      ib_drain_qp(info->id->qp);
++      rdma_destroy_qp(info->id);
++
++      log_rdma_event(INFO, "cancelling idle timer\n");
++      cancel_delayed_work_sync(&info->idle_timer_work);
++      log_rdma_event(INFO, "cancelling send immediate work\n");
++      cancel_delayed_work_sync(&info->send_immediate_work);
++
++      log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
++      wait_event(info->wait_send_pending,
++              atomic_read(&info->send_pending) == 0);
++      wait_event(info->wait_send_payload_pending,
++              atomic_read(&info->send_payload_pending) == 0);
++
++      /* It's not possible for upper layer to get to reassembly */
++      log_rdma_event(INFO, "drain the reassembly queue\n");
++      do {
++              spin_lock_irqsave(&info->reassembly_queue_lock, flags);
++              response = _get_first_reassembly(info);
++              if (response) {
++                      list_del(&response->list);
++                      spin_unlock_irqrestore(
++                              &info->reassembly_queue_lock, flags);
++                      put_receive_buffer(info, response);
++              } else
++                      spin_unlock_irqrestore(
++                              &info->reassembly_queue_lock, flags);
++      } while (response);
++      info->reassembly_data_length = 0;
++
++      log_rdma_event(INFO, "free receive buffers\n");
++      wait_event(info->wait_receive_queues,
++              info->count_receive_queue + info->count_empty_packet_queue
++                      == info->receive_credit_max);
++      destroy_receive_buffers(info);
++
++      /*
++       * For performance reasons, memory registration and deregistration
++       * are not locked by srv_mutex. It is possible some processes are
++       * blocked on transport srv_mutex while holding memory registration.
++       * Release the transport srv_mutex to allow them to hit the failure
++       * path when sending data, and then release memory registrations.
++       */
++      log_rdma_event(INFO, "freeing mr list\n");
++      wake_up_interruptible_all(&info->wait_mr);
++      while (atomic_read(&info->mr_used_count)) {
++              mutex_unlock(&server->srv_mutex);
++              msleep(1000);
++              mutex_lock(&server->srv_mutex);
++      }
++      destroy_mr_list(info);
++
++      ib_free_cq(info->send_cq);
++      ib_free_cq(info->recv_cq);
++      ib_dealloc_pd(info->pd);
++      rdma_destroy_id(info->id);
+ 
+-      log_rdma_event(INFO, "wait for transport being destroyed\n");
+-      wait_event(info->wait_destroy,
+-              info->transport_status == SMBD_DESTROYED);
++      /* free mempools */
++      mempool_destroy(info->request_mempool);
++      kmem_cache_destroy(info->request_cache);
++
++      mempool_destroy(info->response_mempool);
++      kmem_cache_destroy(info->response_cache);
++
++      info->transport_status = SMBD_DESTROYED;
+ 
+       destroy_workqueue(info->workqueue);
+       log_rdma_event(INFO,  "rdma session destroyed\n");
+       kfree(info);
++      server->smbd_conn = NULL;
+ }
+ 
+ /*
+@@ -1514,17 +1598,9 @@ int smbd_reconnect(struct TCP_Server_Info *server)
+        */
+       if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+               log_rdma_event(INFO, "disconnecting transport\n");
+-              smbd_disconnect_rdma_connection(server->smbd_conn);
++              smbd_destroy(server);
+       }
+ 
+-      /* wait until the transport is destroyed */
+-      if (!wait_event_timeout(server->smbd_conn->wait_destroy,
+-              server->smbd_conn->transport_status == SMBD_DESTROYED, 5*HZ))
+-              return -EAGAIN;
+-
+-      destroy_workqueue(server->smbd_conn->workqueue);
+-      kfree(server->smbd_conn);
+-
+ create_conn:
+       log_rdma_event(INFO, "creating rdma session\n");
+       server->smbd_conn = smbd_get_connection(
+@@ -1741,12 +1817,13 @@ static struct smbd_connection *_smbd_get_connection(
+       conn_param.retry_count = SMBD_CM_RETRY;
+       conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
+       conn_param.flow_control = 0;
+-      init_waitqueue_head(&info->wait_destroy);
+ 
+       log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
+               &addr_in->sin_addr, port);
+ 
+       init_waitqueue_head(&info->conn_wait);
++      init_waitqueue_head(&info->disconn_wait);
++      init_waitqueue_head(&info->wait_reassembly_queue);
+       rc = rdma_connect(info->id, &conn_param);
+       if (rc) {
+               log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
+@@ -1770,8 +1847,6 @@ static struct smbd_connection *_smbd_get_connection(
+       }
+ 
+       init_waitqueue_head(&info->wait_send_queue);
+-      init_waitqueue_head(&info->wait_reassembly_queue);
+-
+       INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
+       INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
+       queue_delayed_work(info->workqueue, &info->idle_timer_work,
+@@ -1812,7 +1887,7 @@ static struct smbd_connection *_smbd_get_connection(
+ 
+ allocate_mr_failed:
+       /* At this point, need to a full transport shutdown */
+-      smbd_destroy(info);
++      smbd_destroy(server);
+       return NULL;
+ 
+ negotiation_failed:
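
One detail worth noting in the new smbd_destroy() above is the reassembly-queue drain: each element is detached under the spinlock, the lock is dropped, and only then is the element released. A userspace sketch of that drain pattern, with a mutex and list as simplified stand-ins:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct response {
	struct response *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct response *queue_head;

/* Drain pattern from smbd_destroy(): detach one element under the
 * lock, release the lock, then free it, until the list is empty. */
static void drain_queue(void)
{
	struct response *r;

	do {
		pthread_mutex_lock(&queue_lock);
		r = queue_head;
		if (r)
			queue_head = r->next;
		pthread_mutex_unlock(&queue_lock);
		free(r);                /* free(NULL) is a no-op */
	} while (r);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct response *r = malloc(sizeof(*r));
		r->next = queue_head;
		queue_head = r;
	}
	drain_queue();
	puts(queue_head ? "not empty" : "empty");
	return 0;
}
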
+diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
+index b5c240ff21919..b0ca7df414543 100644
+--- a/fs/cifs/smbdirect.h
++++ b/fs/cifs/smbdirect.h
+@@ -71,6 +71,7 @@ struct smbd_connection {
+       struct completion ri_done;
+       wait_queue_head_t conn_wait;
+       wait_queue_head_t wait_destroy;
++      wait_queue_head_t disconn_wait;
+ 
+       struct completion negotiate_completion;
+       bool negotiate_done;
+@@ -288,7 +289,7 @@ struct smbd_connection *smbd_get_connection(
+ /* Reconnect SMBDirect session */
+ int smbd_reconnect(struct TCP_Server_Info *server);
+ /* Destroy SMBDirect session */
+-void smbd_destroy(struct smbd_connection *info);
++void smbd_destroy(struct TCP_Server_Info *server);
+ 
+ /* Interface for carrying upper layer I/O through send/recv */
+ int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
+@@ -331,7 +332,7 @@ struct smbd_connection {};
+ static inline void *smbd_get_connection(
+       struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
+ static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
+-static inline void smbd_destroy(struct smbd_connection *info) {}
++static inline void smbd_destroy(struct TCP_Server_Info *server) {}
+ static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
+ static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
+ #endif
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index c95f32b83a942..7c62a526506c1 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -13,6 +13,7 @@
+ #include <linux/namei.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
++#include <linux/kmemleak.h>
+ #include "internal.h"
+ 
+ static const struct dentry_operations proc_sys_dentry_operations;
+@@ -1376,6 +1377,38 @@ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab
+ }
+ EXPORT_SYMBOL(register_sysctl);
+ 
++/**
++ * __register_sysctl_init() - register sysctl table to path
++ * @path: path name for sysctl base
++ * @table: This is the sysctl table that needs to be registered to the path
++ * @table_name: The name of sysctl table, only used for log printing when
++ *              registration fails
++ *
++ * The sysctl interface is used by userspace to query or modify at runtime
++ * a predefined value set on a variable. These variables however have default
++ * values pre-set. Code which depends on these variables will always work even
++ * if register_sysctl() fails. If register_sysctl() fails you'd just lose the
++ * ability to query or modify the sysctls dynamically at run time. Chances of
++ * register_sysctl() failing on init are extremely low, and so for both reasons
++ * this function does not return any error as it is used by initialization code.
++ *
++ * Context: Can only be called after your respective sysctl base path has been
++ * registered. So for instance, most base directories are registered early on
++ * init before init levels are processed through proc_sys_init() and
++ * sysctl_init().
++ */
++void __init __register_sysctl_init(const char *path, struct ctl_table *table,
++                               const char *table_name)
++{
++      struct ctl_table_header *hdr = register_sysctl(path, table);
++
++      if (unlikely(!hdr)) {
++              pr_err("failed when register_sysctl %s to %s\n", table_name, 
path);
++              return;
++      }
++      kmemleak_not_leak(hdr);
++}
++
+ static char *append_path(const char *path, char *pos, const char *name)
+ {
+       int namelen;
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 831a542c22c66..e5be1d747c036 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1443,7 +1443,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+       unsigned long safe_mask = 0;
+       unsigned int commit_max_age = (unsigned int)-1;
+       struct reiserfs_journal *journal = SB_JOURNAL(s);
+-      char *new_opts;
+       int err;
+       char *qf_names[REISERFS_MAXQUOTAS];
+       unsigned int qfmt = 0;
+@@ -1451,10 +1450,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+       int i;
+ #endif
+ 
+-      new_opts = kstrdup(arg, GFP_KERNEL);
+-      if (arg && !new_opts)
+-              return -ENOMEM;
+-
+       sync_filesystem(s);
+       reiserfs_write_lock(s);
+ 
+@@ -1605,7 +1600,6 @@ out_ok_unlocked:
+ out_err_unlock:
+       reiserfs_write_unlock(s);
+ out_err:
+-      kfree(new_opts);
+       return err;
+ }
+ 
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 50733abbe548e..a28ec4c2f3f5a 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -327,6 +327,7 @@ extern long (*panic_blink)(int state);
+ __printf(1, 2)
+ void panic(const char *fmt, ...) __noreturn __cold;
+ void nmi_panic(struct pt_regs *regs, const char *msg);
++void check_panic_on_warn(const char *origin);
+ extern void oops_enter(void);
+ extern void oops_exit(void);
+ void print_oops_end_marker(void);
+diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
+index 91401309b1aa2..0ee9aab3e3091 100644
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -36,6 +36,7 @@ extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+ extern void sched_dead(struct task_struct *p);
+ 
+ void __noreturn do_task_dead(void);
++void __noreturn make_task_dead(int signr);
+ 
+ extern void proc_caches_init(void);
+ 
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index b769ecfcc3bd4..0a980aecc8f02 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -198,6 +198,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
+ void unregister_sysctl_table(struct ctl_table_header * table);
+ 
+ extern int sysctl_init(void);
++extern void __register_sysctl_init(const char *path, struct ctl_table *table,
++                               const char *table_name);
++#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
+ 
+ extern struct ctl_table sysctl_mount_point[];
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 694ee0b1fefea..61f3a31abc1ad 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1012,7 +1012,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
+               bool sanitize = reg && is_spillable_regtype(reg->type);
+ 
+               for (i = 0; i < size; i++) {
+-                      if (state->stack[spi].slot_type[i] == STACK_INVALID) {
++                      u8 type = state->stack[spi].slot_type[i];
++
++                      if (type != STACK_MISC && type != STACK_ZERO) {
+                               sanitize = true;
+                               break;
+                       }
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 908e7a33e1fcb..02360ec3b1225 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -62,12 +62,59 @@
+ #include <linux/random.h>
+ #include <linux/rcuwait.h>
+ #include <linux/compat.h>
++#include <linux/sysfs.h>
+ 
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+ #include <asm/pgtable.h>
+ #include <asm/mmu_context.h>
+ 
++/*
++ * The default value should be high enough to not crash a system that randomly
++ * crashes its kernel from time to time, but low enough to at least not permit
++ * overflowing 32-bit refcounts or the ldsem writer count.
++ */
++static unsigned int oops_limit = 10000;
++
++#ifdef CONFIG_SYSCTL
++static struct ctl_table kern_exit_table[] = {
++      {
++              .procname       = "oops_limit",
++              .data           = &oops_limit,
++              .maxlen         = sizeof(oops_limit),
++              .mode           = 0644,
++              .proc_handler   = proc_douintvec,
++      },
++      { }
++};
++
++static __init int kernel_exit_sysctls_init(void)
++{
++      register_sysctl_init("kernel", kern_exit_table);
++      return 0;
++}
++late_initcall(kernel_exit_sysctls_init);
++#endif
++
++static atomic_t oops_count = ATOMIC_INIT(0);
++
++#ifdef CONFIG_SYSFS
++static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
++                             char *page)
++{
++      return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
++}
++
++static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
++
++static __init int kernel_exit_sysfs_init(void)
++{
++      sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
++      return 0;
++}
++late_initcall(kernel_exit_sysfs_init);
++#endif
++
+ static void __unhash_process(struct task_struct *p, bool group_dead)
+ {
+       nr_threads--;
+@@ -922,6 +969,31 @@ void __noreturn do_exit(long code)
+ }
+ EXPORT_SYMBOL_GPL(do_exit);
+ 
++void __noreturn make_task_dead(int signr)
++{
++      /*
++       * Take the task off the cpu after something catastrophic has
++       * happened.
++       */
++      unsigned int limit;
++
++      /*
++       * Every time the system oopses, if the oops happens while a reference
++       * to an object was held, the reference leaks.
++       * If the oops doesn't also leak memory, repeated oopsing can cause
++       * reference counters to wrap around (if they're not using refcount_t).
++       * This means that repeated oopsing can make unexploitable-looking bugs
++       * exploitable through repeated oopsing.
++       * To make sure this can't happen, place an upper bound on how often the
++       * kernel may oops without panic().
++       */
++      limit = READ_ONCE(oops_limit);
++      if (atomic_inc_return(&oops_count) >= limit && limit)
++              panic("Oopsed too often (kernel.oops_limit is %d)", limit);
++
++      do_exit(signr);
++}
++
+ void complete_and_exit(struct completion *comp, long code)
+ {
+       if (comp)
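
The exit.c hunk above is the core of the new oops accounting: an atomic counter bumped on every oops and compared against a sysctl-controlled limit, with a zero limit disabling the check. A compilable userspace sketch of the same count-and-trip logic; panic() is stubbed out here:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint oops_count;
static unsigned int oops_limit = 10000;   /* kernel.oops_limit default */

static void panic(const char *why)
{
	fprintf(stderr, "panic: %s\n", why);
	exit(1);
}

/* Same shape as make_task_dead(): increment first, then trip the panic
 * only when a non-zero limit has been reached. */
static void count_oops(void)
{
	unsigned int limit = oops_limit;      /* READ_ONCE() analog */

	if (atomic_fetch_add(&oops_count, 1) + 1 >= limit && limit)
		panic("oopsed too often");
}

int main(void)
{
	oops_limit = 3;
	for (int i = 0; i < 5; i++) {
		printf("oops %d\n", i + 1);
		count_oops();                 /* exits on the third oops */
	}
	return 0;
}
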
+diff --git a/kernel/module.c b/kernel/module.c
+index 42a604401c4dd..6ec0b2e0f01f5 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3478,7 +3478,8 @@ static bool finished_loading(const char *name)
+       sched_annotate_sleep();
+       mutex_lock(&module_mutex);
+       mod = find_module_all(name, strlen(name), true);
+-      ret = !mod || mod->state == MODULE_STATE_LIVE;
++      ret = !mod || mod->state == MODULE_STATE_LIVE
++              || mod->state == MODULE_STATE_GOING;
+       mutex_unlock(&module_mutex);
+ 
+       return ret;
+@@ -3632,20 +3633,35 @@ static int add_unformed_module(struct module *mod)
+ 
+       mod->state = MODULE_STATE_UNFORMED;
+ 
+-again:
+       mutex_lock(&module_mutex);
+       old = find_module_all(mod->name, strlen(mod->name), true);
+       if (old != NULL) {
+-              if (old->state != MODULE_STATE_LIVE) {
++              if (old->state == MODULE_STATE_COMING
++                  || old->state == MODULE_STATE_UNFORMED) {
+                       /* Wait in case it fails to load. */
+                       mutex_unlock(&module_mutex);
+                       err = wait_event_interruptible(module_wq,
+                                              finished_loading(mod->name));
+                       if (err)
+                               goto out_unlocked;
+-                      goto again;
++
++                      /* The module might have gone in the meantime. */
++                      mutex_lock(&module_mutex);
++                      old = find_module_all(mod->name, strlen(mod->name),
++                                            true);
+               }
+-              err = -EEXIST;
++
++              /*
++               * We are here only when the same module was being loaded. Do
++               * not try to load it again right now. It prevents long delays
++               * caused by serialized module load failures. It might happen
++               * when more devices of the same type trigger load of
++               * a particular module.
++               */
++              if (old && old->state == MODULE_STATE_LIVE)
++                      err = -EEXIST;
++              else
++                      err = -EBUSY;
+               goto out;
+       }
+       mod_update_bounds(mod);
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 8138a676fb7d1..982ecba286c08 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -29,6 +29,7 @@
+ #include <linux/bug.h>
+ #include <linux/ratelimit.h>
+ #include <linux/debugfs.h>
++#include <linux/sysfs.h>
+ #include <asm/sections.h>
+ 
+ #define PANIC_TIMER_STEP 100
+@@ -42,6 +43,7 @@ static int pause_on_oops_flag;
+ static DEFINE_SPINLOCK(pause_on_oops_lock);
+ bool crash_kexec_post_notifiers;
+ int panic_on_warn __read_mostly;
++static unsigned int warn_limit __read_mostly;
+ 
+ int panic_timeout = CONFIG_PANIC_TIMEOUT;
+ EXPORT_SYMBOL_GPL(panic_timeout);
+@@ -50,6 +52,45 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
+ 
+ EXPORT_SYMBOL(panic_notifier_list);
+ 
++#ifdef CONFIG_SYSCTL
++static struct ctl_table kern_panic_table[] = {
++      {
++              .procname       = "warn_limit",
++              .data           = &warn_limit,
++              .maxlen         = sizeof(warn_limit),
++              .mode           = 0644,
++              .proc_handler   = proc_douintvec,
++      },
++      { }
++};
++
++static __init int kernel_panic_sysctls_init(void)
++{
++      register_sysctl_init("kernel", kern_panic_table);
++      return 0;
++}
++late_initcall(kernel_panic_sysctls_init);
++#endif
++
++static atomic_t warn_count = ATOMIC_INIT(0);
++
++#ifdef CONFIG_SYSFS
++static ssize_t warn_count_show(struct kobject *kobj, struct kobj_attribute *attr,
++                             char *page)
++{
++      return sysfs_emit(page, "%d\n", atomic_read(&warn_count));
++}
++
++static struct kobj_attribute warn_count_attr = __ATTR_RO(warn_count);
++
++static __init int kernel_panic_sysfs_init(void)
++{
++      sysfs_add_file_to_group(kernel_kobj, &warn_count_attr.attr, NULL);
++      return 0;
++}
++late_initcall(kernel_panic_sysfs_init);
++#endif
++
+ static long no_blink(int state)
+ {
+       return 0;
+@@ -125,6 +166,19 @@ void nmi_panic(struct pt_regs *regs, const char *msg)
+ }
+ EXPORT_SYMBOL(nmi_panic);
+ 
++void check_panic_on_warn(const char *origin)
++{
++      unsigned int limit;
++
++      if (panic_on_warn)
++              panic("%s: panic_on_warn set ...\n", origin);
++
++      limit = READ_ONCE(warn_limit);
++      if (atomic_inc_return(&warn_count) >= limit && limit)
++              panic("%s: system warned too often (kernel.warn_limit is %d)",
++                    origin, limit);
++}
++
+ /**
+  *    panic - halt the system
+  *    @fmt: The text string to print
+@@ -142,6 +196,16 @@ void panic(const char *fmt, ...)
+       int old_cpu, this_cpu;
+       bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
+ 
++      if (panic_on_warn) {
++              /*
++               * This thread may hit another WARN() in the panic path.
++               * Resetting this prevents additional WARN() from panicking the
++               * system on this thread.  Other threads are blocked by the
++               * panic_mutex in panic().
++               */
++              panic_on_warn = 0;
++      }
++
+       /*
+        * Disable local interrupts. This will prevent panic_smp_self_stop
+        * from deadlocking the first cpu that invokes the panic, since
+@@ -530,16 +594,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+       if (args)
+               vprintk(args->fmt, args->args);
+ 
+-      if (panic_on_warn) {
+-              /*
+-               * This thread may hit another WARN() in the panic path.
+-               * Resetting this prevents additional WARN() from panicking the
+-               * system on this thread.  Other threads are blocked by the
+-               * panic_mutex in panic().
+-               */
+-              panic_on_warn = 0;
+-              panic("panic_on_warn set ...\n");
+-      }
++      check_panic_on_warn("kernel");
+ 
+       print_modules();
+ 
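
The new check_panic_on_warn() gives both knobs one exit point: panic_on_warn still fires immediately, while warn_limit trips only once the counter reaches a non-zero limit, so 0 never trips and 1 acts like panic_on_warn. A condensed sketch of just that decision; callers and values are stand-ins:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool panic_on_warn;
static unsigned int warn_limit;        /* 0 = no limit, 1 = panic_on_warn */
static atomic_uint warn_count;

/* Mirrors the shape of check_panic_on_warn(): either knob escalates. */
static bool should_panic(void)
{
	unsigned int limit = warn_limit;   /* READ_ONCE() analog */

	if (panic_on_warn)
		return true;
	return atomic_fetch_add(&warn_count, 1) + 1 >= limit && limit;
}

int main(void)
{
	warn_limit = 2;
	printf("warn 1 -> %s\n", should_panic() ? "panic" : "continue");
	printf("warn 2 -> %s\n", should_panic() ? "panic" : "continue");
	return 0;
}
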
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index a034642497718..46227cc48124d 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3316,8 +3316,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
+               print_ip_sym(preempt_disable_ip);
+               pr_cont("\n");
+       }
+-      if (panic_on_warn)
+-              panic("scheduling while atomic\n");
++      check_panic_on_warn("scheduling while atomic");
+ 
+       dump_stack();
+       add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 59ad83499ba25..98abff0462366 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -8674,6 +8674,8 @@ void __init early_trace_init(void)
+                       static_key_enable(&tracepoint_printk_key.key);
+       }
+       tracer_alloc_buffers();
++
++      init_events();
+ }
+ 
+ void __init trace_init(void)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 74185fb040f33..0923d1b18d1fb 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1530,6 +1530,7 @@ extern void trace_event_enable_cmd_record(bool enable);
+ extern void trace_event_enable_tgid_record(bool enable);
+ 
+ extern int event_trace_init(void);
++extern int init_events(void);
+ extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
+ extern int event_trace_del_tracer(struct trace_array *tr);
+ 
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 17b15bd978579..a56ee4ba2afbd 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -2314,6 +2314,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+               unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
+               hist_field->fn = hist_field_log2;
+               hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
++              if (!hist_field->operands[0])
++                      goto free;
+               hist_field->size = hist_field->operands[0]->size;
+               hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
+               if (!hist_field->type)
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 6e6cc64faa389..62015d62dd6f5 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1395,7 +1395,7 @@ static struct trace_event *events[] __initdata = {
+       NULL
+ };
+ 
+-__init static int init_events(void)
++__init int init_events(void)
+ {
+       struct trace_event *event;
+       int i, ret;
+@@ -1413,4 +1413,3 @@ __init static int init_events(void)
+ 
+       return 0;
+ }
+-early_initcall(init_events);
+diff --git a/mm/kasan/report.c b/mm/kasan/report.c
+index 5c169aa688fde..3ae996824a040 100644
+--- a/mm/kasan/report.c
++++ b/mm/kasan/report.c
+@@ -176,8 +176,7 @@ static void kasan_end_report(unsigned long *flags)
+       pr_err("==================================================================\n");
+       add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+       spin_unlock_irqrestore(&report_lock, *flags);
+-      if (panic_on_warn)
+-              panic("panic_on_warn set ...\n");
++      check_panic_on_warn("KASAN");
+       kasan_enable_current();
+ }
+ 
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index e87777255c474..497c8ac140d13 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1519,6 +1519,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
+                       hdev->flush(hdev);
+ 
+               if (hdev->sent_cmd) {
++                      cancel_delayed_work_sync(&hdev->cmd_timer);
+                       kfree_skb(hdev->sent_cmd);
+                       hdev->sent_cmd = NULL;
+               }
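
The hci_core.c hunk above is a classic cancel-before-free ordering fix: the command timer may dereference sent_cmd, so it must be synchronously cancelled before the skb is freed. A userspace sketch of the same shape, using a thread plus stop flag as a stand-in for delayed work:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_bool stop;
static char *sent_cmd;

/* Stand-in for the command timeout work: it may look at sent_cmd at
 * any time until it is told to stop. */
static void *cmd_timer(void *arg)
{
	while (!atomic_load(&stop))
		;  /* ... would inspect sent_cmd on a real timeout ... */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sent_cmd = malloc(16);
	pthread_create(&t, NULL, cmd_timer, NULL);

	/* Shape of the hci_dev_do_open() fix: make sure the asynchronous
	 * user has finished before freeing what it dereferences. */
	atomic_store(&stop, true);
	pthread_join(t, NULL);        /* cancel_delayed_work_sync() analog */
	free(sent_cmd);
	sent_cmd = NULL;
	puts("done");
	return 0;
}
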
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 56c240c98a567..a87774424829c 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -132,12 +132,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net)
+               return 0;
+ 
+       if (ops->id && ops->size) {
+-cleanup:
+               ng = rcu_dereference_protected(net->gen,
+                                              lockdep_is_held(&pernet_ops_rwsem));
+               ng->ptr[*ops->id] = NULL;
+       }
+ 
++cleanup:
+       kfree(data);
+ 
+ out:
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 3c58019f07187..d64522af9c3a8 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -579,8 +579,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+       spin_lock(lock);
+       if (osk) {
+               WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+-              ret = sk_nulls_del_node_init_rcu(osk);
+-      } else if (found_dup_sk) {
++              ret = sk_hashed(osk);
++              if (ret) {
++                      /* Before deleting the node, we insert a new one to make
++                       * sure that the look-up-sk process would not miss either
++                       * of them and that at least one node would exist in ehash
++                       * table all the time. Otherwise there's a tiny chance
++                       * that lookup process could find nothing in ehash table.
++                       */
++                      __sk_nulls_add_node_tail_rcu(sk, list);
++                      sk_nulls_del_node_init_rcu(osk);
++              }
++              goto unlock;
++      }
++      if (found_dup_sk) {
+               *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
+               if (*found_dup_sk)
+                       ret = false;
+@@ -589,6 +601,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
+       if (ret)
+               __sk_nulls_add_node_rcu(sk, list);
+ 
++unlock:
+       spin_unlock(lock);
+ 
+       return ret;
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index 88c5069b5d20c..fedd19c22b392 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -80,10 +80,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
+ }
+ EXPORT_SYMBOL_GPL(inet_twsk_put);
+ 
+-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+-                                 struct hlist_nulls_head *list)
++static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
++                                      struct hlist_nulls_head *list)
+ {
+-      hlist_nulls_add_head_rcu(&tw->tw_node, list);
++      hlist_nulls_add_tail_rcu(&tw->tw_node, list);
+ }
+ 
+ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+@@ -119,7 +119,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ 
+       spin_lock(lock);
+ 
+-      inet_twsk_add_node_rcu(tw, &ehead->chain);
++      inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
+ 
+       /* Step 3: Remove SK from hash chain */
+       if (__sk_nulls_del_node_init_rcu(sk))
+diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
+index 04311f7067e2e..9a6b01d85cd07 100644
+--- a/net/ipv4/metrics.c
++++ b/net/ipv4/metrics.c
+@@ -1,4 +1,5 @@
+ #include <linux/netlink.h>
++#include <linux/nospec.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/types.h>
+ #include <net/ip.h>
+@@ -24,6 +25,7 @@ int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len,
+               if (type > RTAX_MAX)
+                       return -EINVAL;
+ 
++              type = array_index_nospec(type, RTAX_MAX + 1);
+               if (type == RTAX_CC_ALGO) {
+                       char tmp[TCP_CA_NAME_MAX];
+ 
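
The metrics.c hunk above clamps the already-bounds-checked type with array_index_nospec() so it stays in range even under speculative execution. A simplified branchless illustration of the idea; the real kernel helper is arch-specific and avoids the comparison shown here, so this is only the concept:

#include <stdio.h>

/* Conceptual analog of array_index_nospec(index, size): build a mask of
 * all-ones when index < size and all-zeroes otherwise, then AND it in,
 * so a mispredicted bounds check cannot yield an out-of-range index. */
static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	unsigned long mask = 0UL - (index < size);  /* ~0UL or 0UL */

	return index & mask;
}

int main(void)
{
	printf("%lu\n", index_nospec(5, 16));   /* 5 */
	printf("%lu\n", index_nospec(99, 16));  /* 0 */
	return 0;
}
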
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index e617a98f4df61..00601bc4fdfa3 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1154,14 +1154,16 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
+                               dev->needed_headroom = dst_len;
+ 
+                       if (set_mtu) {
+-                              dev->mtu = rt->dst.dev->mtu - t_hlen;
++                              int mtu = rt->dst.dev->mtu - t_hlen;
++
+                               if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+-                                      dev->mtu -= 8;
++                                      mtu -= 8;
+                               if (dev->type == ARPHRD_ETHER)
+-                                      dev->mtu -= ETH_HLEN;
++                                      mtu -= ETH_HLEN;
+ 
+-                              if (dev->mtu < IPV6_MIN_MTU)
+-                                      dev->mtu = IPV6_MIN_MTU;
++                              if (mtu < IPV6_MIN_MTU)
++                                      mtu = IPV6_MIN_MTU;
++                              WRITE_ONCE(dev->mtu, mtu);
+                       }
+               }
+               ip6_rt_put(rt);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index b647a40376795..75a1ec2605fca 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1435,6 +1435,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+       struct __ip6_tnl_parm *p = &t->parms;
+       struct flowi6 *fl6 = &t->fl.u.ip6;
+       int t_hlen;
++      int mtu;
+ 
+       memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+       memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+@@ -1477,12 +1478,13 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
+                       dev->hard_header_len = rt->dst.dev->hard_header_len +
+                               t_hlen;
+ 
+-                      dev->mtu = rt->dst.dev->mtu - t_hlen;
++                      mtu = rt->dst.dev->mtu - t_hlen;
+                       if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+-                              dev->mtu -= 8;
++                              mtu -= 8;
+ 
+-                      if (dev->mtu < IPV6_MIN_MTU)
+-                              dev->mtu = IPV6_MIN_MTU;
++                      if (mtu < IPV6_MIN_MTU)
++                              mtu = IPV6_MIN_MTU;
++                      WRITE_ONCE(dev->mtu, mtu);
+               }
+               ip6_rt_put(rt);
+       }
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 420b9fcc10d7d..df734fe64d10a 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1082,10 +1082,12 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ 
+       if (tdev && !netif_is_l3_master(tdev)) {
+               int t_hlen = tunnel->hlen + sizeof(struct iphdr);
++              int mtu;
+ 
+-              dev->mtu = tdev->mtu - t_hlen;
+-              if (dev->mtu < IPV6_MIN_MTU)
+-                      dev->mtu = IPV6_MIN_MTU;
++              mtu = tdev->mtu - t_hlen;
++              if (mtu < IPV6_MIN_MTU)
++                      mtu = IPV6_MIN_MTU;
++              WRITE_ONCE(dev->mtu, mtu);
+       }
+ }
+ 
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 8cb62805fd684..cadeb22a48f23 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -317,22 +317,29 @@ static int sctp_packet(struct nf_conn *ct,
+       for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+               /* Special cases of Verification tag check (Sec 8.5.1) */
+               if (sch->type == SCTP_CID_INIT) {
+-                      /* Sec 8.5.1 (A) */
++                      /* (A) vtag MUST be zero */
+                       if (sh->vtag != 0)
+                               goto out_unlock;
+               } else if (sch->type == SCTP_CID_ABORT) {
+-                      /* Sec 8.5.1 (B) */
+-                      if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+-                          sh->vtag != ct->proto.sctp.vtag[!dir])
++                      /* (B) vtag MUST match own vtag if T flag is unset OR
++                       * MUST match peer's vtag if T flag is set
++                       */
++                      if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++                           sh->vtag != ct->proto.sctp.vtag[dir]) ||
++                          ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++                           sh->vtag != ct->proto.sctp.vtag[!dir]))
+                               goto out_unlock;
+               } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+-                      /* Sec 8.5.1 (C) */
+-                      if (sh->vtag != ct->proto.sctp.vtag[dir] &&
+-                          sh->vtag != ct->proto.sctp.vtag[!dir] &&
+-                          sch->flags & SCTP_CHUNK_FLAG_T)
++                      /* (C) vtag MUST match own vtag if T flag is unset OR
++                       * MUST match peer's vtag if T flag is set
++                       */
++                      if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
++                           sh->vtag != ct->proto.sctp.vtag[dir]) ||
++                          ((sch->flags & SCTP_CHUNK_FLAG_T) &&
++                           sh->vtag != ct->proto.sctp.vtag[!dir]))
+                               goto out_unlock;
+               } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
+-                      /* Sec 8.5.1 (D) */
++                      /* (D) vtag must be same as init_vtag as found in INIT_ACK */
+                       if (sh->vtag != ct->proto.sctp.vtag[dir])
+                               goto out_unlock;
+               } else if (sch->type == SCTP_CID_HEARTBEAT) {
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 66cda5e2d6b9e..955b73a9a05ed 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -1094,6 +1094,16 @@ static int tcp_packet(struct nf_conn *ct,
+                       nf_ct_kill_acct(ct, ctinfo, skb);
+                       return NF_ACCEPT;
+               }
++
++              if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
++                      /* do not renew timeout on SYN retransmit.
++                       *
++                       * Else port reuse by client or NAT middlebox can keep
++                       * entry alive indefinitely (including nat info).
++                       */
++                      return NF_ACCEPT;
++              }
++
+               /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
+                * pickup with loose=1. Avoid large ESTABLISHED timeout.
+                */
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 84d317418d184..78a0f42837870 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -375,23 +375,37 @@ static void nft_rbtree_gc(struct work_struct *work)
+       struct nft_rbtree *priv;
+       struct rb_node *node;
+       struct nft_set *set;
++      struct net *net;
++      u8 genmask;
+ 
+       priv = container_of(work, struct nft_rbtree, gc_work.work);
+       set  = nft_set_container_of(priv);
++      net  = read_pnet(&set->net);
++      genmask = nft_genmask_cur(net);
+ 
+       write_lock_bh(&priv->lock);
+       write_seqcount_begin(&priv->count);
+       for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ 
++              if (!nft_set_elem_active(&rbe->ext, genmask))
++                      continue;
++
++              /* elements are reversed in the rbtree for historical reasons,
++               * from highest to lowest value, that is why end element is
++               * always visited before the start element.
++               */
+               if (nft_rbtree_interval_end(rbe)) {
+                       rbe_end = rbe;
+                       continue;
+               }
+               if (!nft_set_elem_expired(&rbe->ext))
+                       continue;
+-              if (nft_set_elem_mark_busy(&rbe->ext))
++
++              if (nft_set_elem_mark_busy(&rbe->ext)) {
++                      rbe_end = NULL;
+                       continue;
++              }
+ 
+               if (rbe_prev) {
+                       rb_erase(&rbe_prev->node, &priv->root);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 6ffa83319d08b..6a49c0aa55bda 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -578,12 +578,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
+       if (nlk_sk(sk)->bound)
+               goto err;
+ 
+-      err = -ENOMEM;
+-      if (BITS_PER_LONG > 32 &&
+-          unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
+-              goto err;
++      /* portid can be read locklessly from netlink_getname(). */
++      WRITE_ONCE(nlk_sk(sk)->portid, portid);
+ 
+-      nlk_sk(sk)->portid = portid;
+       sock_hold(sk);
+ 
+       err = __netlink_insert(table, sk);
+@@ -1093,9 +1090,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+               return -EINVAL;
+ 
+       if (addr->sa_family == AF_UNSPEC) {
+-              sk->sk_state    = NETLINK_UNCONNECTED;
+-              nlk->dst_portid = 0;
+-              nlk->dst_group  = 0;
++              /* paired with READ_ONCE() in netlink_getsockbyportid() */
++              WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
++              /* dst_portid and dst_group can be read locklessly */
++              WRITE_ONCE(nlk->dst_portid, 0);
++              WRITE_ONCE(nlk->dst_group, 0);
+               return 0;
+       }
+       if (addr->sa_family != AF_NETLINK)
+@@ -1116,9 +1115,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+               err = netlink_autobind(sock);
+ 
+       if (err == 0) {
+-              sk->sk_state    = NETLINK_CONNECTED;
+-              nlk->dst_portid = nladdr->nl_pid;
+-              nlk->dst_group  = ffs(nladdr->nl_groups);
++              /* paired with READ_ONCE() in netlink_getsockbyportid() */
++              WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
++              /* dst_portid and dst_group can be read locklessly */
++              WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
++              WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
+       }
+ 
+       return err;
+@@ -1135,10 +1136,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
+       nladdr->nl_pad = 0;
+ 
+       if (peer) {
+-              nladdr->nl_pid = nlk->dst_portid;
+-              nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
++              /* Paired with WRITE_ONCE() in netlink_connect() */
++              nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
+               nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
+       } else {
+-              nladdr->nl_pid = nlk->portid;
++              /* Paired with WRITE_ONCE() in netlink_insert() */
++              nladdr->nl_pid = READ_ONCE(nlk->portid);
+               netlink_lock_table();
+               nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
+               netlink_unlock_table();
+@@ -1165,8 +1168,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
+ 
+       /* Don't bother queuing skb if kernel socket has no input function */
+       nlk = nlk_sk(sock);
+-      if (sock->sk_state == NETLINK_CONNECTED &&
+-          nlk->dst_portid != nlk_sk(ssk)->portid) {
++      /* dst_portid and sk_state can be changed in netlink_connect() */
++      if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
++          READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
+               sock_put(sock);
+               return ERR_PTR(-ECONNREFUSED);
+       }
+@@ -1878,8 +1882,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+                       goto out;
+               netlink_skb_flags |= NETLINK_SKB_DST;
+       } else {
+-              dst_portid = nlk->dst_portid;
+-              dst_group = nlk->dst_group;
++              /* Paired with WRITE_ONCE() in netlink_connect() */
++              dst_portid = READ_ONCE(nlk->dst_portid);
++              dst_group = READ_ONCE(nlk->dst_group);
+       }
+ 
+       /* Paired with WRITE_ONCE() in netlink_insert() */
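
All of these netlink changes pair WRITE_ONCE() with READ_ONCE() so fields like portid, dst_portid and sk_state can be read without the socket lock while connect() rewrites them. In portable C11 the closest analog is a relaxed atomic, sketched here on a hypothetical two-field struct (not the real netlink layout):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct nl_sock {
	atomic_uint dst_portid;   /* written by connect(), read locklessly */
	atomic_uint state;
};

static struct nl_sock sk;

static void *reader(void *arg)
{
	/* READ_ONCE() analog: a relaxed load the compiler may not tear,
	 * invent, or hoist out of the access. */
	unsigned int portid =
		atomic_load_explicit(&sk.dst_portid, memory_order_relaxed);
	printf("saw dst_portid=%u\n", portid);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* WRITE_ONCE() analog: a relaxed store that cannot be split. */
	atomic_store_explicit(&sk.dst_portid, 42, memory_order_relaxed);
	atomic_store_explicit(&sk.state, 1, memory_order_relaxed);

	pthread_create(&t, NULL, reader, NULL);
	pthread_join(t, NULL);
	return 0;
}
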
+diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
+index 426d496095244..2bf99bd5be58c 100644
+--- a/net/netrom/nr_timer.c
++++ b/net/netrom/nr_timer.c
+@@ -124,6 +124,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
+                  is accepted() it isn't 'dead' so doesn't get removed. */
+               if (sock_flag(sk, SOCK_DESTROY) ||
+                   (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
++                      sock_hold(sk);
+                       bh_unlock_sock(sk);
+                       nr_destroy_socket(sk);
+                       goto out;
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index 4fa015208aab1..3290f2275b857 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -171,6 +171,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
+       cancel_work_sync(&local->rx_work);
+       cancel_work_sync(&local->timeout_work);
+       kfree_skb(local->rx_pending);
++      local->rx_pending = NULL;
+       del_timer_sync(&local->sdreq_timer);
+       cancel_work_sync(&local->sdreq_timeout_work);
+       nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
+diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
+index f8a2832456728..d723942e5e65d 100644
+--- a/net/sctp/bind_addr.c
++++ b/net/sctp/bind_addr.c
+@@ -88,6 +88,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+               }
+       }
+ 
++      /* If somehow no addresses were found that can be used with this
++       * scope, it's an error.
++       */
++      if (list_empty(&dest->address_list))
++              error = -ENETUNREACH;
++
+ out:
+       if (error)
+               sctp_bind_addr_clean(dest);
+diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
+index cca5a3012fee2..221eaadffb09c 100644
+--- a/security/tomoyo/Makefile
++++ b/security/tomoyo/Makefile
+@@ -10,7 +10,7 @@ endef
+ quiet_cmd_policy  = POLICY  $@
+       cmd_policy  = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
+ 
+-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
++$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
+       $(call if_changed,policy)
+ 
+ $(obj)/common.o: $(obj)/builtin-policy.h
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index c0ab27368a345..55f3e7fa1c5d0 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -159,6 +159,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+               "panic",
+               "do_exit",
+               "do_task_dead",
++              "make_task_dead",
+               "__module_put_and_exit",
+               "complete_and_exit",
+               "kvm_spurious_fault",
+@@ -167,7 +168,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+               "fortify_panic",
+               "usercopy_abort",
+               "machine_real_restart",
+-              "rewind_stack_do_exit",
++              "rewind_stack_and_make_dead",
+       };
+ 
+       if (func->bind == STB_WEAK)
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 4c23779e271a3..3641a96a2de4a 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -163,11 +163,11 @@ static const char *normalize_arch(char *arch)
+ 
+ const char *perf_env__arch(struct perf_env *env)
+ {
+-      struct utsname uts;
+       char *arch_name;
+ 
+       if (!env || !env->arch) { /* Assume local operation */
+-              if (uname(&uts) < 0)
++              static struct utsname uts = { .machine[0] = '\0', };
++              if (uts.machine[0] == '\0' && uname(&uts) < 0)
+                       return NULL;
+               arch_name = uts.machine;
+       } else
