commit:     9a220db3d81c396e7413cfea5513377e52254613
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 30 10:28:53 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr 30 10:28:53 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a220db3

Linux patch 4.9.97

 0000_README             |    4 +
 1096_linux-4.9.97.patch | 3632 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3636 insertions(+)

diff --git a/0000_README b/0000_README
index 0d1f889..efef388 100644
--- a/0000_README
+++ b/0000_README
@@ -427,6 +427,10 @@ Patch:  1095_linux-4.9.96.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.96
 
+Patch:  1096_linux-4.9.97.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.97
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1096_linux-4.9.97.patch b/1096_linux-4.9.97.patch
new file mode 100644
index 0000000..26c13e0
--- /dev/null
+++ b/1096_linux-4.9.97.patch
@@ -0,0 +1,3632 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 466c039c622b..5f9e51436a99 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2640,6 +2640,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 
+       noalign         [KNL,ARM]
+ 
++      noaltinstr      [S390] Disables alternative instructions patching
++                      (CPU alternatives feature).
++
+       noapic          [SMP,APIC] Tells the kernel to not make use of any
+                       IOAPICs that may be present in the system.
+ 
+diff --git a/Makefile b/Makefile
+index 50ae573e8951..ee3e943c3bd9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 96
++SUBLEVEL = 97
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 2d2fd79ced9d..34fbbf8fdeaa 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -95,6 +95,7 @@ config MIPS_GENERIC
+       select PCI_DRIVERS_GENERIC
+       select PINCTRL
+       select SMP_UP if SMP
++      select SWAP_IO_SPACE
+       select SYS_HAS_CPU_MIPS32_R1
+       select SYS_HAS_CPU_MIPS32_R2
+       select SYS_HAS_CPU_MIPS32_R6
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 9aa0d04c9dcc..1c4a595e8224 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -118,6 +118,7 @@ config S390
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_DEVICES if !SMP
++      select GENERIC_CPU_VULNERABILITIES
+       select GENERIC_FIND_FIRST_BIT
+       select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_TIME_VSYSCALL
+@@ -704,6 +705,51 @@ config SECCOMP
+ 
+         If unsure, say Y.
+ 
++config KERNEL_NOBP
++      def_bool n
++      prompt "Enable modified branch prediction for the kernel by default"
++      help
++        If this option is selected the kernel will switch to a modified
++        branch prediction mode if the firmware interface is available.
++        The modified branch prediction mode improves the behaviour in
++        regard to speculative execution.
++
++        With the option enabled the kernel parameter "nobp=0" or "nospec"
++        can be used to run the kernel in the normal branch prediction mode.
++
++        With the option disabled the modified branch prediction mode is
++        enabled with the "nobp=1" kernel parameter.
++
++        If unsure, say N.
++
++config EXPOLINE
++      def_bool n
++      prompt "Avoid speculative indirect branches in the kernel"
++      help
++        Compile the kernel with the expoline compiler options to guard
++        against kernel-to-user data leaks by avoiding speculative indirect
++        branches.
++        Requires a compiler with -mindirect-branch=thunk support for full
++        protection. The kernel may run slower.
++
++        If unsure, say N.
++
++choice
++      prompt "Expoline default"
++      depends on EXPOLINE
++      default EXPOLINE_FULL
++
++config EXPOLINE_OFF
++      bool "spectre_v2=off"
++
++config EXPOLINE_AUTO
++      bool "spectre_v2=auto"
++
++config EXPOLINE_FULL
++      bool "spectre_v2=on"
++
++endchoice
++
+ endmenu
+ 
+ menu "Power Management"
+@@ -753,6 +799,7 @@ config PFAULT
+ config SHARED_KERNEL
+       bool "VM shared kernel support"
+       depends on !JUMP_LABEL
++      depends on !ALTERNATIVES
+       help
+         Select this option, if you want to share the text segment of the
+         Linux kernel between different VM guests. This reduces memory
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 54e00526b8df..bef67c0f63e2 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -79,6 +79,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
+ cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
+ endif
+ 
++ifdef CONFIG_EXPOLINE
++  ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
++    CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
++    CC_FLAGS_EXPOLINE += -mfunction-return=thunk
++    CC_FLAGS_EXPOLINE += -mindirect-branch-table
++    export CC_FLAGS_EXPOLINE
++    cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
++  endif
++endif
++
+ ifdef CONFIG_FUNCTION_TRACER
+ # make use of hotpatch feature if the compiler supports it
+ cc_hotpatch   := -mhotpatch=0,3
+diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
+new file mode 100644
+index 000000000000..a72002056b54
+--- /dev/null
++++ b/arch/s390/include/asm/alternative.h
+@@ -0,0 +1,149 @@
++#ifndef _ASM_S390_ALTERNATIVE_H
++#define _ASM_S390_ALTERNATIVE_H
++
++#ifndef __ASSEMBLY__
++
++#include <linux/types.h>
++#include <linux/stddef.h>
++#include <linux/stringify.h>
++
++struct alt_instr {
++      s32 instr_offset;       /* original instruction */
++      s32 repl_offset;        /* offset to replacement instruction */
++      u16 facility;           /* facility bit set for replacement */
++      u8  instrlen;           /* length of original instruction */
++      u8  replacementlen;     /* length of new instruction */
++} __packed;
++
++void apply_alternative_instructions(void);
++void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
++
++/*
++ * |661:       |662:    |6620      |663:
++ * +-----------+---------------------+
++ * | oldinstr  | oldinstr_padding    |
++ * |         +----------+----------+
++ * |         |          |          |
++ * |         | >6 bytes |6/4/2 nops|
++ * |         |6 bytes jg----------->
++ * +-----------+---------------------+
++ *             ^^ static padding ^^
++ *
++ * .altinstr_replacement section
++ * +---------------------+-----------+
++ * |6641:                          |6651:
++ * | alternative instr 1           |
++ * +-----------+---------+- - - - - -+
++ * |6642:              |6652:      |
++ * | alternative instr 2 | padding
++ * +---------------------+- - - - - -+
++ *                      ^ runtime ^
++ *
++ * .altinstructions section
++ * +---------------------------------+
++ * | alt_instr entries for each      |
++ * | alternative instr                     |
++ * +---------------------------------+
++ */
++
++#define b_altinstr(num)       "664"#num
++#define e_altinstr(num)       "665"#num
++
++#define e_oldinstr_pad_end    "663"
++#define oldinstr_len          "662b-661b"
++#define oldinstr_total_len    e_oldinstr_pad_end"b-661b"
++#define altinstr_len(num)     e_altinstr(num)"b-"b_altinstr(num)"b"
++#define oldinstr_pad_len(num) \
++      "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
++      "((" altinstr_len(num) ")-(" oldinstr_len "))"
++
++#define INSTR_LEN_SANITY_CHECK(len)                                   \
++      ".if " len " > 254\n"                                           \
++      "\t.error \"cpu alternatives does not support instructions "    \
++              "blocks > 254 bytes\"\n"                                \
++      ".endif\n"                                                      \
++      ".if (" len ") %% 2\n"                                          \
++      "\t.error \"cpu alternatives instructions length is odd\"\n"    \
++      ".endif\n"
++
++#define OLDINSTR_PADDING(oldinstr, num)                                       \
++      ".if " oldinstr_pad_len(num) " > 6\n"                           \
++      "\tjg " e_oldinstr_pad_end "f\n"                                \
++      "6620:\n"                                                       \
++      "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
++      ".else\n"                                                       \
++      "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"        \
++      "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"   \
++      "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"  \
++      ".endif\n"
++
++#define OLDINSTR(oldinstr, num)                                               \
++      "661:\n\t" oldinstr "\n662:\n"                                  \
++      OLDINSTR_PADDING(oldinstr, num)                                 \
++      e_oldinstr_pad_end ":\n"                                        \
++      INSTR_LEN_SANITY_CHECK(oldinstr_len)
++
++#define OLDINSTR_2(oldinstr, num1, num2)                              \
++      "661:\n\t" oldinstr "\n662:\n"                                  \
++      ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n"         \
++      OLDINSTR_PADDING(oldinstr, num2)                                \
++      ".else\n"                                                       \
++      OLDINSTR_PADDING(oldinstr, num1)                                \
++      ".endif\n"                                                      \
++      e_oldinstr_pad_end ":\n"                                        \
++      INSTR_LEN_SANITY_CHECK(oldinstr_len)
++
++#define ALTINSTR_ENTRY(facility, num)                                 \
++      "\t.long 661b - .\n"                    /* old instruction */   \
++      "\t.long " b_altinstr(num)"b - .\n"     /* alt instruction */   \
++      "\t.word " __stringify(facility) "\n"   /* facility bit    */   \
++      "\t.byte " oldinstr_total_len "\n"      /* source len      */   \
++      "\t.byte " altinstr_len(num) "\n"       /* alt instruction len */
++
++#define ALTINSTR_REPLACEMENT(altinstr, num)   /* replacement */       \
++      b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"      \
++      INSTR_LEN_SANITY_CHECK(altinstr_len(num))
++
++/* alternative assembly primitive: */
++#define ALTERNATIVE(oldinstr, altinstr, facility) \
++      ".pushsection .altinstr_replacement, \"ax\"\n"                  \
++      ALTINSTR_REPLACEMENT(altinstr, 1)                               \
++      ".popsection\n"                                                 \
++      OLDINSTR(oldinstr, 1)                                           \
++      ".pushsection .altinstructions,\"a\"\n"                         \
++      ALTINSTR_ENTRY(facility, 1)                                     \
++      ".popsection\n"
++
++#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
++      ".pushsection .altinstr_replacement, \"ax\"\n"                  \
++      ALTINSTR_REPLACEMENT(altinstr1, 1)                              \
++      ALTINSTR_REPLACEMENT(altinstr2, 2)                              \
++      ".popsection\n"                                                 \
++      OLDINSTR_2(oldinstr, 1, 2)                                      \
++      ".pushsection .altinstructions,\"a\"\n"                         \
++      ALTINSTR_ENTRY(facility1, 1)                                    \
++      ALTINSTR_ENTRY(facility2, 2)                                    \
++      ".popsection\n"
++
++/*
++ * Alternative instructions for different CPU types or capabilities.
++ *
++ * This allows to use optimized instructions even on generic binary
++ * kernels.
++ *
++ * oldinstr is padded with jump and nops at compile time if altinstr is
++ * longer. altinstr is padded with jump and nops at run-time during patching.
++ *
++ * For non barrier like inlines please define new variants
++ * without volatile and memory clobber.
++ */
++#define alternative(oldinstr, altinstr, facility)                     \
++      asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
++
++#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
++      asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1,          \
++                                 altinstr2, facility2) ::: "memory")
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_ALTERNATIVE_H */
+diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
+index 5c8db3ce61c8..03b2e5bf1206 100644
+--- a/arch/s390/include/asm/barrier.h
++++ b/arch/s390/include/asm/barrier.h
+@@ -48,6 +48,30 @@ do {                                                                        \
+ #define __smp_mb__before_atomic()     barrier()
+ #define __smp_mb__after_atomic()      barrier()
+ 
++/**
++ * array_index_mask_nospec - generate a mask for array_idx() that is
++ * ~0UL when the bounds check succeeds and 0 otherwise
++ * @index: array element index
++ * @size: number of elements in array
++ */
++#define array_index_mask_nospec array_index_mask_nospec
++static inline unsigned long array_index_mask_nospec(unsigned long index,
++                                                  unsigned long size)
++{
++      unsigned long mask;
++
++      if (__builtin_constant_p(size) && size > 0) {
++              asm("   clgr    %2,%1\n"
++                  "   slbgr   %0,%0\n"
++                  :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
++              return mask;
++      }
++      asm("   clgr    %1,%2\n"
++          "   slbgr   %0,%0\n"
++          :"=d" (mask) : "d" (size), "d" (index) :"cc");
++      return ~mask;
++}
++
+ #include <asm-generic/barrier.h>
+ 
+ #endif /* __ASM_BARRIER_H */
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index 09b406db7529..7a8a1457dbb8 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -17,6 +17,24 @@
+ 
+ #define MAX_FACILITY_BIT (256*8)      /* stfle_fac_list has 256 bytes */
+ 
++static inline void __set_facility(unsigned long nr, void *facilities)
++{
++      unsigned char *ptr = (unsigned char *) facilities;
++
++      if (nr >= MAX_FACILITY_BIT)
++              return;
++      ptr[nr >> 3] |= 0x80 >> (nr & 7);
++}
++
++static inline void __clear_facility(unsigned long nr, void *facilities)
++{
++      unsigned char *ptr = (unsigned char *) facilities;
++
++      if (nr >= MAX_FACILITY_BIT)
++              return;
++      ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
++}
++
+ static inline int __test_facility(unsigned long nr, void *facilities)
+ {
+       unsigned char *ptr;
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index a41faf34b034..5792590d0e7c 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -181,7 +181,8 @@ struct kvm_s390_sie_block {
+       __u16   ipa;                    /* 0x0056 */
+       __u32   ipb;                    /* 0x0058 */
+       __u32   scaoh;                  /* 0x005c */
+-      __u8    reserved60;             /* 0x0060 */
++#define FPF_BPBC      0x20
++      __u8    fpf;                    /* 0x0060 */
+       __u8    ecb;                    /* 0x0061 */
+       __u8    ecb2;                   /* 0x0062 */
+ #define ECB3_AES 0x04
+diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
+index 7b93b78f423c..ad4e0cee1557 100644
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -135,7 +135,9 @@ struct lowcore {
+       /* Per cpu primary space access list */
+       __u32   paste[16];                      /* 0x0400 */
+ 
+-      __u8    pad_0x04c0[0x0e00-0x0440];      /* 0x0440 */
++      /* br %r1 trampoline */
++      __u16   br_r1_trampoline;               /* 0x0440 */
++      __u8    pad_0x0442[0x0e00-0x0442];      /* 0x0442 */
+ 
+       /*
+        * 0xe00 contains the address of the IPL Parameter Information
+@@ -150,7 +152,8 @@ struct lowcore {
+       __u8    pad_0x0e20[0x0f00-0x0e20];      /* 0x0e20 */
+ 
+       /* Extended facility list */
+-      __u64   stfle_fac_list[32];             /* 0x0f00 */
++      __u64   stfle_fac_list[16];             /* 0x0f00 */
++      __u64   alt_stfle_fac_list[16];         /* 0x0f80 */
+       __u8    pad_0x1000[0x11b0-0x1000];      /* 0x1000 */
+ 
+       /* Pointer to vector register save area */
+diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
+new file mode 100644
+index 000000000000..b4bd8c41e9d3
+--- /dev/null
++++ b/arch/s390/include/asm/nospec-branch.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_EXPOLINE_H
++#define _ASM_S390_EXPOLINE_H
++
++#ifndef __ASSEMBLY__
++
++#include <linux/types.h>
++
++extern int nospec_disable;
++
++void nospec_init_branches(void);
++void nospec_auto_detect(void);
++void nospec_revert(s32 *start, s32 *end);
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_EXPOLINE_H */
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 6bcbbece082b..d5842126ec70 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -84,6 +84,7 @@ void cpu_detect_mhz_feature(void);
+ extern const struct seq_operations cpuinfo_op;
+ extern int sysctl_ieee_emulation_warnings;
+ extern void execve_tail(void);
++extern void __bpon(void);
+ 
+ /*
+  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
+@@ -359,6 +360,9 @@ extern void memcpy_absolute(void *, void *, size_t);
+       memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));        \
+ }
+ 
++extern int s390_isolate_bp(void);
++extern int s390_isolate_bp_guest(void);
++
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif /* __ASM_S390_PROCESSOR_H */
+diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
+index f15c0398c363..84f2ae44b4e9 100644
+--- a/arch/s390/include/asm/thread_info.h
++++ b/arch/s390/include/asm/thread_info.h
+@@ -79,6 +79,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ #define TIF_SECCOMP           5       /* secure computing */
+ #define TIF_SYSCALL_TRACEPOINT        6       /* syscall tracepoint instrumentation */
+ #define TIF_UPROBE            7       /* breakpointed or single-stepping */
++#define TIF_ISOLATE_BP                8       /* Run process with isolated BP */
++#define TIF_ISOLATE_BP_GUEST  9       /* Run KVM guests with isolated BP */
+ #define TIF_31BIT             16      /* 32bit process */
+ #define TIF_MEMDIE            17      /* is terminating due to OOM killer */
+ #define TIF_RESTORE_SIGMASK   18      /* restore signal mask in do_signal() */
+@@ -94,6 +96,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ #define _TIF_SECCOMP          _BITUL(TIF_SECCOMP)
+ #define _TIF_SYSCALL_TRACEPOINT       _BITUL(TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_UPROBE           _BITUL(TIF_UPROBE)
++#define _TIF_ISOLATE_BP               _BITUL(TIF_ISOLATE_BP)
++#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
+ #define _TIF_31BIT            _BITUL(TIF_31BIT)
+ #define _TIF_SINGLE_STEP      _BITUL(TIF_SINGLE_STEP)
+ 
+diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
+index a2ffec4139ad..81c02e198527 100644
+--- a/arch/s390/include/uapi/asm/kvm.h
++++ b/arch/s390/include/uapi/asm/kvm.h
+@@ -197,6 +197,7 @@ struct kvm_guest_debug_arch {
+ #define KVM_SYNC_VRS    (1UL << 6)
+ #define KVM_SYNC_RICCB  (1UL << 7)
+ #define KVM_SYNC_FPRS   (1UL << 8)
++#define KVM_SYNC_BPBC (1UL << 10)
+ /* definition of registers in kvm_run */
+ struct kvm_sync_regs {
+       __u64 prefix;   /* prefix register */
+@@ -217,7 +218,9 @@ struct kvm_sync_regs {
+       };
+       __u8  reserved[512];    /* for future vector expansion */
+       __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
+-      __u8 padding[52];       /* riccb needs to be 64byte aligned */
++      __u8 bpbc : 1;          /* bp mode */
++      __u8 reserved2 : 7;
++      __u8 padding1[51];      /* riccb needs to be 64byte aligned */
+       __u8 riccb[64];         /* runtime instrumentation controls block */
+ };
+ 
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index 1f0fe98f6db9..0501cac2ab95 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -42,6 +42,7 @@ ifneq ($(CC_FLAGS_MARCH),-march=z900)
+ CFLAGS_REMOVE_sclp.o  += $(CC_FLAGS_MARCH)
+ CFLAGS_sclp.o         += -march=z900
+ CFLAGS_REMOVE_als.o   += $(CC_FLAGS_MARCH)
++CFLAGS_REMOVE_als.o   += $(CC_FLAGS_EXPOLINE)
+ CFLAGS_als.o          += -march=z900
+ AFLAGS_REMOVE_head.o  += $(CC_FLAGS_MARCH)
+ AFLAGS_head.o         += -march=z900
+@@ -57,10 +58,13 @@ obj-y      += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
+ obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
+ obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+ obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
+-obj-y += entry.o reipl.o relocate_kernel.o
++obj-y += entry.o reipl.o relocate_kernel.o alternative.o
++obj-y += nospec-branch.o
+ 
+ extra-y                               += head.o head64.o vmlinux.lds
+ 
++CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
++
+ obj-$(CONFIG_MODULES)         += module.o
+ obj-$(CONFIG_SMP)             += smp.o
+ obj-$(CONFIG_SCHED_TOPOLOGY)  += topology.o
+diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
+new file mode 100644
+index 000000000000..b57b293998dc
+--- /dev/null
++++ b/arch/s390/kernel/alternative.c
+@@ -0,0 +1,112 @@
++#include <linux/module.h>
++#include <asm/alternative.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
++
++#define MAX_PATCH_LEN (255 - 1)
++
++static int __initdata_or_module alt_instr_disabled;
++
++static int __init disable_alternative_instructions(char *str)
++{
++      alt_instr_disabled = 1;
++      return 0;
++}
++
++early_param("noaltinstr", disable_alternative_instructions);
++
++struct brcl_insn {
++      u16 opc;
++      s32 disp;
++} __packed;
++
++static u16 __initdata_or_module nop16 = 0x0700;
++static u32 __initdata_or_module nop32 = 0x47000000;
++static struct brcl_insn __initdata_or_module nop48 = {
++      0xc004, 0
++};
++
++static const void *nops[] __initdata_or_module = {
++      &nop16,
++      &nop32,
++      &nop48
++};
++
++static void __init_or_module add_jump_padding(void *insns, unsigned int len)
++{
++      struct brcl_insn brcl = {
++              0xc0f4,
++              len / 2
++      };
++
++      memcpy(insns, &brcl, sizeof(brcl));
++      insns += sizeof(brcl);
++      len -= sizeof(brcl);
++
++      while (len > 0) {
++              memcpy(insns, &nop16, 2);
++              insns += 2;
++              len -= 2;
++      }
++}
++
++static void __init_or_module add_padding(void *insns, unsigned int len)
++{
++      if (len > 6)
++              add_jump_padding(insns, len);
++      else if (len >= 2)
++              memcpy(insns, nops[len / 2 - 1], len);
++}
++
++static void __init_or_module __apply_alternatives(struct alt_instr *start,
++                                                struct alt_instr *end)
++{
++      struct alt_instr *a;
++      u8 *instr, *replacement;
++      u8 insnbuf[MAX_PATCH_LEN];
++
++      /*
++       * The scan order should be from start to end. A later scanned
++       * alternative code can overwrite previously scanned alternative code.
++       */
++      for (a = start; a < end; a++) {
++              int insnbuf_sz = 0;
++
++              instr = (u8 *)&a->instr_offset + a->instr_offset;
++              replacement = (u8 *)&a->repl_offset + a->repl_offset;
++
++              if (!__test_facility(a->facility,
++                                   S390_lowcore.alt_stfle_fac_list))
++                      continue;
++
++              if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
++                      WARN_ONCE(1, "cpu alternatives instructions length is "
++                                   "odd, skipping patching\n");
++                      continue;
++              }
++
++              memcpy(insnbuf, replacement, a->replacementlen);
++              insnbuf_sz = a->replacementlen;
++
++              if (a->instrlen > a->replacementlen) {
++                      add_padding(insnbuf + a->replacementlen,
++                                  a->instrlen - a->replacementlen);
++                      insnbuf_sz += a->instrlen - a->replacementlen;
++              }
++
++              s390_kernel_write(instr, insnbuf, insnbuf_sz);
++      }
++}
++
++void __init_or_module apply_alternatives(struct alt_instr *start,
++                                       struct alt_instr *end)
++{
++      if (!alt_instr_disabled)
++              __apply_alternatives(start, end);
++}
++
++extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
++void __init apply_alternative_instructions(void)
++{
++      apply_alternatives(__alt_instructions, __alt_instructions_end);
++}
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 62578989c74d..0c7a7d5d95f1 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -299,6 +299,11 @@ static noinline __init void setup_facility_list(void)
+ {
+       stfle(S390_lowcore.stfle_fac_list,
+             ARRAY_SIZE(S390_lowcore.stfle_fac_list));
++      memcpy(S390_lowcore.alt_stfle_fac_list,
++             S390_lowcore.stfle_fac_list,
++             sizeof(S390_lowcore.alt_stfle_fac_list));
++      if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
++              __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ 
+ static __init void detect_diag9c(void)
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 3bc2825173ef..1996afeb2e81 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -105,6 +105,7 @@ _PIF_WORK  = (_PIF_PER_TRAP)
+       j       3f
+ 1:    LAST_BREAK %r14
+       UPDATE_VTIME %r14,%r15,\timer
++      BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ 2:    lg      %r15,__LC_ASYNC_STACK   # load async stack
+ 3:    la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       .endm
+@@ -163,6 +164,130 @@ _PIF_WORK        = (_PIF_PER_TRAP)
+               tm      off+\addr, \mask
+       .endm
+ 
++      .macro BPOFF
++      .pushsection .altinstr_replacement, "ax"
++660:  .long   0xb2e8c000
++      .popsection
++661:  .long   0x47000000
++      .pushsection .altinstructions, "a"
++      .long 661b - .
++      .long 660b - .
++      .word 82
++      .byte 4
++      .byte 4
++      .popsection
++      .endm
++
++      .macro BPON
++      .pushsection .altinstr_replacement, "ax"
++662:  .long   0xb2e8d000
++      .popsection
++663:  .long   0x47000000
++      .pushsection .altinstructions, "a"
++      .long 663b - .
++      .long 662b - .
++      .word 82
++      .byte 4
++      .byte 4
++      .popsection
++      .endm
++
++      .macro BPENTER tif_ptr,tif_mask
++      .pushsection .altinstr_replacement, "ax"
++662:  .word   0xc004, 0x0000, 0x0000  # 6 byte nop
++      .word   0xc004, 0x0000, 0x0000  # 6 byte nop
++      .popsection
++664:  TSTMSK  \tif_ptr,\tif_mask
++      jz      . + 8
++      .long   0xb2e8d000
++      .pushsection .altinstructions, "a"
++      .long 664b - .
++      .long 662b - .
++      .word 82
++      .byte 12
++      .byte 12
++      .popsection
++      .endm
++
++      .macro BPEXIT tif_ptr,tif_mask
++      TSTMSK  \tif_ptr,\tif_mask
++      .pushsection .altinstr_replacement, "ax"
++662:  jnz     . + 8
++      .long   0xb2e8d000
++      .popsection
++664:  jz      . + 8
++      .long   0xb2e8c000
++      .pushsection .altinstructions, "a"
++      .long 664b - .
++      .long 662b - .
++      .word 82
++      .byte 8
++      .byte 8
++      .popsection
++      .endm
++
++#ifdef CONFIG_EXPOLINE
++
++      .macro GEN_BR_THUNK name,reg,tmp
++      .section .text.\name,"axG",@progbits,\name,comdat
++      .globl \name
++      .hidden \name
++      .type \name,@function
++\name:
++      .cfi_startproc
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++      exrl    0,0f
++#else
++      larl    \tmp,0f
++      ex      0,0(\tmp)
++#endif
++      j       .
++0:    br      \reg
++      .cfi_endproc
++      .endm
++
++      GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
++      GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
++      GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
++
++      .macro BASR_R14_R9
++0:    brasl   %r14,__s390x_indirect_jump_r1use_r9
++      .pushsection .s390_indirect_branches,"a",@progbits
++      .long   0b-.
++      .popsection
++      .endm
++
++      .macro BR_R1USE_R14
++0:    jg      __s390x_indirect_jump_r1use_r14
++      .pushsection .s390_indirect_branches,"a",@progbits
++      .long   0b-.
++      .popsection
++      .endm
++
++      .macro BR_R11USE_R14
++0:    jg      __s390x_indirect_jump_r11use_r14
++      .pushsection .s390_indirect_branches,"a",@progbits
++      .long   0b-.
++      .popsection
++      .endm
++
++#else /* CONFIG_EXPOLINE */
++
++      .macro BASR_R14_R9
++      basr    %r14,%r9
++      .endm
++
++      .macro BR_R1USE_R14
++      br      %r14
++      .endm
++
++      .macro BR_R11USE_R14
++      br      %r14
++      .endm
++
++#endif /* CONFIG_EXPOLINE */
++
++
+       .section .kprobes.text, "ax"
+ .Ldummy:
+       /*
+@@ -175,6 +300,11 @@ _PIF_WORK = (_PIF_PER_TRAP)
+        */
+       nop     0
+ 
++ENTRY(__bpon)
++      .globl __bpon
++      BPON
++      BR_R1USE_R14
++
+ /*
+  * Scheduler resume function, called by switch_to
+  *  gpr2 = (task_struct *) prev
+@@ -201,9 +331,9 @@ ENTRY(__switch_to)
+       mvc     __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+       lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
+       TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+-      bzr     %r14
++      jz      0f
+       .insn   s,0xb2800000,__LC_LPP           # set program parameter
+-      br      %r14
++0:    BR_R1USE_R14
+ 
+ .L__critical_start:
+ 
+@@ -215,9 +345,11 @@ ENTRY(__switch_to)
+  */
+ ENTRY(sie64a)
+       stmg    %r6,%r14,__SF_GPRS(%r15)        # save kernel registers
++      lg      %r12,__LC_CURRENT
+       stg     %r2,__SF_EMPTY(%r15)            # save control block pointer
+       stg     %r3,__SF_EMPTY+8(%r15)          # save guest register save area
+       xc      __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
++      mvc     __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
+       TSTMSK  __LC_CPU_FLAGS,_CIF_FPU         # load guest fp/vx registers ?
+       jno     .Lsie_load_guest_gprs
+       brasl   %r14,load_fpu_regs              # load guest fp/vx regs
+@@ -234,7 +366,11 @@ ENTRY(sie64a)
+       jnz     .Lsie_skip
+       TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
+       jo      .Lsie_skip                      # exit if fp/vx regs changed
++      BPEXIT  __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+       sie     0(%r14)
++.Lsie_exit:
++      BPOFF
++      BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ .Lsie_skip:
+       ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
+@@ -255,9 +391,15 @@ ENTRY(sie64a)
+ sie_exit:
+       lg      %r14,__SF_EMPTY+8(%r15)         # load guest register save area
+       stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
++      xgr     %r0,%r0                         # clear guest registers to
++      xgr     %r1,%r1                         # prevent speculative use
++      xgr     %r2,%r2
++      xgr     %r3,%r3
++      xgr     %r4,%r4
++      xgr     %r5,%r5
+       lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
+       lg      %r2,__SF_EMPTY+16(%r15)         # return exit reason code
+-      br      %r14
++      BR_R1USE_R14
+ .Lsie_fault:
+       lghi    %r14,-EFAULT
+       stg     %r14,__SF_EMPTY+16(%r15)        # set exit reason code
+@@ -280,6 +422,7 @@ ENTRY(system_call)
+       stpt    __LC_SYNC_ENTER_TIMER
+ .Lsysc_stmg:
+       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
++      BPOFF
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       lghi    %r14,_PIF_SYSCALL
+@@ -289,12 +432,15 @@ ENTRY(system_call)
+       LAST_BREAK %r13
+ .Lsysc_vtime:
+       UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
++      BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+       mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+       mvc     __PT_INT_CODE(4,%r11),__LC_SVC_ILC
+       stg     %r14,__PT_FLAGS(%r11)
+ .Lsysc_do_svc:
++      # clear user controlled register to prevent speculative use
++      xgr     %r0,%r0
+       lg      %r10,__TI_sysc_table(%r12)      # address of system call table
+       llgh    %r8,__PT_INT_CODE+2(%r11)
+       slag    %r8,%r8,2                       # shift and test for svc 0
+@@ -312,7 +458,7 @@ ENTRY(system_call)
+       lgf     %r9,0(%r8,%r10)                 # get system call add.
+       TSTMSK  __TI_flags(%r12),_TIF_TRACE
+       jnz     .Lsysc_tracesys
+-      basr    %r14,%r9                        # call sys_xxxx
++      BASR_R14_R9                             # call sys_xxxx
+       stg     %r2,__PT_R2(%r11)               # store return value
+ 
+ .Lsysc_return:
+@@ -324,6 +470,7 @@ ENTRY(system_call)
+       jnz     .Lsysc_work                     # check for work
+       TSTMSK  __LC_CPU_FLAGS,_CIF_WORK
+       jnz     .Lsysc_work
++      BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
+ .Lsysc_restore:
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+@@ -451,7 +598,7 @@ ENTRY(system_call)
+       lmg     %r3,%r7,__PT_R3(%r11)
+       stg     %r7,STACK_FRAME_OVERHEAD(%r15)
+       lg      %r2,__PT_ORIG_GPR2(%r11)
+-      basr    %r14,%r9                # call sys_xxx
++      BASR_R14_R9                     # call sys_xxx
+       stg     %r2,__PT_R2(%r11)       # store return value
+ .Lsysc_tracenogo:
+       TSTMSK  __TI_flags(%r12),_TIF_TRACE
+@@ -475,7 +622,7 @@ ENTRY(ret_from_fork)
+       lmg     %r9,%r10,__PT_R9(%r11)  # load gprs
+ ENTRY(kernel_thread_starter)
+       la      %r2,0(%r10)
+-      basr    %r14,%r9
++      BASR_R14_R9
+       j       .Lsysc_tracenogo
+ 
+ /*
+@@ -484,6 +631,7 @@ ENTRY(kernel_thread_starter)
+ 
+ ENTRY(pgm_check_handler)
+       stpt    __LC_SYNC_ENTER_TIMER
++      BPOFF
+       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+@@ -508,6 +656,7 @@ ENTRY(pgm_check_handler)
+       j       3f
+ 2:    LAST_BREAK %r14
+       UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
++      BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+       lg      %r15,__LC_KERNEL_STACK
+       lg      %r14,__TI_task(%r12)
+       aghi    %r14,__TASK_thread      # pointer to thread_struct
+@@ -517,6 +666,15 @@ ENTRY(pgm_check_handler)
+       mvc     __THREAD_trap_tdb(256,%r14),0(%r13)
+ 3:    la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       stmg    %r0,%r7,__PT_R0(%r11)
++      # clear user controlled registers to prevent speculative use
++      xgr     %r0,%r0
++      xgr     %r1,%r1
++      xgr     %r2,%r2
++      xgr     %r3,%r3
++      xgr     %r4,%r4
++      xgr     %r5,%r5
++      xgr     %r6,%r6
++      xgr     %r7,%r7
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       mvc     __PT_INT_CODE(4,%r11),__LC_PGM_ILC
+@@ -538,9 +696,9 @@ ENTRY(pgm_check_handler)
+       nill    %r10,0x007f
+       sll     %r10,2
+       je      .Lpgm_return
+-      lgf     %r1,0(%r10,%r1)         # load address of handler routine
++      lgf     %r9,0(%r10,%r1)         # load address of handler routine
+       lgr     %r2,%r11                # pass pointer to pt_regs
+-      basr    %r14,%r1                # branch to interrupt-handler
++      BASR_R14_R9                     # branch to interrupt-handler
+ .Lpgm_return:
+       LOCKDEP_SYS_EXIT
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+@@ -573,6 +731,7 @@ ENTRY(pgm_check_handler)
+ ENTRY(io_int_handler)
+       STCK    __LC_INT_CLOCK
+       stpt    __LC_ASYNC_ENTER_TIMER
++      BPOFF
+       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+@@ -580,6 +739,16 @@ ENTRY(io_int_handler)
+       lmg     %r8,%r9,__LC_IO_OLD_PSW
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+       stmg    %r0,%r7,__PT_R0(%r11)
++      # clear user controlled registers to prevent speculative use
++      xgr     %r0,%r0
++      xgr     %r1,%r1
++      xgr     %r2,%r2
++      xgr     %r3,%r3
++      xgr     %r4,%r4
++      xgr     %r5,%r5
++      xgr     %r6,%r6
++      xgr     %r7,%r7
++      xgr     %r10,%r10
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+@@ -614,9 +783,13 @@ ENTRY(io_int_handler)
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
++      tm      __PT_PSW+1(%r11),0x01   # returning to user ?
++      jno     .Lio_exit_kernel
++      BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
+ .Lio_exit_timer:
+       stpt    __LC_EXIT_TIMER
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
++.Lio_exit_kernel:
+       lmg     %r11,%r15,__PT_R11(%r11)
+       lpswe   __LC_RETURN_PSW
+ .Lio_done:
+@@ -748,6 +921,7 @@ ENTRY(io_int_handler)
+ ENTRY(ext_int_handler)
+       STCK    __LC_INT_CLOCK
+       stpt    __LC_ASYNC_ENTER_TIMER
++      BPOFF
+       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+@@ -755,6 +929,16 @@ ENTRY(ext_int_handler)
+       lmg     %r8,%r9,__LC_EXT_OLD_PSW
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+       stmg    %r0,%r7,__PT_R0(%r11)
++      # clear user controlled registers to prevent speculative use
++      xgr     %r0,%r0
++      xgr     %r1,%r1
++      xgr     %r2,%r2
++      xgr     %r3,%r3
++      xgr     %r4,%r4
++      xgr     %r5,%r5
++      xgr     %r6,%r6
++      xgr     %r7,%r7
++      xgr     %r10,%r10
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       lghi    %r1,__LC_EXT_PARAMS2
+@@ -787,11 +971,12 @@ ENTRY(psw_idle)
+ .Lpsw_idle_stcctm:
+ #endif
+       oi      __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
++      BPON
+       STCK    __CLOCK_IDLE_ENTER(%r2)
+       stpt    __TIMER_IDLE_ENTER(%r2)
+ .Lpsw_idle_lpsw:
+       lpswe   __SF_EMPTY(%r15)
+-      br      %r14
++      BR_R1USE_R14
+ .Lpsw_idle_end:
+ 
+ /*
+@@ -805,7 +990,7 @@ ENTRY(save_fpu_regs)
+       lg      %r2,__LC_CURRENT
+       aghi    %r2,__TASK_thread
+       TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
+-      bor     %r14
++      jo      .Lsave_fpu_regs_exit
+       stfpc   __THREAD_FPU_fpc(%r2)
+ .Lsave_fpu_regs_fpc_end:
+       lg      %r3,__THREAD_FPU_regs(%r2)
+@@ -835,7 +1020,8 @@ ENTRY(save_fpu_regs)
+       std     15,120(%r3)
+ .Lsave_fpu_regs_done:
+       oi      __LC_CPU_FLAGS+7,_CIF_FPU
+-      br      %r14
++.Lsave_fpu_regs_exit:
++      BR_R1USE_R14
+ .Lsave_fpu_regs_end:
+ #if IS_ENABLED(CONFIG_KVM)
+ EXPORT_SYMBOL(save_fpu_regs)
+@@ -855,7 +1041,7 @@ load_fpu_regs:
+       lg      %r4,__LC_CURRENT
+       aghi    %r4,__TASK_thread
+       TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
+-      bnor    %r14
++      jno     .Lload_fpu_regs_exit
+       lfpc    __THREAD_FPU_fpc(%r4)
+       TSTMSK  __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+       lg      %r4,__THREAD_FPU_regs(%r4)      # %r4 <- reg save area
+@@ -884,7 +1070,8 @@ load_fpu_regs:
+       ld      15,120(%r4)
+ .Lload_fpu_regs_done:
+       ni      __LC_CPU_FLAGS+7,255-_CIF_FPU
+-      br      %r14
++.Lload_fpu_regs_exit:
++      BR_R1USE_R14
+ .Lload_fpu_regs_end:
+ 
+ .L__critical_end:
+@@ -894,6 +1081,7 @@ load_fpu_regs:
+  */
+ ENTRY(mcck_int_handler)
+       STCK    __LC_MCCK_CLOCK
++      BPOFF
+       la      %r1,4095                # revalidate r1
+       spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # revalidate cpu timer
+       lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+@@ -925,6 +1113,16 @@ ENTRY(mcck_int_handler)
+ .Lmcck_skip:
+       lghi    %r14,__LC_GPREGS_SAVE_AREA+64
+       stmg    %r0,%r7,__PT_R0(%r11)
++      # clear user controlled registers to prevent speculative use
++      xgr     %r0,%r0
++      xgr     %r1,%r1
++      xgr     %r2,%r2
++      xgr     %r3,%r3
++      xgr     %r4,%r4
++      xgr     %r5,%r5
++      xgr     %r6,%r6
++      xgr     %r7,%r7
++      xgr     %r10,%r10
+       mvc     __PT_R8(64,%r11),0(%r14)
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+@@ -950,6 +1148,7 @@ ENTRY(mcck_int_handler)
+       mvc     __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
+       tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+       jno     0f
++      BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
+       stpt    __LC_EXIT_TIMER
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ 0:    lmg     %r11,%r15,__PT_R11(%r11)
+@@ -1045,7 +1244,7 @@ cleanup_critical:
+       jl      0f
+       clg     %r9,BASED(.Lcleanup_table+104)  # .Lload_fpu_regs_end
+       jl      .Lcleanup_load_fpu_regs
+-0:    br      %r14
++0:    BR_R11USE_R14
+ 
+       .align  8
+ .Lcleanup_table:
+@@ -1070,11 +1269,12 @@ cleanup_critical:
+       .quad   .Lsie_done
+ 
+ .Lcleanup_sie:
++      BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+       lg      %r9,__SF_EMPTY(%r15)            # get control block pointer
+       ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
+       larl    %r9,sie_exit                    # skip forward to sie_exit
+-      br      %r14
++      BR_R11USE_R14
+ #endif
+ 
+ .Lcleanup_system_call:
+@@ -1116,7 +1316,8 @@ cleanup_critical:
+       srag    %r9,%r9,23
+       jz      0f
+       mvc     __TI_last_break(8,%r12),16(%r11)
+-0:    # set up saved register r11
++0:    BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
++      # set up saved register r11
+       lg      %r15,__LC_KERNEL_STACK
+       la      %r9,STACK_FRAME_OVERHEAD(%r15)
+       stg     %r9,24(%r11)            # r11 pt_regs pointer
+@@ -1131,7 +1332,7 @@ cleanup_critical:
+       stg     %r15,56(%r11)           # r15 stack pointer
+       # set new psw address and exit
+       larl    %r9,.Lsysc_do_svc
+-      br      %r14
++      BR_R11USE_R14
+ .Lcleanup_system_call_insn:
+       .quad   system_call
+       .quad   .Lsysc_stmg
+@@ -1141,7 +1342,7 @@ cleanup_critical:
+ 
+ .Lcleanup_sysc_tif:
+       larl    %r9,.Lsysc_tif
+-      br      %r14
++      BR_R11USE_R14
+ 
+ .Lcleanup_sysc_restore:
+       # check if stpt has been executed
+@@ -1158,14 +1359,14 @@ cleanup_critical:
+       mvc     0(64,%r11),__PT_R8(%r9)
+       lmg     %r0,%r7,__PT_R0(%r9)
+ 1:    lmg     %r8,%r9,__LC_RETURN_PSW
+-      br      %r14
++      BR_R11USE_R14
+ .Lcleanup_sysc_restore_insn:
+       .quad   .Lsysc_exit_timer
+       .quad   .Lsysc_done - 4
+ 
+ .Lcleanup_io_tif:
+       larl    %r9,.Lio_tif
+-      br      %r14
++      BR_R11USE_R14
+ 
+ .Lcleanup_io_restore:
+       # check if stpt has been executed
+@@ -1179,7 +1380,7 @@ cleanup_critical:
+       mvc     0(64,%r11),__PT_R8(%r9)
+       lmg     %r0,%r7,__PT_R0(%r9)
+ 1:    lmg     %r8,%r9,__LC_RETURN_PSW
+-      br      %r14
++      BR_R11USE_R14
+ .Lcleanup_io_restore_insn:
+       .quad   .Lio_exit_timer
+       .quad   .Lio_done - 4
+@@ -1232,17 +1433,17 @@ cleanup_critical:
+       # prepare return psw
+       nihh    %r8,0xfcfd              # clear irq & wait state bits
+       lg      %r9,48(%r11)            # return from psw_idle
+-      br      %r14
++      BR_R11USE_R14
+ .Lcleanup_idle_insn:
+       .quad   .Lpsw_idle_lpsw
+ 
+ .Lcleanup_save_fpu_regs:
+       larl    %r9,save_fpu_regs
+-      br      %r14
++      BR_R11USE_R14
+ 
+ .Lcleanup_load_fpu_regs:
+       larl    %r9,load_fpu_regs
+-      br      %r14
++      BR_R11USE_R14
+ 
+ /*
+  * Integer constants
+@@ -1258,7 +1459,6 @@ cleanup_critical:
+ .Lsie_critical_length:
+       .quad   .Lsie_done - .Lsie_gmap
+ #endif
+-
+       .section .rodata, "a"
+ #define SYSCALL(esame,emu)    .long esame
+       .globl  sys_call_table
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 39127b691b78..df49f2a1a7e5 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
+ 
+ static void __ipl_run(void *unused)
+ {
++      __bpon();
+       diag308(DIAG308_LOAD_CLEAR, NULL);
+       if (MACHINE_IS_VM)
+               __cpcmd("IPL", NULL, 0, NULL);
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index fbc07891f9e7..64ccfdf96b32 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -31,6 +31,9 @@
+ #include <linux/kernel.h>
+ #include <linux/moduleloader.h>
+ #include <linux/bug.h>
++#include <asm/alternative.h>
++#include <asm/nospec-branch.h>
++#include <asm/facility.h>
+ 
+ #if 0
+ #define DEBUGP printk
+@@ -167,7 +170,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+       me->arch.got_offset = me->core_layout.size;
+       me->core_layout.size += me->arch.got_size;
+       me->arch.plt_offset = me->core_layout.size;
+-      me->core_layout.size += me->arch.plt_size;
++      if (me->arch.plt_size) {
++              if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
++                      me->arch.plt_size += PLT_ENTRY_SIZE;
++              me->core_layout.size += me->arch.plt_size;
++      }
+       return 0;
+ }
+ 
+@@ -321,9 +328,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+                       unsigned int *ip;
+                       ip = me->core_layout.base + me->arch.plt_offset +
+                               info->plt_offset;
+-                      ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+-                      ip[1] = 0x100a0004;
+-                      ip[2] = 0x07f10000;
++                      ip[0] = 0x0d10e310;     /* basr 1,0  */
++                      ip[1] = 0x100a0004;     /* lg   1,10(1) */
++                      if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
++                              unsigned int *ij;
++                              ij = me->core_layout.base +
++                                      me->arch.plt_offset +
++                                      me->arch.plt_size - PLT_ENTRY_SIZE;
++                              ip[2] = 0xa7f40000 +    /* j __jump_r1 */
++                                      (unsigned int)(u16)
++                                      (((unsigned long) ij - 8 -
++                                        (unsigned long) ip) / 2);
++                      } else {
++                              ip[2] = 0x07f10000;     /* br %r1 */
++                      }
+                       ip[3] = (unsigned int) (val >> 32);
+                       ip[4] = (unsigned int) val;
+                       info->plt_initialized = 1;
+@@ -428,6 +446,45 @@ int module_finalize(const Elf_Ehdr *hdr,
+                   const Elf_Shdr *sechdrs,
+                   struct module *me)
+ {
++      const Elf_Shdr *s;
++      char *secstrings, *secname;
++      void *aseg;
++
++      if (IS_ENABLED(CONFIG_EXPOLINE) &&
++          !nospec_disable && me->arch.plt_size) {
++              unsigned int *ij;
++
++              ij = me->core_layout.base + me->arch.plt_offset +
++                      me->arch.plt_size - PLT_ENTRY_SIZE;
++              if (test_facility(35)) {
++                      ij[0] = 0xc6000000;     /* exrl %r0,.+10        */
++                      ij[1] = 0x0005a7f4;     /* j    .               */
++                      ij[2] = 0x000007f1;     /* br   %r1             */
++              } else {
++                      ij[0] = 0x44000000 | (unsigned int)
++                              offsetof(struct lowcore, br_r1_trampoline);
++                      ij[1] = 0xa7f40000;     /* j    .               */
++              }
++      }
++
++      secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
++      for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
++              aseg = (void *) s->sh_addr;
++              secname = secstrings + s->sh_name;
++
++              if (!strcmp(".altinstructions", secname))
++                      /* patch .altinstructions */
++                      apply_alternatives(aseg, aseg + s->sh_size);
++
++              if (IS_ENABLED(CONFIG_EXPOLINE) &&
++                  (!strncmp(".s390_indirect", secname, 14)))
++                      nospec_revert(aseg, aseg + s->sh_size);
++
++              if (IS_ENABLED(CONFIG_EXPOLINE) &&
++                  (!strncmp(".s390_return", secname, 12)))
++                      nospec_revert(aseg, aseg + s->sh_size);
++      }
++
+       jump_label_apply_nops(me);
+       return 0;
+ }
+diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
+new file mode 100644
+index 000000000000..9f3b5b382743
+--- /dev/null
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -0,0 +1,169 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/module.h>
++#include <linux/device.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
++
++static int __init nobp_setup_early(char *str)
++{
++      bool enabled;
++      int rc;
++
++      rc = kstrtobool(str, &enabled);
++      if (rc)
++              return rc;
++      if (enabled && test_facility(82)) {
++              /*
++               * The user explicitely requested nobp=1, enable it and
++               * disable the expoline support.
++               */
++              __set_facility(82, S390_lowcore.alt_stfle_fac_list);
++              if (IS_ENABLED(CONFIG_EXPOLINE))
++                      nospec_disable = 1;
++      } else {
++              __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++      }
++      return 0;
++}
++early_param("nobp", nobp_setup_early);
++
++static int __init nospec_setup_early(char *str)
++{
++      __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++      return 0;
++}
++early_param("nospec", nospec_setup_early);
++
++static int __init nospec_report(void)
++{
++      if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++              pr_info("Spectre V2 mitigation: execute trampolines.\n");
++      if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++              pr_info("Spectre V2 mitigation: limited branch prediction.\n");
++      return 0;
++}
++arch_initcall(nospec_report);
++
++#ifdef CONFIG_SYSFS
++ssize_t cpu_show_spectre_v1(struct device *dev,
++                          struct device_attribute *attr, char *buf)
++{
++      return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev,
++                          struct device_attribute *attr, char *buf)
++{
++      if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++              return sprintf(buf, "Mitigation: execute trampolines\n");
++      if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++              return sprintf(buf, "Mitigation: limited branch prediction.\n");
++      return sprintf(buf, "Vulnerable\n");
++}
++#endif
++
++#ifdef CONFIG_EXPOLINE
++
++int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
++
++static int __init nospectre_v2_setup_early(char *str)
++{
++      nospec_disable = 1;
++      return 0;
++}
++early_param("nospectre_v2", nospectre_v2_setup_early);
++
++void __init nospec_auto_detect(void)
++{
++      if (IS_ENABLED(CC_USING_EXPOLINE)) {
++              /*
++               * The kernel has been compiled with expolines.
++               * Keep expolines enabled and disable nobp.
++               */
++              nospec_disable = 0;
++              __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++      }
++      /*
++       * If the kernel has not been compiled with expolines the
++       * nobp setting decides what is done, this depends on the
++       * CONFIG_KERNEL_NP option and the nobp/nospec parameters.
++       */
++}
++
++static int __init spectre_v2_setup_early(char *str)
++{
++      if (str && !strncmp(str, "on", 2)) {
++              nospec_disable = 0;
++              __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++      }
++      if (str && !strncmp(str, "off", 3))
++              nospec_disable = 1;
++      if (str && !strncmp(str, "auto", 4))
++              nospec_auto_detect();
++      return 0;
++}
++early_param("spectre_v2", spectre_v2_setup_early);
++
++static void __init_or_module __nospec_revert(s32 *start, s32 *end)
++{
++      enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
++      u8 *instr, *thunk, *br;
++      u8 insnbuf[6];
++      s32 *epo;
++
++      /* Second part of the instruction replace is always a nop */
++      memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
++      for (epo = start; epo < end; epo++) {
++              instr = (u8 *) epo + *epo;
++              if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
++                      type = BRCL_EXPOLINE;   /* brcl instruction */
++              else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
++                      type = BRASL_EXPOLINE;  /* brasl instruction */
++              else
++                      continue;
++              thunk = instr + (*(int *)(instr + 2)) * 2;
++              if (thunk[0] == 0xc6 && thunk[1] == 0x00)
++                      /* exrl %r0,<target-br> */
++                      br = thunk + (*(int *)(thunk + 2)) * 2;
++              else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
++                       thunk[6] == 0x44 && thunk[7] == 0x00 &&
++                       (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
++                       (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
++                      /* larl %rx,<target br> + ex %r0,0(%rx) */
++                      br = thunk + (*(int *)(thunk + 2)) * 2;
++              else
++                      continue;
++              if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
++                      continue;
++              switch (type) {
++              case BRCL_EXPOLINE:
++                      /* brcl to thunk, replace with br + nop */
++                      insnbuf[0] = br[0];
++                      insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++                      break;
++              case BRASL_EXPOLINE:
++                      /* brasl to thunk, replace with basr + nop */
++                      insnbuf[0] = 0x0d;
++                      insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++                      break;
++              }
++
++              s390_kernel_write(instr, insnbuf, 6);
++      }
++}
++
++void __init_or_module nospec_revert(s32 *start, s32 *end)
++{
++      if (nospec_disable)
++              __nospec_revert(start, end);
++}
++
++extern s32 __nospec_call_start[], __nospec_call_end[];
++extern s32 __nospec_return_start[], __nospec_return_end[];
++void __init nospec_init_branches(void)
++{
++      nospec_revert(__nospec_call_start, __nospec_call_end);
++      nospec_revert(__nospec_return_start, __nospec_return_end);
++}
++
++#endif /* CONFIG_EXPOLINE */
+diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
+index 81d0808085e6..d856263fd768 100644
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -179,3 +179,21 @@ const struct seq_operations cpuinfo_op = {
+       .stop   = c_stop,
+       .show   = show_cpuinfo,
+ };
++
++int s390_isolate_bp(void)
++{
++      if (!test_facility(82))
++              return -EOPNOTSUPP;
++      set_thread_flag(TIF_ISOLATE_BP);
++      return 0;
++}
++EXPORT_SYMBOL(s390_isolate_bp);
++
++int s390_isolate_bp_guest(void)
++{
++      if (!test_facility(82))
++              return -EOPNOTSUPP;
++      set_thread_flag(TIF_ISOLATE_BP_GUEST);
++      return 0;
++}
++EXPORT_SYMBOL(s390_isolate_bp_guest);
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index e974e53ab597..feb9d97a9d14 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -63,6 +63,8 @@
+ #include <asm/sclp.h>
+ #include <asm/sysinfo.h>
+ #include <asm/numa.h>
++#include <asm/alternative.h>
++#include <asm/nospec-branch.h>
+ #include "entry.h"
+ 
+ /*
+@@ -335,7 +337,9 @@ static void __init setup_lowcore(void)
+       lc->machine_flags = S390_lowcore.machine_flags;
+       lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
+       memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+-             MAX_FACILITY_BIT/8);
++             sizeof(lc->stfle_fac_list));
++      memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
++             sizeof(lc->alt_stfle_fac_list));
+       if (MACHINE_HAS_VX)
+               lc->vector_save_area_addr =
+                       (unsigned long) &lc->vector_save_area;
+@@ -372,6 +376,7 @@ static void __init setup_lowcore(void)
+ #ifdef CONFIG_SMP
+       lc->spinlock_lockval = arch_spin_lockval(0);
+ #endif
++      lc->br_r1_trampoline = 0x07f1;  /* br %r1 */
+ 
+       set_prefix((u32)(unsigned long) lc);
+       lowcore_ptr[0] = lc;
+@@ -871,6 +876,9 @@ void __init setup_arch(char **cmdline_p)
+       init_mm.end_data = (unsigned long) &_edata;
+       init_mm.brk = (unsigned long) &_end;
+ 
++      if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
++              nospec_auto_detect();
++
+       parse_early_param();
+ #ifdef CONFIG_CRASH_DUMP
+       /* Deactivate elfcorehdr= kernel parameter */
+@@ -931,6 +939,10 @@ void __init setup_arch(char **cmdline_p)
+       conmode_default();
+       set_preferred_console();
+ 
++      apply_alternative_instructions();
++      if (IS_ENABLED(CONFIG_EXPOLINE))
++              nospec_init_branches();
++
+       /* Setup zfcpdump support */
+       setup_zfcpdump();
+ 
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 35531fe1c5ea..0a31110f41f6 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -205,6 +205,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
+       lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
+       lc->cpu_nr = cpu;
+       lc->spinlock_lockval = arch_spin_lockval(cpu);
++      lc->br_r1_trampoline = 0x07f1;  /* br %r1 */
+       if (MACHINE_HAS_VX)
+               lc->vector_save_area_addr =
+                       (unsigned long) &lc->vector_save_area;
+@@ -253,7 +254,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
+       __ctl_store(lc->cregs_save_area, 0, 15);
+       save_access_regs((unsigned int *) lc->access_regs_save_area);
+       memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+-             MAX_FACILITY_BIT/8);
++             sizeof(lc->stfle_fac_list));
++      memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
++             sizeof(lc->alt_stfle_fac_list));
+ }
+ 
+ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
+@@ -302,6 +305,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
+       mem_assign_absolute(lc->restart_fn, (unsigned long) func);
+       mem_assign_absolute(lc->restart_data, (unsigned long) data);
+       mem_assign_absolute(lc->restart_source, source_cpu);
++      __bpon();
+       asm volatile(
+               "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
+               "       brc     2,0b    # busy, try again\n"
+@@ -875,6 +879,7 @@ void __cpu_die(unsigned int cpu)
+ void __noreturn cpu_die(void)
+ {
+       idle_task_exit();
++      __bpon();
+       pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
+       for (;;) ;
+ }
+diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
+index 66956c09d5bf..3d04dfdabc9f 100644
+--- a/arch/s390/kernel/uprobes.c
++++ b/arch/s390/kernel/uprobes.c
+@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
+       return orig;
+ }
+ 
++bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
++                           struct pt_regs *regs)
++{
++      if (ctx == RP_CHECK_CHAIN_CALL)
++              return user_stack_pointer(regs) <= ret->stack;
++      else
++              return user_stack_pointer(regs) < ret->stack;
++}
++
+ /* Instruction Emulation */
+ 
+ static void adjust_psw_addr(psw_t *psw, unsigned long len)
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 115bda280d50..dd96b467946b 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -99,6 +99,43 @@ SECTIONS
+               EXIT_DATA
+       }
+ 
++      /*
++       * struct alt_inst entries. From the header (alternative.h):
++       * "Alternative instructions for different CPU types or capabilities"
++       * Think locking instructions on spinlocks.
++       * Note, that it is a part of __init region.
++       */
++      . = ALIGN(8);
++      .altinstructions : {
++              __alt_instructions = .;
++              *(.altinstructions)
++              __alt_instructions_end = .;
++      }
++
++      /*
++       * And here are the replacement instructions. The linker sticks
++       * them as binary blobs. The .altinstructions has enough data to
++       * get the address and the length of them to patch the kernel safely.
++       * Note, that it is a part of __init region.
++       */
++      .altinstr_replacement : {
++              *(.altinstr_replacement)
++      }
++
++      /*
++       * Table with the patch locations to undo expolines
++      */
++      .nospec_call_table : {
++              __nospec_call_start = . ;
++              *(.s390_indirect*)
++              __nospec_call_end = . ;
++      }
++      .nospec_return_table : {
++              __nospec_return_start = . ;
++              *(.s390_return*)
++              __nospec_return_end = . ;
++      }
++
+       /* early.c uses stsi, which requires page aligned data. */
+       . = ALIGN(PAGE_SIZE);
+       INIT_DATA_SECTION(0x100)
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index a70ff09b4982..2032ab81b2d7 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -401,6 +401,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+       case KVM_CAP_S390_RI:
+               r = test_facility(64);
+               break;
++      case KVM_CAP_S390_BPB:
++              r = test_facility(82);
++              break;
+       default:
+               r = 0;
+       }
+@@ -1713,6 +1716,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+       kvm_s390_set_prefix(vcpu, 0);
+       if (test_kvm_facility(vcpu->kvm, 64))
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
++      if (test_kvm_facility(vcpu->kvm, 82))
++              vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
+       /* fprs can be synchronized via vrs, even if the guest has no vx. With
+        * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
+        */
+@@ -1829,7 +1834,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+       if (test_fp_ctl(current->thread.fpu.fpc))
+               /* User space provided an invalid FPC, let's clear it */
+               current->thread.fpu.fpc = 0;
+-
+       save_access_regs(vcpu->arch.host_acrs);
+       restore_access_regs(vcpu->run->s.regs.acrs);
+       gmap_enable(vcpu->arch.enabled_gmap);
+@@ -1877,6 +1881,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+       current->thread.fpu.fpc = 0;
+       vcpu->arch.sie_block->gbea = 1;
+       vcpu->arch.sie_block->pp = 0;
++      vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+       vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+       kvm_clear_async_pf_completion_queue(vcpu);
+       if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+@@ -2744,6 +2749,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+               if (riccb->valid)
+                       vcpu->arch.sie_block->ecb3 |= 0x01;
+       }
++      if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
++          test_kvm_facility(vcpu->kvm, 82)) {
++              vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
++              vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
++      }
+ 
+       kvm_run->kvm_dirty_regs = 0;
+ }
+@@ -2762,6 +2772,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+       kvm_run->s.regs.pft = vcpu->arch.pfault_token;
+       kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
+       kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
++      kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
+ }
+ 
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index d8673e243f13..ced6c9b8f04d 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -217,6 +217,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+       memcpy(scb_o->gcr, scb_s->gcr, 128);
+       scb_o->pp = scb_s->pp;
+ 
++      /* branch prediction */
++      if (test_kvm_facility(vcpu->kvm, 82)) {
++              scb_o->fpf &= ~FPF_BPBC;
++              scb_o->fpf |= scb_s->fpf & FPF_BPBC;
++      }
++
+       /* interrupt intercept */
+       switch (scb_s->icptcode) {
+       case ICPT_PROGI:
+@@ -259,6 +265,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+       scb_s->ecb3 = 0;
+       scb_s->ecd = 0;
+       scb_s->fac = 0;
++      scb_s->fpf = 0;
+ 
+       rc = prepare_cpuflags(vcpu, vsie_page);
+       if (rc)
+@@ -316,6 +323,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+                       prefix_unmapped(vsie_page);
+               scb_s->ecb |= scb_o->ecb & 0x10U;
+       }
++      /* branch prediction */
++      if (test_kvm_facility(vcpu->kvm, 82))
++              scb_s->fpf |= scb_o->fpf & FPF_BPBC;
+       /* SIMD */
+       if (test_kvm_facility(vcpu->kvm, 129)) {
+               scb_s->eca |= scb_o->eca & 0x00020000U;
+@@ -754,6 +764,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ {
+       struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+       struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
++      int guest_bp_isolation;
+       int rc;
+ 
+       handle_last_fault(vcpu, vsie_page);
+@@ -764,6 +775,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+               s390_handle_mcck();
+ 
+       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
++
++      /* save current guest state of bp isolation override */
++      guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
++
++      /*
++       * The guest is running with BPBC, so we have to force it on for our
++       * nested guest. This is done by enabling BPBC globally, so the BPBC
++       * control in the SCB (which the nested guest can modify) is simply
++       * ignored.
++       */
++      if (test_kvm_facility(vcpu->kvm, 82) &&
++          vcpu->arch.sie_block->fpf & FPF_BPBC)
++              set_thread_flag(TIF_ISOLATE_BP_GUEST);
++
+       local_irq_disable();
+       guest_enter_irqoff();
+       local_irq_enable();
+@@ -773,6 +798,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+       local_irq_disable();
+       guest_exit_irqoff();
+       local_irq_enable();
++
++      /* restore guest state for bp isolation override */
++      if (!guest_bp_isolation)
++              clear_thread_flag(TIF_ISOLATE_BP_GUEST);
++
+       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ 
+       if (rc > 0)
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index bbfb03eccb7f..da6a287a11e4 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -409,7 +409,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
+       hpet2 -= hpet1;
+       tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
+       do_div(tmp, 1000000);
+-      do_div(deltatsc, tmp);
++      deltatsc = div64_u64(deltatsc, tmp);
+ 
+       return (unsigned long) deltatsc;
+ }
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 94e04c9de12b..667dc5c86fef 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -2069,6 +2069,25 @@ static int __init intel_opregion_present(void)
+       return opregion;
+ }
+ 
++static bool dmi_is_desktop(void)
++{
++      const char *chassis_type;
++
++      chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
++      if (!chassis_type)
++              return false;
++
++      if (!strcmp(chassis_type, "3") || /*  3: Desktop */
++          !strcmp(chassis_type, "4") || /*  4: Low Profile Desktop */
++          !strcmp(chassis_type, "5") || /*  5: Pizza Box */
++          !strcmp(chassis_type, "6") || /*  6: Mini Tower */
++          !strcmp(chassis_type, "7") || /*  7: Tower */
++          !strcmp(chassis_type, "11"))  /* 11: Main Server Chassis */
++              return true;
++
++      return false;
++}
++
+ int acpi_video_register(void)
+ {
+       int ret = 0;
+@@ -2089,8 +2108,12 @@ int acpi_video_register(void)
+        * win8 ready (where we also prefer the native backlight driver, so
+        * normally the acpi_video code should not register there anyways).
+        */
+-      if (only_lcd == -1)
+-              only_lcd = acpi_osi_is_win8();
++      if (only_lcd == -1) {
++              if (dmi_is_desktop() && acpi_osi_is_win8())
++                      only_lcd = true;
++              else
++                      only_lcd = false;
++      }
+ 
+       dmi_check_system(video_dmi_table);
+ 
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 5d475b3a0b2e..128ebd439221 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2368,7 +2368,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+       if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
+               return media_changed(cdi, 1);
+ 
+-      if ((unsigned int)arg >= cdi->capacity)
++      if (arg >= cdi->capacity)
+               return -EINVAL;
+ 
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
+diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+index a7b2a751f6fe..cdb53586c8fe 100644
+--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
++++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+@@ -322,19 +322,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+ {
+       uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
+       ssize_t ret;
++      int retry;
+ 
+       if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+               return 0;
+ 
+-      ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+-                                   &tmds_oen, sizeof(tmds_oen));
+-      if (ret) {
+-              DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
+-                            enable ? "enable" : "disable");
+-              return ret;
++      /*
++       * LSPCON adapters in low-power state may ignore the first write, so
++       * read back and verify the written value a few times.
++       */
++      for (retry = 0; retry < 3; retry++) {
++              uint8_t tmp;
++
++              ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
++                                           &tmds_oen, sizeof(tmds_oen));
++              if (ret) {
++                      DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
++                                    enable ? "enable" : "disable",
++                                    retry + 1);
++                      return ret;
++              }
++
++              ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
++                                          &tmp, sizeof(tmp));
++              if (ret) {
++                      DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
++                                    enable ? "enabling" : "disabling",
++                                    retry + 1);
++                      return ret;
++              }
++
++              if (tmp == tmds_oen)
++                      return 0;
+       }
+ 
+-      return 0;
++      DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
++                    enable ? "enabling" : "disabling");
++
++      return -EIO;
+ }
+ EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
+ 
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 36a665f0e5c9..e23748cca0c0 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3681,7 +3681,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
+                                           struct intel_display_error_state *error);
+ 
+ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
+-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
++int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
++                                  u32 val, int timeout_us);
++#define sandybridge_pcode_write(dev_priv, mbox, val)  \
++      sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
++
+ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+                     u32 reply_mask, u32 reply, int timeout_base_ms);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index ce32303b3013..c185625d67f2 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -6012,8 +6012,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+ 
+       /* Inform power controller of upcoming frequency change */
+       mutex_lock(&dev_priv->rps.hw_lock);
+-      ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+-                                    0x80000000);
++      ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
++                                            0x80000000, 2000);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+ 
+       if (ret) {
+@@ -6044,8 +6044,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+       I915_WRITE(CDCLK_CTL, val);
+ 
+       mutex_lock(&dev_priv->rps.hw_lock);
+-      ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+-                                    DIV_ROUND_UP(cdclk, 25000));
++      ret = sandybridge_pcode_write_timeout(dev_priv,
++                                            HSW_PCODE_DE_WRITE_FREQ_REQ,
++                                            DIV_ROUND_UP(cdclk, 25000), 2000);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+ 
+       if (ret) {
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 49de4760cc16..05427d292457 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -7913,8 +7913,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
+       return 0;
+ }
+ 
+-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+-                          u32 mbox, u32 val)
++int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
++                                  u32 mbox, u32 val, int timeout_us)
+ {
+       int status;
+ 
+@@ -7935,7 +7935,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+ 
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+-                                     500)) {
++                                     timeout_us)) {
+              DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+               return -ETIMEDOUT;
+       }
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index ec9023bd935b..d53e805d392f 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -80,6 +80,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
+       struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
+ 
+       if (bo->validated_shader) {
++              kfree(bo->validated_shader->uniform_addr_offsets);
+               kfree(bo->validated_shader->texture_samples);
+               kfree(bo->validated_shader);
+               bo->validated_shader = NULL;
+@@ -328,6 +329,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
+       }
+ 
+       if (bo->validated_shader) {
++              kfree(bo->validated_shader->uniform_addr_offsets);
+               kfree(bo->validated_shader->texture_samples);
+               kfree(bo->validated_shader);
+               bo->validated_shader = NULL;
+diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+index 917321ce832f..19a5bde8e490 100644
+--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
++++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+@@ -874,6 +874,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+ fail:
+       kfree(validation_state.branch_targets);
+       if (validated_shader) {
++              kfree(validated_shader->uniform_addr_offsets);
+               kfree(validated_shader->texture_samples);
+               kfree(validated_shader);
+       }
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index e6fe21a6135b..b32bf7eac3c8 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -243,6 +243,7 @@ struct i801_priv {
+       struct i2c_adapter adapter;
+       unsigned long smba;
+       unsigned char original_hstcfg;
++      unsigned char original_slvcmd;
+       struct pci_dev *pci_dev;
+       unsigned int features;
+ 
+@@ -962,13 +963,24 @@ static int i801_enable_host_notify(struct i2c_adapter *adapter)
+       if (!priv->host_notify)
+               return -ENOMEM;
+ 
+-      outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
++      if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
++              outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
++                     SMBSLVCMD(priv));
++
+       /* clear Host Notify bit to allow a new notification */
+       outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+ 
+       return 0;
+ }
+ 
++static void i801_disable_host_notify(struct i801_priv *priv)
++{
++      if (!(priv->features & FEATURE_HOST_NOTIFY))
++              return;
++
++      outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
++}
++
+ static const struct i2c_algorithm smbus_algorithm = {
+       .smbus_xfer     = i801_access,
+       .functionality  = i801_func,
+@@ -1589,6 +1601,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+               outb_p(inb_p(SMBAUXCTL(priv)) &
+                      ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ 
++      /* Remember original Host Notify setting */
++      if (priv->features & FEATURE_HOST_NOTIFY)
++              priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
++
+       /* Default timeout in interrupt mode: 200 ms */
+       priv->adapter.timeout = HZ / 5;
+ 
+@@ -1666,6 +1682,7 @@ static void i801_remove(struct pci_dev *dev)
+       pm_runtime_forbid(&dev->dev);
+       pm_runtime_get_noresume(&dev->dev);
+ 
++      i801_disable_host_notify(priv);
+       i801_del_mux(priv);
+       i2c_del_adapter(&priv->adapter);
+       i801_acpi_remove(priv);
+@@ -1679,6 +1696,15 @@ static void i801_remove(struct pci_dev *dev)
+        */
+ }
+ 
++static void i801_shutdown(struct pci_dev *dev)
++{
++      struct i801_priv *priv = pci_get_drvdata(dev);
++
++      /* Restore config registers to avoid hard hang on some systems */
++      i801_disable_host_notify(priv);
++      pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
++}
++
+ #ifdef CONFIG_PM
+ static int i801_suspend(struct device *dev)
+ {
+@@ -1711,6 +1737,7 @@ static struct pci_driver i801_driver = {
+       .id_table       = i801_ids,
+       .probe          = i801_probe,
+       .remove         = i801_remove,
++      .shutdown       = i801_shutdown,
+       .driver         = {
+               .pm     = &i801_pm_ops,
+       },
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 403df3591d29..5b8909d1b55e 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -2848,7 +2848,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
+        * If we moved a kernel QP to RESET, clean up all old CQ
+        * entries and reinitialize the QP.
+        */
+-      if (new_state == IB_QPS_RESET && !ibqp->uobject) {
++      if (new_state == IB_QPS_RESET &&
++          !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
+               mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
+                                ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+               if (send_cq != recv_cq)
+diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
+index 930424e55439..251d64ca41ce 100644
+--- a/drivers/input/misc/drv260x.c
++++ b/drivers/input/misc/drv260x.c
+@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
+       if (!haptics)
+               return -ENOMEM;
+ 
+-      haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
++      haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+       haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
+ 
+       if (pdata) {
+diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
+index e546b014d7ad..2dcc8d0be9e7 100644
+--- a/drivers/media/usb/stkwebcam/stk-sensor.c
++++ b/drivers/media/usb/stkwebcam/stk-sensor.c
+@@ -228,7 +228,7 @@
+ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
+ {
+       int i = 0;
+-      int tmpval = 0;
++      u8 tmpval = 0;
+ 
+       if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
+               return 1;
+@@ -253,7 +253,7 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
+ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
+ {
+       int i = 0;
+-      int tmpval = 0;
++      u8 tmpval = 0;
+ 
+       if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
+               return 1;
+@@ -274,7 +274,7 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
+       if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
+               return 1;
+ 
+-      *val = (u8) tmpval;
++      *val = tmpval;
+       return 0;
+ }
+ 
+diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
+index 22a9aae16291..1c48f2f1e14a 100644
+--- a/drivers/media/usb/stkwebcam/stk-webcam.c
++++ b/drivers/media/usb/stkwebcam/stk-webcam.c
+@@ -144,7 +144,7 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
+               return 0;
+ }
+ 
+-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
++int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
+ {
+       struct usb_device *udev = dev->udev;
+       unsigned char *buf;
+@@ -163,7 +163,7 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+                       sizeof(u8),
+                       500);
+       if (ret >= 0)
+-              memcpy(value, buf, sizeof(u8));
++              *value = *buf;
+ 
+       kfree(buf);
+       return ret;
+@@ -171,9 +171,10 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+ 
+ static int stk_start_stream(struct stk_camera *dev)
+ {
+-      int value;
++      u8 value;
+       int i, ret;
+-      int value_116, value_117;
++      u8 value_116, value_117;
++
+ 
+       if (!is_present(dev))
+               return -ENODEV;
+@@ -213,7 +214,7 @@ static int stk_start_stream(struct stk_camera *dev)
+ 
+ static int stk_stop_stream(struct stk_camera *dev)
+ {
+-      int value;
++      u8 value;
+       int i;
+       if (is_present(dev)) {
+               stk_camera_read_reg(dev, 0x0100, &value);
+diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
+index 9bbfa3d9bfdd..92bb48e3c74e 100644
+--- a/drivers/media/usb/stkwebcam/stk-webcam.h
++++ b/drivers/media/usb/stkwebcam/stk-webcam.h
+@@ -129,7 +129,7 @@ struct stk_camera {
+ #define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
+ 
+ int stk_camera_write_reg(struct stk_camera *, u16, u8);
+-int stk_camera_read_reg(struct stk_camera *, u16, int *);
++int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
+ 
+ int stk_sensor_init(struct stk_camera *);
+ int stk_sensor_configure(struct stk_camera *);
+diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
+index 7ee1667acde4..00dff9b5a6c4 100644
+--- a/drivers/message/fusion/mptsas.c
++++ b/drivers/message/fusion/mptsas.c
+@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
+       .cmd_per_lun                    = 7,
+       .use_clustering                 = ENABLE_CLUSTERING,
+       .shost_attrs                    = mptscsih_host_attrs,
++      .no_write_same                  = 1,
+ };
+ 
+ static int mptsas_get_linkerrors(struct sas_phy *phy)
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 513457a2a7bf..13a015b8052b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1654,8 +1654,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+       } /* switch(bond_mode) */
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-      slave_dev->npinfo = bond->dev->npinfo;
+-      if (slave_dev->npinfo) {
++      if (bond->dev->npinfo) {
+               if (slave_enable_netpoll(new_slave)) {
+                      netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
+                       res = -EBUSY;
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 552de9c490c6..de336897a28a 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -124,7 +124,7 @@ do {                                                               \
+ 
+ #define RX_PRIORITY_MAPPING   0x76543210
+ #define TX_PRIORITY_MAPPING   0x33221100
+-#define CPDMA_TX_PRIORITY_MAP 0x01234567
++#define CPDMA_TX_PRIORITY_MAP 0x76543210
+ 
+ #define CPSW_VLAN_AWARE               BIT(1)
+ #define CPSW_ALE_VLAN_AWARE   1
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index dc36c2ec1d10..fa2c7bd638be 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+       lock_sock(sk);
+ 
+       error = -EINVAL;
++
++      if (sockaddr_len != sizeof(struct sockaddr_pppox))
++              goto end;
++
+       if (sp->sa_protocol != PX_PROTO_OE)
+               goto end;
+ 
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 8673ef3c9cdc..36963685d42a 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
+       }
+ }
+ 
++static bool __team_option_inst_tmp_find(const struct list_head *opts,
++                                      const struct team_option_inst *needle)
++{
++      struct team_option_inst *opt_inst;
++
++      list_for_each_entry(opt_inst, opts, tmp_list)
++              if (opt_inst == needle)
++                      return true;
++      return false;
++}
++
+ static int __team_options_register(struct team *team,
+                                  const struct team_option *option,
+                                  size_t option_count)
+@@ -1067,14 +1078,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
+ }
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int __team_port_enable_netpoll(struct team_port *port)
+ {
+       struct netpoll *np;
+       int err;
+ 
+-      if (!team->dev->npinfo)
+-              return 0;
+-
+       np = kzalloc(sizeof(*np), GFP_KERNEL);
+       if (!np)
+               return -ENOMEM;
+@@ -1088,6 +1096,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+       return err;
+ }
+ 
++static int team_port_enable_netpoll(struct team_port *port)
++{
++      if (!port->team->dev->npinfo)
++              return 0;
++
++      return __team_port_enable_netpoll(port);
++}
++
+ static void team_port_disable_netpoll(struct team_port *port)
+ {
+       struct netpoll *np = port->np;
+@@ -1102,7 +1118,7 @@ static void team_port_disable_netpoll(struct team_port *port)
+       kfree(np);
+ }
+ #else
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int team_port_enable_netpoll(struct team_port *port)
+ {
+       return 0;
+ }
+@@ -1210,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+               goto err_vids_add;
+       }
+ 
+-      err = team_port_enable_netpoll(team, port);
++      err = team_port_enable_netpoll(port);
+       if (err) {
+               netdev_err(dev, "Failed to enable netpoll on device %s\n",
+                          portname);
+@@ -1908,7 +1924,7 @@ static int team_netpoll_setup(struct net_device *dev,
+ 
+       mutex_lock(&team->lock);
+       list_for_each_entry(port, &team->port_list, list) {
+-              err = team_port_enable_netpoll(team, port);
++              err = __team_port_enable_netpoll(port);
+               if (err) {
+                       __team_netpoll_cleanup(team);
+                       break;
+@@ -2569,6 +2585,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+                       if (err)
+                               goto team_put;
+                       opt_inst->changed = true;
++
++                      /* dumb/evil user-space can send us duplicate opt,
++                       * keep only the last one
++                       */
++                      if (__team_option_inst_tmp_find(&opt_inst_list,
++                                                      opt_inst))
++                              continue;
++
+                       list_add(&opt_inst->tmp_list, &opt_inst_list);
+               }
+               if (!opt_found) {
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 4fb468666b19..99424c87b464 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -530,6 +530,7 @@ static const struct driver_info wwan_info = {
+ #define REALTEK_VENDOR_ID     0x0bda
+ #define SAMSUNG_VENDOR_ID     0x04e8
+ #define LENOVO_VENDOR_ID      0x17ef
++#define LINKSYS_VENDOR_ID     0x13b1
+ #define NVIDIA_VENDOR_ID      0x0955
+ #define HP_VENDOR_ID          0x03f0
+ 
+@@ -719,6 +720,15 @@ static const struct usb_device_id products[] = {
+       .driver_info = 0,
+ },
+ 
++#if IS_ENABLED(CONFIG_USB_RTL8152)
++/* Linksys USB3GIGV1 Ethernet Adapter */
++{
++      USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
++                      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++      .driver_info = 0,
++},
++#endif
++
+ /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+ {
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index b2d7c7e32250..3cdfa2465e3f 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -519,6 +519,7 @@ enum rtl8152_flags {
+ #define VENDOR_ID_REALTEK             0x0bda
+ #define VENDOR_ID_SAMSUNG             0x04e8
+ #define VENDOR_ID_LENOVO              0x17ef
++#define VENDOR_ID_LINKSYS             0x13b1
+ #define VENDOR_ID_NVIDIA              0x0955
+ 
+ #define MCU_TYPE_PLA                  0x0100
+@@ -4506,6 +4507,7 @@ static struct usb_device_id rtl8152_table[] = {
+       {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
++      {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
+       {}
+ };
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index a497bf31953d..5aa5df24f4dc 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -5819,9 +5819,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+                                   sta->addr, smps, err);
+       }
+ 
+-      if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
+-          changed & IEEE80211_RC_NSS_CHANGED) {
+-              ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
++      if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
++              ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+                          sta->addr);
+ 
+               err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index a35f78be8dec..acef4ec928c1 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
+       int count = 50;
+       u32 reg, last_val;
+ 
++      /* Check if chip failed to wake up */
++      if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
++              return false;
++
+       if (AR_SREV_9300(ah))
+               return !ath9k_hw_detect_mac_hang(ah);
+ 
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 4182c3775a72..2681b5339810 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3346,8 +3346,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
+                       continue;
+ 
+               list_del(&data->list);
+-              INIT_WORK(&data->destroy_work, destroy_radio);
+-              schedule_work(&data->destroy_work);
++              spin_unlock_bh(&hwsim_radio_lock);
++              mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
++                                       NULL);
++              spin_lock_bh(&hwsim_radio_lock);
++
+       }
+       spin_unlock_bh(&hwsim_radio_lock);
+ }
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index a0bccb54a9bd..466b285cef3e 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -2109,7 +2109,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
+                       continue;
+ 
+               /* Allocate an alias_prop with enough space for the stem */
+-              ap = dt_alloc(sizeof(*ap) + len + 1, 4);
++              ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
+               if (!ap)
+                       continue;
+               memset(ap, 0, sizeof(*ap) + len + 1);
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index a87c8e1aef68..9c13aeeeb973 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3756,27 +3756,49 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
+ }
+ EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+ 
+-/*
+- * We should only need to wait 100ms after FLR, but some devices take longer.
+- * Wait for up to 1000ms for config space to return something other than -1.
+- * Intel IGD requires this when an LCD panel is attached.  We read the 2nd
+- * dword because VFs don't implement the 1st dword.
+- */
+ static void pci_flr_wait(struct pci_dev *dev)
+ {
+-      int i = 0;
++      int delay = 1, timeout = 60000;
+       u32 id;
+ 
+-      do {
+-              msleep(100);
++      /*
++       * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
++       * 100ms, but may silently discard requests while the FLR is in
++       * progress.  Wait 100ms before trying to access the device.
++       */
++      msleep(100);
++
++      /*
++       * After 100ms, the device should not silently discard config
++       * requests, but it may still indicate that it needs more time by
++       * responding to them with CRS completions.  The Root Port will
++       * generally synthesize ~0 data to complete the read (except when
++       * CRS SV is enabled and the read was for the Vendor ID; in that
++       * case it synthesizes 0x0001 data).
++       *
++       * Wait for the device to return a non-CRS completion.  Read the
++       * Command register instead of Vendor ID so we don't have to
++       * contend with the CRS SV value.
++       */
++      pci_read_config_dword(dev, PCI_COMMAND, &id);
++      while (id == ~0) {
++              if (delay > timeout) {
++                      dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
++                               100 + delay - 1);
++                      return;
++              }
++
++              if (delay > 1000)
++                      dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
++                               100 + delay - 1);
++
++              msleep(delay);
++              delay *= 2;
+               pci_read_config_dword(dev, PCI_COMMAND, &id);
+-      } while (i++ < 10 && id == ~0);
++      }
+ 
+-      if (id == ~0)
+-              dev_warn(&dev->dev, "Failed to return from FLR\n");
+-      else if (i > 1)
+-              dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
+-                       (i - 1) * 100);
++      if (delay > 1000)
++              dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
+ }
+ 
+ static int pcie_flr(struct pci_dev *dev, int probe)
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index df63b7d997e8..b40a074822cf 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -368,18 +368,6 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+       writel(value, padcfg0);
+ }
+ 
+-static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
+-{
+-      u32 value;
+-
+-      /* Put the pad into GPIO mode */
+-      value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+-      /* Disable SCI/SMI/NMI generation */
+-      value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+-      value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+-      writel(value, padcfg0);
+-}
+-
+ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
+                                    struct pinctrl_gpio_range *range,
+                                    unsigned pin)
+@@ -387,6 +375,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
+       struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+       void __iomem *padcfg0;
+       unsigned long flags;
++      u32 value;
+ 
+       raw_spin_lock_irqsave(&pctrl->lock, flags);
+ 
+@@ -396,7 +385,13 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
+       }
+ 
+       padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+-      intel_gpio_set_gpio_mode(padcfg0);
++      /* Put the pad into GPIO mode */
++      value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
++      /* Disable SCI/SMI/NMI generation */
++      value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
++      value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
++      writel(value, padcfg0);
++
+       /* Disable TX buffer and enable RX (this will be input) */
+       __intel_gpio_set_direction(padcfg0, true);
+ 
+@@ -775,8 +770,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
+ 
+       raw_spin_lock_irqsave(&pctrl->lock, flags);
+ 
+-      intel_gpio_set_gpio_mode(reg);
+-
+       value = readl(reg);
+ 
+       value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
+diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
+index 73e2f0b79dd4..c4770a94cc8e 100644
+--- a/drivers/power/supply/bq2415x_charger.c
++++ b/drivers/power/supply/bq2415x_charger.c
+@@ -1569,6 +1569,11 @@ static int bq2415x_probe(struct i2c_client *client,
+               acpi_id =
+                       acpi_match_device(client->dev.driver->acpi_match_table,
+                                         &client->dev);
++              if (!acpi_id) {
++                      dev_err(&client->dev, "failed to match device name\n");
++                      ret = -ENODEV;
++                      goto error_1;
++              }
+               name = kasprintf(GFP_KERNEL, "%s-%d", acpi_id->id, num);
+       }
+       if (!name) {
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index 1e560188dd13..e453d2a7d7f9 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -591,13 +591,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ int dasd_alias_add_device(struct dasd_device *device)
+ {
+       struct dasd_eckd_private *private = device->private;
+-      struct alias_lcu *lcu;
++      __u8 uaddr = private->uid.real_unit_addr;
++      struct alias_lcu *lcu = private->lcu;
+       unsigned long flags;
+       int rc;
+ 
+-      lcu = private->lcu;
+       rc = 0;
+       spin_lock_irqsave(&lcu->lock, flags);
++      /*
++       * Check if device and lcu type differ. If so, the uac data may be
++       * outdated and needs to be updated.
++       */
++      if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
++              lcu->flags |= UPDATE_PENDING;
++              DBF_DEV_EVENT(DBF_WARNING, device, "%s",
++                            "uid type mismatch - trigger rescan");
++      }
+       if (!(lcu->flags & UPDATE_PENDING)) {
+               rc = _add_device_to_lcu(lcu, device, device);
+               if (rc)
+diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
+index 41e28b23b26a..8ac27efe34fc 100644
+--- a/drivers/s390/char/Makefile
++++ b/drivers/s390/char/Makefile
+@@ -2,6 +2,8 @@
+ # S/390 character devices
+ #
+ 
++CFLAGS_REMOVE_sclp_early_core.o       += $(CC_FLAGS_EXPOLINE)
++
+ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+        sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
+        sclp_early.o
+diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
+index 11674698b36d..67903c93328b 100644
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
+ 
+ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+ {
++      struct channel_path *chp;
+       struct chp_link link;
+       struct chp_id chpid;
+       int status;
+@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+       chpid.id = sei_area->rsid;
+       /* allocate a new channel path structure, if needed */
+       status = chp_get_status(chpid);
+-      if (status < 0)
+-              chp_new(chpid);
+-      else if (!status)
++      if (!status)
+               return;
++
++      if (status < 0) {
++              chp_new(chpid);
++      } else {
++              chp = chpid_to_chp(chpid);
++              mutex_lock(&chp->lock);
++              chp_update_desc(chp);
++              mutex_unlock(&chp->lock);
++      }
+       memset(&link, 0, sizeof(struct chp_link));
+       link.chpid = chpid;
+       if ((sei_area->vf & 0xc0) != 0) {
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 2d9a8067eaca..579aa9accafc 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1774,6 +1774,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+       int             vbus;
+       u8              devctl;
+ 
++      pm_runtime_get_sync(dev);
+       spin_lock_irqsave(&musb->lock, flags);
+       val = musb->a_wait_bcon;
+       vbus = musb_platform_get_vbus_status(musb);
+@@ -1787,6 +1788,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+                       vbus = 0;
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
++      pm_runtime_put_sync(dev);
+ 
+       return sprintf(buf, "Vbus %s, timeout %lu msec\n",
+                       vbus ? "on" : "off", val);
+@@ -2483,10 +2485,11 @@ static int musb_remove(struct platform_device *pdev)
+       musb_generic_disable(musb);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
++      musb_platform_exit(musb);
++
+       pm_runtime_dont_use_autosuspend(musb->controller);
+       pm_runtime_put_sync(musb->controller);
+       pm_runtime_disable(musb->controller);
+-      musb_platform_exit(musb);
+       musb_phy_callback = NULL;
+       if (musb->dma_controller)
+               musb_dma_controller_destroy(musb->dma_controller);
+@@ -2710,7 +2713,8 @@ static int musb_resume(struct device *dev)
+       if ((devctl & mask) != (musb->context.devctl & mask))
+               musb->port1_status = 0;
+ 
+-      musb_start(musb);
++      musb_enable_interrupts(musb);
++      musb_platform_enable(musb);
+ 
+       spin_lock_irqsave(&musb->lock, flags);
+       error = musb_run_resume_work(musb);
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index d9cbda269462..331ddd07e505 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+               goto mknod_out;
+       }
+ 
++      if (!S_ISCHR(mode) && !S_ISBLK(mode))
++              goto mknod_out;
++
+       if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+               goto mknod_out;
+ 
+@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 
+       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+       if (buf == NULL) {
+-              kfree(full_path);
+               rc = -ENOMEM;
+-              free_xid(xid);
+-              return rc;
++              goto mknod_out;
+       }
+ 
+       if (backup_cred(cifs_sb))
+@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+               pdev->minor = cpu_to_le64(MINOR(device_number));
+               rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+                                                       &bytes_written, iov, 1);
+-      } /* else if (S_ISFIFO) */
++      }
+       tcon->ses->server->ops->close(xid, tcon, &fid);
+       d_drop(direntry);
+ 
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 542e33d29088..d10bb2c30bf8 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -276,11 +276,11 @@ static int kjournald2(void *arg)
+       goto loop;
+ 
+ end_loop:
+-      write_unlock(&journal->j_state_lock);
+       del_timer_sync(&journal->j_commit_timer);
+       journal->j_task = NULL;
+       wake_up(&journal->j_wait_done_commit);
+       jbd_debug(1, "Journal thread exiting.\n");
++      write_unlock(&journal->j_state_lock);
+       return 0;
+ }
+ 
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 8feecd5345e7..7e39719e27cb 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -600,7 +600,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+  * Returns true if the skb is tagged with multiple vlan headers, regardless
+  * of whether it is hardware accelerated or not.
+  */
+-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
++static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
+ {
+       __be16 protocol = skb->protocol;
+ 
+@@ -610,6 +610,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+               if (likely(!eth_type_vlan(protocol)))
+                       return false;
+ 
++              if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
++                      return false;
++
+               veh = (struct vlan_ethhdr *)skb->data;
+               protocol = veh->h_vlan_encapsulated_proto;
+       }
+@@ -627,7 +630,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+  *
+  * Returns features without unsafe ones if the skb has multiple tags.
+  */
+-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
++static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
+                                                   netdev_features_t features)
+ {
+       if (skb_vlan_tagged_multi(skb)) {
+diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
+index fe994d2e5286..ea985aa7a6c5 100644
+--- a/include/net/llc_conn.h
++++ b/include/net/llc_conn.h
+@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
+ 
+ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+                         struct proto *prot, int kern);
++void llc_sk_stop_all_timers(struct sock *sk, bool sync);
+ void llc_sk_free(struct sock *sk);
+ 
+ void llc_sk_reset(struct sock *sk);
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index 4ee67cb99143..05b9bb63dbec 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -870,6 +870,7 @@ struct kvm_ppc_smmu_info {
+ #define KVM_CAP_S390_USER_INSTR0 130
+ #define KVM_CAP_MSI_DEVID 131
+ #define KVM_CAP_PPC_HTM 132
++#define KVM_CAP_S390_BPB 152
+ 
+ #ifdef KVM_CAP_IRQ_ROUTING
+ 
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index 411226b26bca..04988d6466bf 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -117,19 +117,22 @@ int get_callchain_buffers(int event_max_stack)
+               goto exit;
+       }
+ 
++      /*
++       * If requesting per event more than the global cap,
++       * return a different error to help userspace figure
++       * this out.
++       *
++       * And also do it here so that we have &callchain_mutex held.
++       */
++      if (event_max_stack > sysctl_perf_event_max_stack) {
++              err = -EOVERFLOW;
++              goto exit;
++      }
++
+       if (count > 1) {
+               /* If the allocation failed, give up */
+               if (!callchain_cpus_entries)
+                       err = -ENOMEM;
+-              /*
+-               * If requesting per event more than the global cap,
+-               * return a different error to help userspace figure
+-               * this out.
+-               *
+-               * And also do it here so that we have &callchain_mutex held.
+-               */
+-              if (event_max_stack > sysctl_perf_event_max_stack)
+-                      err = -EOVERFLOW;
+               goto exit;
+       }
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 74710fad35d5..b1d6b9888fba 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9456,9 +9456,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+                * __u16 sample size limit.
+                */
+               if (attr->sample_stack_user >= USHRT_MAX)
+-                      ret = -EINVAL;
++                      return -EINVAL;
+               else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
+-                      ret = -EINVAL;
++                      return -EINVAL;
+       }
+ 
+       if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3d9190c2940d..5407d5f7b2d0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2871,7 +2871,7 @@ netdev_features_t passthru_features_check(struct sk_buff 
*skb,
+ }
+ EXPORT_SYMBOL(passthru_features_check);
+ 
+-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
++static netdev_features_t dflt_features_check(struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+ {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index a426790b0688..128c811dcb1a 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -54,7 +54,8 @@ do {                                         \
+ static void neigh_timer_handler(unsigned long arg);
+ static void __neigh_notify(struct neighbour *n, int type, int flags);
+ static void neigh_update_notify(struct neighbour *neigh);
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++                                  struct net_device *dev);
+ 
+ #ifdef CONFIG_PROC_FS
+ static const struct file_operations neigh_stat_seq_fops;
+@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ {
+       write_lock_bh(&tbl->lock);
+       neigh_flush_dev(tbl, dev);
+-      pneigh_ifdown(tbl, dev);
+-      write_unlock_bh(&tbl->lock);
++      pneigh_ifdown_and_unlock(tbl, dev);
+ 
+       del_timer_sync(&tbl->proxy_timer);
+       pneigh_queue_purge(&tbl->proxy_queue);
+@@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
+       return -ENOENT;
+ }
+ 
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++                                  struct net_device *dev)
+ {
+-      struct pneigh_entry *n, **np;
++      struct pneigh_entry *n, **np, *freelist = NULL;
+       u32 h;
+ 
+       for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+@@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+               while ((n = *np) != NULL) {
+                       if (!dev || n->dev == dev) {
+                               *np = n->next;
+-                              if (tbl->pdestructor)
+-                                      tbl->pdestructor(n);
+-                              if (n->dev)
+-                                      dev_put(n->dev);
+-                              kfree(n);
++                              n->next = freelist;
++                              freelist = n;
+                               continue;
+                       }
+                       np = &n->next;
+               }
+       }
++      write_unlock_bh(&tbl->lock);
++      while ((n = freelist)) {
++              freelist = n->next;
++              n->next = NULL;
++              if (tbl->pdestructor)
++                      tbl->pdestructor(n);
++              if (n->dev)
++                      dev_put(n->dev);
++              kfree(n);
++      }
+       return -ENOENT;
+ }
+ 
+@@ -2279,12 +2287,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ 
+       err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
+       if (!err) {
+-              if (tb[NDA_IFINDEX])
++              if (tb[NDA_IFINDEX]) {
++                      if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
++                              return -EINVAL;
+                       filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
+-
+-              if (tb[NDA_MASTER])
++              }
++              if (tb[NDA_MASTER]) {
++                      if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
++                              return -EINVAL;
+                       filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
+-
++              }
+               if (filter_idx || filter_master_idx)
+                       flags |= NLM_F_DUMP_FILTERED;
+       }
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index e1d4d898a007..f0252768ecf4 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -25,6 +25,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
++#include <linux/ratelimit.h>
+ #include <linux/kernel.h>
+ #include <linux/keyctl.h>
+ #include <linux/err.h>
+@@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 
+                       next_opt = memchr(opt, '#', end - opt) ?: end;
+                       opt_len = next_opt - opt;
+-                      if (!opt_len) {
+-                              printk(KERN_WARNING
+-                                     "Empty option to dns_resolver key\n");
++                      if (opt_len <= 0 || opt_len > 128) {
++                              pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
++                                                  opt_len);
+                               return -EINVAL;
+                       }
+ 
+@@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+                       }
+ 
+               bad_option_value:
+-                      printk(KERN_WARNING
+-                             "Option '%*.*s' to dns_resolver key:"
+-                             " bad/missing value\n",
+-                             opt_nlen, opt_nlen, opt);
++                      pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
++                                          opt_nlen, opt_nlen, opt);
+                       return -EINVAL;
+               } while (opt = next_opt + 1, opt < end);
+       }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 0d1a767db1bb..0fc5dad02fe8 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2662,8 +2662,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+       case TCP_MD5SIG:
+-              /* Read the IP->Key mappings from userspace */
+-              err = tp->af_specific->md5_parse(sk, optval, optlen);
++              if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
++                      err = tp->af_specific->md5_parse(sk, optval, optlen);
++              else
++                      err = -EINVAL;
+               break;
+ #endif
+       case TCP_USER_TIMEOUT:
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index eb05ad940e37..52b0a84be765 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3943,11 +3943,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
+       int length = (th->doff << 2) - sizeof(*th);
+       const u8 *ptr = (const u8 *)(th + 1);
+ 
+-      /* If the TCP option is too short, we can short cut */
+-      if (length < TCPOLEN_MD5SIG)
+-              return NULL;
+-
+-      while (length > 0) {
++      /* If not enough data remaining, we can short cut */
++      while (length >= TCPOLEN_MD5SIG) {
+               int opcode = *ptr++;
+               int opsize;
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index d6a4b2c73a7c..f6ac472acd0f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2811,6 +2811,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
+ 
+ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
+       [RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
++      [RTA_PREFSRC]           = { .len = sizeof(struct in6_addr) },
+       [RTA_OIF]               = { .type = NLA_U32 },
+       [RTA_IIF]               = { .type = NLA_U32 },
+       [RTA_PRIORITY]          = { .type = NLA_U32 },
+@@ -2820,6 +2821,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
+       [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
+       [RTA_ENCAP]             = { .type = NLA_NESTED },
+       [RTA_EXPIRES]           = { .type = NLA_U32 },
++      [RTA_TABLE]             = { .type = NLA_U32 },
+ };
+ 
+ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 163f1fa53917..9b214f313cc0 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -590,6 +590,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+       lock_sock(sk);
+ 
+       error = -EINVAL;
++
++      if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
++          sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
++          sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
++          sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
++              goto end;
++
+       if (sp->sa_protocol != PX_PROTO_OL2TP)
+               goto end;
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index f7caf0f5d9c8..d6bc5f2a1175 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
+               llc->laddr.lsap, llc->daddr.lsap);
+       if (!llc_send_disc(sk))
+               llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+-      if (!sock_flag(sk, SOCK_ZAPPED))
++      if (!sock_flag(sk, SOCK_ZAPPED)) {
++              struct llc_sap *sap = llc->sap;
++
++              /* Hold this for release_sock(), so that llc_backlog_rcv()
++               * could still use it.
++               */
++              llc_sap_hold(sap);
+               llc_sap_remove_socket(llc->sap, sk);
+-      release_sock(sk);
++              release_sock(sk);
++              llc_sap_put(sap);
++      } else {
++              release_sock(sk);
++      }
+       if (llc->dev)
+               dev_put(llc->dev);
+       sock_put(sk);
+diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
+index ea225bd2672c..f8d4ab8ca1a5 100644
+--- a/net/llc/llc_c_ac.c
++++ b/net/llc/llc_c_ac.c
+@@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
+ 
+ int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
+ {
+-      struct llc_sock *llc = llc_sk(sk);
+-
+-      del_timer(&llc->pf_cycle_timer.timer);
+-      del_timer(&llc->ack_timer.timer);
+-      del_timer(&llc->rej_sent_timer.timer);
+-      del_timer(&llc->busy_state_timer.timer);
+-      llc->ack_must_be_send = 0;
+-      llc->ack_pf = 0;
++      llc_sk_stop_all_timers(sk, false);
+       return 0;
+ }
+ 
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index 8bc5a1bd2d45..d861b74ad068 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -951,6 +951,26 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr
+       return sk;
+ }
+ 
++void llc_sk_stop_all_timers(struct sock *sk, bool sync)
++{
++      struct llc_sock *llc = llc_sk(sk);
++
++      if (sync) {
++              del_timer_sync(&llc->pf_cycle_timer.timer);
++              del_timer_sync(&llc->ack_timer.timer);
++              del_timer_sync(&llc->rej_sent_timer.timer);
++              del_timer_sync(&llc->busy_state_timer.timer);
++      } else {
++              del_timer(&llc->pf_cycle_timer.timer);
++              del_timer(&llc->ack_timer.timer);
++              del_timer(&llc->rej_sent_timer.timer);
++              del_timer(&llc->busy_state_timer.timer);
++      }
++
++      llc->ack_must_be_send = 0;
++      llc->ack_pf = 0;
++}
++
+ /**
+  *    llc_sk_free - Frees a LLC socket
+  *    @sk - socket to free
+@@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
+ 
+       llc->state = LLC_CONN_OUT_OF_SVC;
+       /* Stop all (possibly) running timers */
+-      llc_conn_ac_stop_all_timers(sk, NULL);
++      llc_sk_stop_all_timers(sk, true);
+ #ifdef DEBUG_LLC_CONN_ALLOC
+       printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
+               skb_queue_len(&llc->pdu_unack_q),
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 267db0d603bc..a027f8c00944 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -333,11 +333,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+       skb_set_queue_mapping(skb, queue_index);
+ }
+ 
+-/* register_prot_hook must be invoked with the po->bind_lock held,
++/* __register_prot_hook must be invoked through register_prot_hook
+  * or from a context in which asynchronous accesses to the packet
+  * socket is not possible (packet_create()).
+  */
+-static void register_prot_hook(struct sock *sk)
++static void __register_prot_hook(struct sock *sk)
+ {
+       struct packet_sock *po = pkt_sk(sk);
+ 
+@@ -352,8 +352,13 @@ static void register_prot_hook(struct sock *sk)
+       }
+ }
+ 
+-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
+- * held.   If the sync parameter is true, we will temporarily drop
++static void register_prot_hook(struct sock *sk)
++{
++      lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
++      __register_prot_hook(sk);
++}
++
++/* If the sync parameter is true, we will temporarily drop
+  * the po->bind_lock and do a synchronize_net to make sure no
+  * asynchronous packet processing paths still refer to the elements
+  * of po->prot_hook.  If the sync parameter is false, it is the
+@@ -363,6 +368,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
+ {
+       struct packet_sock *po = pkt_sk(sk);
+ 
++      lockdep_assert_held_once(&po->bind_lock);
++
+       po->running = 0;
+ 
+       if (po->fanout)
+@@ -3017,6 +3024,7 @@ static int packet_release(struct socket *sock)
+ 
+       packet_flush_mclist(sk);
+ 
++      lock_sock(sk);
+       if (po->rx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
+               packet_set_ring(sk, &req_u, 1, 0);
+@@ -3026,6 +3034,7 @@ static int packet_release(struct socket *sock)
+               memset(&req_u, 0, sizeof(req_u));
+               packet_set_ring(sk, &req_u, 1, 1);
+       }
++      release_sock(sk);
+ 
+       f = fanout_release(sk);
+ 
+@@ -3259,7 +3268,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ 
+       if (proto) {
+               po->prot_hook.type = proto;
+-              register_prot_hook(sk);
++              __register_prot_hook(sk);
+       }
+ 
+       mutex_lock(&net->packet.sklist_lock);
+@@ -3654,6 +3663,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+               union tpacket_req_u req_u;
+               int len;
+ 
++              lock_sock(sk);
+               switch (po->tp_version) {
+               case TPACKET_V1:
+               case TPACKET_V2:
+@@ -3664,12 +3674,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+                       len = sizeof(req_u.req3);
+                       break;
+               }
+-              if (optlen < len)
+-                      return -EINVAL;
+-              if (copy_from_user(&req_u.req, optval, len))
+-                      return -EFAULT;
+-              return packet_set_ring(sk, &req_u, 0,
+-                      optname == PACKET_TX_RING);
++              if (optlen < len) {
++                      ret = -EINVAL;
++              } else {
++                      if (copy_from_user(&req_u.req, optval, len))
++                              ret = -EFAULT;
++                      else
++                              ret = packet_set_ring(sk, &req_u, 0,
++                                                  optname == PACKET_TX_RING);
++              }
++              release_sock(sk);
++              return ret;
+       }
+       case PACKET_COPY_THRESH:
+       {
+@@ -3735,12 +3750,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+               if (optlen != sizeof(val))
+                       return -EINVAL;
+-              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-                      return -EBUSY;
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+-              po->tp_loss = !!val;
+-              return 0;
++
++              lock_sock(sk);
++              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++                      ret = -EBUSY;
++              } else {
++                      po->tp_loss = !!val;
++                      ret = 0;
++              }
++              release_sock(sk);
++              return ret;
+       }
+       case PACKET_AUXDATA:
+       {
+@@ -3751,7 +3772,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+ 
++              lock_sock(sk);
+               po->auxdata = !!val;
++              release_sock(sk);
+               return 0;
+       }
+       case PACKET_ORIGDEV:
+@@ -3763,7 +3786,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+ 
++              lock_sock(sk);
+               po->origdev = !!val;
++              release_sock(sk);
+               return 0;
+       }
+       case PACKET_VNET_HDR:
+@@ -3772,15 +3797,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+               if (sock->type != SOCK_RAW)
+                       return -EINVAL;
+-              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-                      return -EBUSY;
+               if (optlen < sizeof(val))
+                       return -EINVAL;
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+ 
+-              po->has_vnet_hdr = !!val;
+-              return 0;
++              lock_sock(sk);
++              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++                      ret = -EBUSY;
++              } else {
++                      po->has_vnet_hdr = !!val;
++                      ret = 0;
++              }
++              release_sock(sk);
++              return ret;
+       }
+       case PACKET_TIMESTAMP:
+       {
+@@ -3818,11 +3848,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+               if (optlen != sizeof(val))
+                       return -EINVAL;
+-              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-                      return -EBUSY;
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+-              po->tp_tx_has_off = !!val;
++
++              lock_sock(sk);
++              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++                      ret = -EBUSY;
++              } else {
++                      po->tp_tx_has_off = !!val;
++                      ret = 0;
++              }
++              release_sock(sk);
+               return 0;
+       }
+       case PACKET_QDISC_BYPASS:
+@@ -4219,7 +4255,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+       /* Added to avoid minimal code churn */
+       struct tpacket_req *req = &req_u->req;
+ 
+-      lock_sock(sk);
+       /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+       if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+               net_warn_ratelimited("Tx-ring is not supported.\n");
+@@ -4355,7 +4390,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+-      release_sock(sk);
+       return err;
+ }
+ 
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index d55bfc34d6b3..1309e2a7baad 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -109,10 +109,12 @@ struct packet_sock {
+       int                     copy_thresh;
+       spinlock_t              bind_lock;
+       struct mutex            pg_vec_lock;
+-      unsigned int            running:1,      /* prot_hook is attached*/
+-                              auxdata:1,
++      unsigned int            running;        /* bind_lock must be held */
++      unsigned int            auxdata:1,      /* writer must hold sock lock */
+                               origdev:1,
+-                              has_vnet_hdr:1;
++                              has_vnet_hdr:1,
++                              tp_loss:1,
++                              tp_tx_has_off:1;
+       int                     pressure;
+       int                     ifindex;        /* bound device         */
+       __be16                  num;
+@@ -122,8 +124,6 @@ struct packet_sock {
+       enum tpacket_versions   tp_version;
+       unsigned int            tp_hdrlen;
+       unsigned int            tp_reserve;
+-      unsigned int            tp_loss:1;
+-      unsigned int            tp_tx_has_off:1;
+       unsigned int            tp_tstamp;
+       struct net_device __rcu *cached_dev;
+       int                     (*xmit)(struct sk_buff *skb);
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 95c463cbb9a6..235db2c9bbbb 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -634,7 +634,7 @@ int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
+               }
+       }
+ 
+-      return 0;
++      return -ENOENT;
+ }
+ 
+ struct ifeheadr {
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 355d95a7cd81..e031797ad311 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -521,46 +521,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+       addr->v6.sin6_scope_id = 0;
+ }
+ 
+-/* Compare addresses exactly.
+- * v4-mapped-v6 is also in consideration.
+- */
+-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+-                          const union sctp_addr *addr2)
++static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
++                            const union sctp_addr *addr2)
+ {
+       if (addr1->sa.sa_family != addr2->sa.sa_family) {
+               if (addr1->sa.sa_family == AF_INET &&
+                   addr2->sa.sa_family == AF_INET6 &&
+-                  ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
+-                      if (addr2->v6.sin6_port == addr1->v4.sin_port &&
+-                          addr2->v6.sin6_addr.s6_addr32[3] ==
+-                          addr1->v4.sin_addr.s_addr)
+-                              return 1;
+-              }
++                  ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
++                  addr2->v6.sin6_addr.s6_addr32[3] ==
++                  addr1->v4.sin_addr.s_addr)
++                      return 1;
++
+               if (addr2->sa.sa_family == AF_INET &&
+                   addr1->sa.sa_family == AF_INET6 &&
+-                  ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
+-                      if (addr1->v6.sin6_port == addr2->v4.sin_port &&
+-                          addr1->v6.sin6_addr.s6_addr32[3] ==
+-                          addr2->v4.sin_addr.s_addr)
+-                              return 1;
+-              }
++                  ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
++                  addr1->v6.sin6_addr.s6_addr32[3] ==
++                  addr2->v4.sin_addr.s_addr)
++                      return 1;
++
+               return 0;
+       }
+-      if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+-              return 0;
++
+       if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+               return 0;
++
+       /* If this is a linklocal address, compare the scope_id. */
+-      if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+-              if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+-                  (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
+-                      return 0;
+-              }
+-      }
++      if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
++          addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
++          addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
++              return 0;
+ 
+       return 1;
+ }
+ 
++/* Compare addresses exactly.
++ * v4-mapped-v6 is also in consideration.
++ */
++static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
++                          const union sctp_addr *addr2)
++{
++      return __sctp_v6_cmp_addr(addr1, addr2) &&
++             addr1->v6.sin6_port == addr2->v6.sin6_port;
++}
++
+ /* Initialize addr struct to INADDR_ANY. */
+ static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
+ {
+@@ -844,8 +847,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+                              const union sctp_addr *addr2,
+                              struct sctp_sock *opt)
+ {
+-      struct sctp_af *af1, *af2;
+       struct sock *sk = sctp_opt2sk(opt);
++      struct sctp_af *af1, *af2;
+ 
+       af1 = sctp_get_af_specific(addr1->sa.sa_family);
+       af2 = sctp_get_af_specific(addr2->sa.sa_family);
+@@ -861,10 +864,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+       if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
+               return 1;
+ 
+-      if (addr1->sa.sa_family != addr2->sa.sa_family)
+-              return 0;
+-
+-      return af1->cmp_addr(addr1, addr2);
++      return __sctp_v6_cmp_addr(addr1, addr2);
+ }
+ 
+ /* Verify that the provided sockaddr looks bindable.   Common verification,
+diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
+index 6cbc935ddd96..bbee334ab1b0 100644
+--- a/net/strparser/strparser.c
++++ b/net/strparser/strparser.c
+@@ -285,9 +285,9 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
+                                       strp_start_rx_timer(strp);
+                               }
+ 
++                              rxm->accum_len += cand_len;
+                               strp->rx_need_bytes = rxm->strp.full_len -
+                                                      rxm->accum_len;
+-                              rxm->accum_len += cand_len;
+                               rxm->early_eaten = cand_len;
+                               STRP_STATS_ADD(strp->stats.rx_bytes, cand_len);
+                               desc->count = 0; /* Stop reading socket */
+@@ -310,6 +310,7 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
+               /* Hurray, we have a new message! */
+               del_timer(&strp->rx_msg_timer);
+               strp->rx_skb_head = NULL;
++              strp->rx_need_bytes = 0;
+               STRP_STATS_INCR(strp->stats.rx_msgs);
+ 
+               /* Give skb to upper layer */
+@@ -374,9 +375,7 @@ void strp_data_ready(struct strparser *strp)
+               return;
+ 
+       if (strp->rx_need_bytes) {
+-              if (strp_peek_len(strp) >= strp->rx_need_bytes)
+-                      strp->rx_need_bytes = 0;
+-              else
++              if (strp_peek_len(strp) < strp->rx_need_bytes)
+                       return;
+       }
+ 
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index 3200059d14b2..9ba3c462f86e 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -79,7 +79,8 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
+ 
+ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+       [TIPC_NLA_NET_UNSPEC]           = { .type = NLA_UNSPEC },
+-      [TIPC_NLA_NET_ID]               = { .type = NLA_U32 }
++      [TIPC_NLA_NET_ID]               = { .type = NLA_U32 },
++      [TIPC_NLA_NET_ADDR]             = { .type = NLA_U32 },
+ };
+ 
+ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
+index 4bc58822416c..d2c6cdd9d42b 100644
+--- a/tools/perf/util/dso.c
++++ b/tools/perf/util/dso.c
+@@ -366,23 +366,7 @@ static int __open_dso(struct dso *dso, struct machine *machine)
+       if (!is_regular_file(name))
+               return -EINVAL;
+ 
+-      if (dso__needs_decompress(dso)) {
+-              char newpath[KMOD_DECOMP_LEN];
+-              size_t len = sizeof(newpath);
+-
+-              if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
+-                      free(name);
+-                      return -dso->load_errno;
+-              }
+-
+-              strcpy(name, newpath);
+-      }
+-
+       fd = do_open(name);
+-
+-      if (dso__needs_decompress(dso))
+-              unlink(name);
+-
+       free(name);
+       return fd;
+ }
