commit:     ef0ea4248bddb2347474f29fe2a408600ee9302c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Nov 10 16:19:01 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Nov 10 16:19:01 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ef0ea424

Linux patch 4.14.153

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1152_linux-4.14.153.patch | 3786 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3790 insertions(+)

diff --git a/0000_README b/0000_README
index b873d53..b7d2410 100644
--- a/0000_README
+++ b/0000_README
@@ -651,6 +651,10 @@ Patch:  1151_linux-4.14.152.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.14.152
 
+Patch:  1152_linux-4.14.153.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.14.153
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1152_linux-4.14.153.patch b/1152_linux-4.14.153.patch
new file mode 100644
index 0000000..711cfbb
--- /dev/null
+++ b/1152_linux-4.14.153.patch
@@ -0,0 +1,3786 @@
+diff --git a/Makefile b/Makefile
+index 1d7f47334ca2..2819ed540ce2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 152
++SUBLEVEL = 153
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+@@ -840,6 +840,15 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
+ # Require designated initializers for all marked structures
+ KBUILD_CFLAGS   += $(call cc-option,-Werror=designated-init)
+ 
++# change __FILE__ to the relative path from the srctree
++KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
++
++# ensure -fcf-protection is disabled when using retpoline as it is
++# incompatible with -mindirect-branch=thunk-extern
++ifdef CONFIG_RETPOLINE
++KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
++endif
++
+ # use the deterministic mode of AR if available
+ KBUILD_ARFLAGS := $(call ar-option,D)
+ 
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index bf15efbe8a71..836550f2297a 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -450,7 +450,7 @@
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               reg = <0x302d0000 0x10000>;
+                               interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX7D_CLK_DUMMY>,
++                              clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
+                                        <&clks IMX7D_GPT1_ROOT_CLK>;
+                               clock-names = "ipg", "per";
+                       };
+@@ -459,7 +459,7 @@
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               reg = <0x302e0000 0x10000>;
+                               interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX7D_CLK_DUMMY>,
++                              clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
+                                        <&clks IMX7D_GPT2_ROOT_CLK>;
+                               clock-names = "ipg", "per";
+                               status = "disabled";
+@@ -469,7 +469,7 @@
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               reg = <0x302f0000 0x10000>;
+                               interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX7D_CLK_DUMMY>,
++                              clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
+                                        <&clks IMX7D_GPT3_ROOT_CLK>;
+                               clock-names = "ipg", "per";
+                               status = "disabled";
+@@ -479,7 +479,7 @@
+                               compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
+                               reg = <0x30300000 0x10000>;
+                               interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+-                              clocks = <&clks IMX7D_CLK_DUMMY>,
++                              clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
+                                        <&clks IMX7D_GPT4_ROOT_CLK>;
+                               clock-names = "ipg", "per";
+                               status = "disabled";
+diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+index fe4cbdc72359..7265d7072b5c 100644
+--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
++++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+@@ -270,3 +270,7 @@
+ &twl_gpio {
+       ti,use-leds;
+ };
++
++&twl_keypad {
++      status = "disabled";
++};
+diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
+index 8be04ec95adf..d80b2290ac2e 100644
+--- a/arch/arm/mach-davinci/dm365.c
++++ b/arch/arm/mach-davinci/dm365.c
+@@ -856,8 +856,8 @@ static s8 dm365_queue_priority_mapping[][2] = {
+ };
+ 
+ static const struct dma_slave_map dm365_edma_map[] = {
+-      { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
+-      { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
++      { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) },
++      { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) },
+       { "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) },
+       { "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) },
+       { "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) },
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 2c96190e018b..96b17a870b91 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -768,6 +768,36 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+       return NULL;
+ }
+ 
++static int alignment_get_arm(struct pt_regs *regs, u32 *ip, unsigned long *inst)
++{
++      u32 instr = 0;
++      int fault;
++
++      if (user_mode(regs))
++              fault = get_user(instr, ip);
++      else
++              fault = probe_kernel_address(ip, instr);
++
++      *inst = __mem_to_opcode_arm(instr);
++
++      return fault;
++}
++
++static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
++{
++      u16 instr = 0;
++      int fault;
++
++      if (user_mode(regs))
++              fault = get_user(instr, ip);
++      else
++              fault = probe_kernel_address(ip, instr);
++
++      *inst = __mem_to_opcode_thumb16(instr);
++
++      return fault;
++}
++
+ static int
+ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
+@@ -775,10 +805,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+       unsigned long instr = 0, instrptr;
+       int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+       unsigned int type;
+-      unsigned int fault;
+       u16 tinstr = 0;
+       int isize = 4;
+       int thumb2_32b = 0;
++      int fault;
+ 
+       if (interrupts_enabled(regs))
+               local_irq_enable();
+@@ -787,15 +817,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ 
+       if (thumb_mode(regs)) {
+               u16 *ptr = (u16 *)(instrptr & ~1);
+-              fault = probe_kernel_address(ptr, tinstr);
+-              tinstr = __mem_to_opcode_thumb16(tinstr);
++
++              fault = alignment_get_thumb(regs, ptr, &tinstr);
+               if (!fault) {
+                       if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+                           IS_T32(tinstr)) {
+                               /* Thumb-2 32-bit */
+-                              u16 tinst2 = 0;
+-                              fault = probe_kernel_address(ptr + 1, tinst2);
+-                              tinst2 = __mem_to_opcode_thumb16(tinst2);
++                              u16 tinst2;
++                              fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
+                               instr = __opcode_thumb32_compose(tinstr, tinst2);
+                               thumb2_32b = 1;
+                       } else {
+@@ -804,8 +833,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+                       }
+               }
+       } else {
+-              fault = probe_kernel_address((void *)instrptr, instr);
+-              instr = __mem_to_opcode_arm(instr);
++              fault = alignment_get_arm(regs, (void *)instrptr, &instr);
+       }
+ 
+       if (fault) {
+diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
+index 92e84181933a..c68408d51c4b 100644
+--- a/arch/arm/mm/proc-v7m.S
++++ b/arch/arm/mm/proc-v7m.S
+@@ -135,7 +135,6 @@ __v7m_setup_cont:
+       dsb
+       mov     r6, lr                  @ save LR
+       ldr     sp, =init_thread_union + THREAD_START_SP
+-      stmia   sp, {r0-r3, r12}
+       cpsie   i
+       svc     #0
+ 1:    cpsid   i
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
+index 15214d05fec1..8c20d4a0cb4e 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
+@@ -42,13 +42,14 @@
+ 
+               pinmux: pinmux@0014029c {
+                       compatible = "pinctrl-single";
+-                      reg = <0x0014029c 0x250>;
++                      reg = <0x0014029c 0x26c>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       pinctrl-single,register-width = <32>;
+                       pinctrl-single,function-mask = <0xf>;
+                       pinctrl-single,gpio-range = <
+-                              &range 0 154 MODE_GPIO
++                              &range 0  91 MODE_GPIO
++                              &range 95 60 MODE_GPIO
+                               >;
+                       range: gpio-range {
+                               #pinctrl-single,gpio-range-cells = <3>;
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+index 2b76293b51c8..3d2921ef2935 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+@@ -444,8 +444,7 @@
+                                       <&pinmux 108 16 27>,
+                                       <&pinmux 135 77 6>,
+                                       <&pinmux 141 67 4>,
+-                                      <&pinmux 145 149 6>,
+-                                      <&pinmux 151 91 4>;
++                                      <&pinmux 145 149 6>;
+               };
+ 
+               i2c1: i2c@000e0000 {
+diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
+index 7019e2967009..bbbf8057565b 100644
+--- a/arch/mips/bcm63xx/prom.c
++++ b/arch/mips/bcm63xx/prom.c
+@@ -84,7 +84,7 @@ void __init prom_init(void)
+                * Here we will start up CPU1 in the background and ask it to
+                * reconfigure itself then go back to sleep.
+                */
+-              memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
++              memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
+               __sync();
+               set_c0_cause(C_SW0);
+               cpumask_set_cpu(1, &bmips_booted_mask);
+diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
+index b3e2975f83d3..a564915fddc4 100644
+--- a/arch/mips/include/asm/bmips.h
++++ b/arch/mips/include/asm/bmips.h
+@@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void)
+ #endif
+ }
+ 
+-extern char bmips_reset_nmi_vec;
+-extern char bmips_reset_nmi_vec_end;
+-extern char bmips_smp_movevec;
+-extern char bmips_smp_int_vec;
+-extern char bmips_smp_int_vec_end;
++extern char bmips_reset_nmi_vec[];
++extern char bmips_reset_nmi_vec_end[];
++extern char bmips_smp_movevec[];
++extern char bmips_smp_int_vec[];
++extern char bmips_smp_int_vec_end[];
+ 
+ extern int bmips_smp_enabled;
+ extern int bmips_cpu_offset;
+diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
+index 382d12eb88f0..45fbcbbf2504 100644
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -457,10 +457,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end)
+ 
+ static inline void bmips_nmi_handler_setup(void)
+ {
+-      bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
+-              &bmips_reset_nmi_vec_end);
+-      bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
+-              &bmips_smp_int_vec_end);
++      bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
++              bmips_reset_nmi_vec_end);
++      bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
++              bmips_smp_int_vec_end);
+ }
+ 
+ struct reset_vec_info {
+diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
+index 53b31c2bcdf4..7e3ccf21830e 100644
+--- a/arch/powerpc/include/asm/cputable.h
++++ b/arch/powerpc/include/asm/cputable.h
+@@ -215,7 +215,9 @@ enum {
+ #define CPU_FTR_DAWR                  LONG_ASM_CONST(0x0400000000000000)
+ #define CPU_FTR_DABRX                 LONG_ASM_CONST(0x0800000000000000)
+ #define CPU_FTR_PMAO_BUG              LONG_ASM_CONST(0x1000000000000000)
++#define CPU_FTR_P9_TLBIE_STQ_BUG      LONG_ASM_CONST(0x0000400000000000)
+ #define CPU_FTR_POWER9_DD1            LONG_ASM_CONST(0x4000000000000000)
++#define CPU_FTR_P9_TLBIE_ERAT_BUG     LONG_ASM_CONST(0x0001000000000000)
+ 
+ #ifndef __ASSEMBLY__
+ 
+@@ -475,7 +477,8 @@ enum {
+           CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+           CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+           CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
+-          CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300)
++          CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
++          CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG)
+ #define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
+                            (~CPU_FTR_SAO))
+ #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index 2dba206b065a..2357df60de95 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -733,15 +733,45 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+       return true;
+ }
+ 
++/*
++ * Handle POWER9 broadcast tlbie invalidation issue using
++ * cpu feature flag.
++ */
++static __init void update_tlbie_feature_flag(unsigned long pvr)
++{
++      if (PVR_VER(pvr) == PVR_POWER9) {
++              /*
++               * Set the tlbie feature flag for anything below
++               * Nimbus DD 2.3 and Cumulus DD 1.3
++               */
++              if ((pvr & 0xe000) == 0) {
++                      /* Nimbus */
++                      if ((pvr & 0xfff) < 0x203)
++                              cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
++              } else if ((pvr & 0xc000) == 0) {
++                      /* Cumulus */
++                      if ((pvr & 0xfff) < 0x103)
++                              cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
++              } else {
++                      WARN_ONCE(1, "Unknown PVR");
++                      cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
++              }
++
++              cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
++      }
++}
++
+ static __init void cpufeatures_cpu_quirks(void)
+ {
+-      int version = mfspr(SPRN_PVR);
++      unsigned long version = mfspr(SPRN_PVR);
+ 
+       /*
+        * Not all quirks can be derived from the cpufeatures device tree.
+        */
+       if ((version & 0xffffff00) == 0x004e0100)
+               cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
++
++      update_tlbie_feature_flag(version);
+ }
+ 
+ static void __init cpufeatures_setup_finished(void)
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+index 27a41695fcfd..7f8f2a0189df 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+@@ -160,6 +160,9 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+       asm volatile("ptesync": : :"memory");
+       asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
+                    : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG))
++              asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
++                           : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
+       asm volatile("ptesync": : :"memory");
+ }
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+index 4962d537c186..669b547385f3 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -429,6 +429,37 @@ static inline int try_lock_tlbie(unsigned int *lock)
+       return old == 0;
+ }
+ 
++static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid)
++{
++
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++              /* Radix flush for a hash guest */
++
++              unsigned long rb,rs,prs,r,ric;
++
++              rb = PPC_BIT(52); /* IS = 2 */
++              rs = 0;  /* lpid = 0 */
++              prs = 0; /* partition scoped */
++              r = 1;   /* radix format */
++              ric = 0; /* RIC_FLSUH_TLB */
++
++              /*
++               * Need the extra ptesync to make sure we don't
++               * re-order the tlbie
++               */
++              asm volatile("ptesync": : :"memory");
++              asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
++                           : : "r"(rb), "i"(r), "i"(prs),
++                             "i"(ric), "r"(rs) : "memory");
++      }
++
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++              asm volatile("ptesync": : :"memory");
++              asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
++                           "r" (rb_value), "r" (lpid));
++      }
++}
++
+ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+                     long npages, int global, bool need_sync)
+ {
+@@ -448,6 +479,8 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+                       asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
+                                    "r" (rbvalues[i]), "r" (kvm->arch.lpid));
+               }
++
++              fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid);
+               asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+               kvm->arch.tlbie_lock = 0;
+       } else {
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index 640cf566e986..a4b6efbf667b 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -104,6 +104,37 @@ static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
+       return va;
+ }
+ 
++static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
++                                 int apsize, int ssize)
++{
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++              /* Radix flush for a hash guest */
++
++              unsigned long rb,rs,prs,r,ric;
++
++              rb = PPC_BIT(52); /* IS = 2 */
++              rs = 0;  /* lpid = 0 */
++              prs = 0; /* partition scoped */
++              r = 1;   /* radix format */
++              ric = 0; /* RIC_FLSUH_TLB */
++
++              /*
++               * Need the extra ptesync to make sure we don't
++               * re-order the tlbie
++               */
++              asm volatile("ptesync": : :"memory");
++              asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
++                           : : "r"(rb), "i"(r), "i"(prs),
++                             "i"(ric), "r"(rs) : "memory");
++      }
++
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++              /* Need the extra ptesync to ensure we don't reorder tlbie*/
++              asm volatile("ptesync": : :"memory");
++              ___tlbie(vpn, psize, apsize, ssize);
++      }
++}
++
+ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
+ {
+       unsigned long rb;
+@@ -181,6 +212,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
+               asm volatile("ptesync": : :"memory");
+       } else {
+               __tlbie(vpn, psize, apsize, ssize);
++              fixup_tlbie_vpn(vpn, psize, apsize, ssize);
+               asm volatile("eieio; tlbsync; ptesync": : :"memory");
+       }
+       if (lock_tlbie && !use_local)
+@@ -674,7 +706,7 @@ static void native_hpte_clear(void)
+  */
+ static void native_flush_hash_range(unsigned long number, int local)
+ {
+-      unsigned long vpn;
++      unsigned long vpn = 0;
+       unsigned long hash, index, hidx, shift, slot;
+       struct hash_pte *hptep;
+       unsigned long hpte_v;
+@@ -746,6 +778,10 @@ static void native_flush_hash_range(unsigned long number, int local)
+                               __tlbie(vpn, psize, psize, ssize);
+                       } pte_iterate_hashed_end();
+               }
++              /*
++               * Just do one more with the last used values.
++               */
++              fixup_tlbie_vpn(vpn, psize, psize, ssize);
+               asm volatile("eieio; tlbsync; ptesync":::"memory");
+ 
+               if (lock_tlbie)
+diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
+index 12f95b1f7d07..48ed34d52ffd 100644
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -491,6 +491,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+                            "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+               trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
+       }
++      /* do we need fixup here ?*/
+       asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ }
+ EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
+index 4b295cfd5f7e..41e782f126d6 100644
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -23,6 +23,37 @@
+ #define RIC_FLUSH_PWC 1
+ #define RIC_FLUSH_ALL 2
+ 
++static inline void __tlbie_va(unsigned long va, unsigned long pid,
++                            unsigned long ap, unsigned long ric)
++{
++      unsigned long rb,rs,prs,r;
++
++      rb = va & ~(PPC_BITMASK(52, 63));
++      rb |= ap << PPC_BITLSHIFT(58);
++      rs = pid << PPC_BITLSHIFT(31);
++      prs = 1; /* process scoped */
++      r = 1;   /* raidx format */
++
++      asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
++                   : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
++      trace_tlbie(0, 0, rb, rs, ric, prs, r);
++}
++
++
++static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
++                                unsigned long ap)
++{
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++              asm volatile("ptesync": : :"memory");
++              __tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
++      }
++
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++              asm volatile("ptesync": : :"memory");
++              __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
++      }
++}
++
+ static inline void __tlbiel_pid(unsigned long pid, int set,
+                               unsigned long ric)
+ {
+@@ -68,22 +99,64 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+       asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ }
+ 
+-static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
++static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
+ {
+       unsigned long rb,rs,prs,r;
+ 
+       rb = PPC_BIT(53); /* IS = 1 */
+       rs = pid << PPC_BITLSHIFT(31);
+       prs = 1; /* process scoped */
+-      r = 1;   /* raidx format */
++      r = 1;   /* radix format */
+ 
+-      asm volatile("ptesync": : :"memory");
+       asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+                    : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+-      asm volatile("eieio; tlbsync; ptesync": : :"memory");
+       trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+ 
++static inline void fixup_tlbie_pid(unsigned long pid)
++{
++      /*
++       * We can use any address for the invalidation, pick one which is
++       * probably unused as an optimisation.
++       */
++      unsigned long va = ((1UL << 52) - 1);
++
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
++              asm volatile("ptesync": : :"memory");
++              __tlbie_pid(0, RIC_FLUSH_TLB);
++      }
++
++      if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
++              asm volatile("ptesync": : :"memory");
++              __tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
++      }
++}
++
++static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
++{
++      asm volatile("ptesync": : :"memory");
++
++      /*
++       * Workaround the fact that the "ric" argument to __tlbie_pid
++       * must be a compile-time contraint to match the "i" constraint
++       * in the asm statement.
++       */
++      switch (ric) {
++      case RIC_FLUSH_TLB:
++              __tlbie_pid(pid, RIC_FLUSH_TLB);
++              fixup_tlbie_pid(pid);
++              break;
++      case RIC_FLUSH_PWC:
++              __tlbie_pid(pid, RIC_FLUSH_PWC);
++              break;
++      case RIC_FLUSH_ALL:
++      default:
++              __tlbie_pid(pid, RIC_FLUSH_ALL);
++              fixup_tlbie_pid(pid);
++      }
++      asm volatile("eieio; tlbsync; ptesync": : :"memory");
++}
++
+ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
+                             unsigned long ap, unsigned long ric)
+ {
+@@ -105,19 +178,10 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
+ static inline void _tlbie_va(unsigned long va, unsigned long pid,
+                            unsigned long ap, unsigned long ric)
+ {
+-      unsigned long rb,rs,prs,r;
+-
+-      rb = va & ~(PPC_BITMASK(52, 63));
+-      rb |= ap << PPC_BITLSHIFT(58);
+-      rs = pid << PPC_BITLSHIFT(31);
+-      prs = 1; /* process scoped */
+-      r = 1;   /* raidx format */
+-
+       asm volatile("ptesync": : :"memory");
+-      asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+-                   : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
++      __tlbie_va(va, pid, ap, ric);
++      fixup_tlbie_va(va, pid, ap);
+       asm volatile("eieio; tlbsync; ptesync": : :"memory");
+-      trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+ 
+ /*
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index f3d0bc9a9905..34dfadd4dcd4 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -648,6 +648,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+               ret = -ENOENT;
+               goto out;
+       }
++      if (cmd->status != BLK_STS_OK) {
++              dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
++                      req);
++              ret = -ENOENT;
++              goto out;
++      }
+       if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+               dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+                       req);
+diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
+index 8fbf175fdcc7..57c5cc51f862 100644
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -690,7 +690,21 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
+ 
+       /* remove all transactions, including active transaction */
+       spin_lock_irqsave(&bchan->vc.lock, flag);
++      /*
++       * If we have transactions queued, then some might be committed to the
++       * hardware in the desc fifo.  The only way to reset the desc fifo is
++       * to do a hardware reset (either by pipe or the entire block).
++       * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
++       * pipe.  If the pipe is left disabled (default state after pipe reset)
++       * and is accessed by a connected hardware engine, a fatal error in
++       * the BAM will occur.  There is a small window where this could happen
++       * with bam_chan_init_hw(), but it is assumed that the caller has
++       * stopped activity on any attached hardware engine.  Make sure to do
++       * this first so that the BAM hardware doesn't cause memory corruption
++       * by accessing freed resources.
++       */
+       if (bchan->curr_txd) {
++              bam_chan_init_hw(bchan, bchan->curr_txd->dir);
+               list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
+               bchan->curr_txd = NULL;
+       }
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index d8cbe149925b..14f60751729e 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -219,7 +219,7 @@ struct stm32f7_i2c_dev {
+       struct stm32f7_i2c_timings timing;
+ };
+ 
+-/**
++/*
+  * All these values are coming from I2C Specification, Version 6.0, 4th of
+  * April 2014.
+  *
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 804198eb0eef..bc9ebcc6508a 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -33,36 +33,9 @@
+ 
+ #include "stm32-adc-core.h"
+ 
+-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+-#define STM32F4_ADC_CSR                       (STM32_ADCX_COMN_OFFSET + 0x00)
+-#define STM32F4_ADC_CCR                       (STM32_ADCX_COMN_OFFSET + 0x04)
+-
+-/* STM32F4_ADC_CSR - bit fields */
+-#define STM32F4_EOC3                  BIT(17)
+-#define STM32F4_EOC2                  BIT(9)
+-#define STM32F4_EOC1                  BIT(1)
+-
+-/* STM32F4_ADC_CCR - bit fields */
+-#define STM32F4_ADC_ADCPRE_SHIFT      16
+-#define STM32F4_ADC_ADCPRE_MASK               GENMASK(17, 16)
+-
+ /* STM32 F4 maximum analog clock rate (from datasheet) */
+ #define STM32F4_ADC_MAX_CLK_RATE      36000000
+ 
+-/* STM32H7 - common registers for all ADC instances */
+-#define STM32H7_ADC_CSR                       (STM32_ADCX_COMN_OFFSET + 0x00)
+-#define STM32H7_ADC_CCR                       (STM32_ADCX_COMN_OFFSET + 0x08)
+-
+-/* STM32H7_ADC_CSR - bit fields */
+-#define STM32H7_EOC_SLV                       BIT(18)
+-#define STM32H7_EOC_MST                       BIT(2)
+-
+-/* STM32H7_ADC_CCR - bit fields */
+-#define STM32H7_PRESC_SHIFT           18
+-#define STM32H7_PRESC_MASK            GENMASK(21, 18)
+-#define STM32H7_CKMODE_SHIFT          16
+-#define STM32H7_CKMODE_MASK           GENMASK(17, 16)
+-
+ /* STM32 H7 maximum analog clock rate (from datasheet) */
+ #define STM32H7_ADC_MAX_CLK_RATE      36000000
+ 
+@@ -72,12 +45,16 @@
+  * @eoc1:     adc1 end of conversion flag in @csr
+  * @eoc2:     adc2 end of conversion flag in @csr
+  * @eoc3:     adc3 end of conversion flag in @csr
++ * @ier:      interrupt enable register offset for each adc
++ * @eocie_msk:        end of conversion interrupt enable mask in @ier
+  */
+ struct stm32_adc_common_regs {
+       u32 csr;
+       u32 eoc1_msk;
+       u32 eoc2_msk;
+       u32 eoc3_msk;
++      u32 ier;
++      u32 eocie_msk;
+ };
+ 
+ struct stm32_adc_priv;
+@@ -271,6 +248,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
+       .eoc1_msk = STM32F4_EOC1,
+       .eoc2_msk = STM32F4_EOC2,
+       .eoc3_msk = STM32F4_EOC3,
++      .ier = STM32F4_ADC_CR1,
++      .eocie_msk = STM32F4_EOCIE,
+ };
+ 
+ /* STM32H7 common registers definitions */
+@@ -278,8 +257,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
+       .csr = STM32H7_ADC_CSR,
+       .eoc1_msk = STM32H7_EOC_MST,
+       .eoc2_msk = STM32H7_EOC_SLV,
++      .ier = STM32H7_ADC_IER,
++      .eocie_msk = STM32H7_EOCIE,
++};
++
++static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
++      0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
+ };
+ 
++static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
++                                        unsigned int adc)
++{
++      u32 ier, offset = stm32_adc_offset[adc];
++
++      ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
++
++      return ier & priv->cfg->regs->eocie_msk;
++}
++
+ /* ADC common interrupt for all instances */
+ static void stm32_adc_irq_handler(struct irq_desc *desc)
+ {
+@@ -290,13 +285,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
+       chained_irq_enter(chip, desc);
+       status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
+ 
+-      if (status & priv->cfg->regs->eoc1_msk)
++      /*
++       * End of conversion may be handled by using IRQ or DMA. There may be a
++       * race here when two conversions complete at the same time on several
++       * ADCs. EOC may be read 'set' for several ADCs, with:
++       * - an ADC configured to use DMA (EOC triggers the DMA request, and
++       *   is then automatically cleared by DR read in hardware)
++       * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
++       *   be called in this case)
++       * So both EOC status bit in CSR and EOCIE control bit must be checked
++       * before invoking the interrupt handler (e.g. call ISR only for
++       * IRQ-enabled ADCs).
++       */
++      if (status & priv->cfg->regs->eoc1_msk &&
++          stm32_adc_eoc_enabled(priv, 0))
+               generic_handle_irq(irq_find_mapping(priv->domain, 0));
+ 
+-      if (status & priv->cfg->regs->eoc2_msk)
++      if (status & priv->cfg->regs->eoc2_msk &&
++          stm32_adc_eoc_enabled(priv, 1))
+               generic_handle_irq(irq_find_mapping(priv->domain, 1));
+ 
+-      if (status & priv->cfg->regs->eoc3_msk)
++      if (status & priv->cfg->regs->eoc3_msk &&
++          stm32_adc_eoc_enabled(priv, 2))
+               generic_handle_irq(irq_find_mapping(priv->domain, 2));
+ 
+       chained_irq_exit(chip, desc);
+diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
+index 250ee958a669..9f8559cf86c4 100644
+--- a/drivers/iio/adc/stm32-adc-core.h
++++ b/drivers/iio/adc/stm32-adc-core.h
+@@ -37,8 +37,143 @@
+  * --------------------------------------------------------
+  */
+ #define STM32_ADC_MAX_ADCS            3
++#define STM32_ADC_OFFSET              0x100
+ #define STM32_ADCX_COMN_OFFSET                0x300
+ 
++/* STM32F4 - Registers for each ADC instance */
++#define STM32F4_ADC_SR                        0x00
++#define STM32F4_ADC_CR1                       0x04
++#define STM32F4_ADC_CR2                       0x08
++#define STM32F4_ADC_SMPR1             0x0C
++#define STM32F4_ADC_SMPR2             0x10
++#define STM32F4_ADC_HTR                       0x24
++#define STM32F4_ADC_LTR                       0x28
++#define STM32F4_ADC_SQR1              0x2C
++#define STM32F4_ADC_SQR2              0x30
++#define STM32F4_ADC_SQR3              0x34
++#define STM32F4_ADC_JSQR              0x38
++#define STM32F4_ADC_JDR1              0x3C
++#define STM32F4_ADC_JDR2              0x40
++#define STM32F4_ADC_JDR3              0x44
++#define STM32F4_ADC_JDR4              0x48
++#define STM32F4_ADC_DR                        0x4C
++
++/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
++#define STM32F4_ADC_CSR                       (STM32_ADCX_COMN_OFFSET + 0x00)
++#define STM32F4_ADC_CCR                       (STM32_ADCX_COMN_OFFSET + 0x04)
++
++/* STM32F4_ADC_SR - bit fields */
++#define STM32F4_STRT                  BIT(4)
++#define STM32F4_EOC                   BIT(1)
++
++/* STM32F4_ADC_CR1 - bit fields */
++#define STM32F4_RES_SHIFT             24
++#define STM32F4_RES_MASK              GENMASK(25, 24)
++#define STM32F4_SCAN                  BIT(8)
++#define STM32F4_EOCIE                 BIT(5)
++
++/* STM32F4_ADC_CR2 - bit fields */
++#define STM32F4_SWSTART                       BIT(30)
++#define STM32F4_EXTEN_SHIFT           28
++#define STM32F4_EXTEN_MASK            GENMASK(29, 28)
++#define STM32F4_EXTSEL_SHIFT          24
++#define STM32F4_EXTSEL_MASK           GENMASK(27, 24)
++#define STM32F4_EOCS                  BIT(10)
++#define STM32F4_DDS                   BIT(9)
++#define STM32F4_DMA                   BIT(8)
++#define STM32F4_ADON                  BIT(0)
++
++/* STM32F4_ADC_CSR - bit fields */
++#define STM32F4_EOC3                  BIT(17)
++#define STM32F4_EOC2                  BIT(9)
++#define STM32F4_EOC1                  BIT(1)
++
++/* STM32F4_ADC_CCR - bit fields */
++#define STM32F4_ADC_ADCPRE_SHIFT      16
++#define STM32F4_ADC_ADCPRE_MASK               GENMASK(17, 16)
++
++/* STM32H7 - Registers for each ADC instance */
++#define STM32H7_ADC_ISR                       0x00
++#define STM32H7_ADC_IER                       0x04
++#define STM32H7_ADC_CR                        0x08
++#define STM32H7_ADC_CFGR              0x0C
++#define STM32H7_ADC_SMPR1             0x14
++#define STM32H7_ADC_SMPR2             0x18
++#define STM32H7_ADC_PCSEL             0x1C
++#define STM32H7_ADC_SQR1              0x30
++#define STM32H7_ADC_SQR2              0x34
++#define STM32H7_ADC_SQR3              0x38
++#define STM32H7_ADC_SQR4              0x3C
++#define STM32H7_ADC_DR                        0x40
++#define STM32H7_ADC_CALFACT           0xC4
++#define STM32H7_ADC_CALFACT2          0xC8
++
++/* STM32H7 - common registers for all ADC instances */
++#define STM32H7_ADC_CSR                       (STM32_ADCX_COMN_OFFSET + 0x00)
++#define STM32H7_ADC_CCR                       (STM32_ADCX_COMN_OFFSET + 0x08)
++
++/* STM32H7_ADC_ISR - bit fields */
++#define STM32H7_EOC                   BIT(2)
++#define STM32H7_ADRDY                 BIT(0)
++
++/* STM32H7_ADC_IER - bit fields */
++#define STM32H7_EOCIE                 STM32H7_EOC
++
++/* STM32H7_ADC_CR - bit fields */
++#define STM32H7_ADCAL                 BIT(31)
++#define STM32H7_ADCALDIF              BIT(30)
++#define STM32H7_DEEPPWD                       BIT(29)
++#define STM32H7_ADVREGEN              BIT(28)
++#define STM32H7_LINCALRDYW6           BIT(27)
++#define STM32H7_LINCALRDYW5           BIT(26)
++#define STM32H7_LINCALRDYW4           BIT(25)
++#define STM32H7_LINCALRDYW3           BIT(24)
++#define STM32H7_LINCALRDYW2           BIT(23)
++#define STM32H7_LINCALRDYW1           BIT(22)
++#define STM32H7_ADCALLIN              BIT(16)
++#define STM32H7_BOOST                 BIT(8)
++#define STM32H7_ADSTP                 BIT(4)
++#define STM32H7_ADSTART                       BIT(2)
++#define STM32H7_ADDIS                 BIT(1)
++#define STM32H7_ADEN                  BIT(0)
++
++/* STM32H7_ADC_CFGR bit fields */
++#define STM32H7_EXTEN_SHIFT           10
++#define STM32H7_EXTEN_MASK            GENMASK(11, 10)
++#define STM32H7_EXTSEL_SHIFT          5
++#define STM32H7_EXTSEL_MASK           GENMASK(9, 5)
++#define STM32H7_RES_SHIFT             2
++#define STM32H7_RES_MASK              GENMASK(4, 2)
++#define STM32H7_DMNGT_SHIFT           0
++#define STM32H7_DMNGT_MASK            GENMASK(1, 0)
++
++enum stm32h7_adc_dmngt {
++      STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
++      STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
++      STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
++      STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
++};
++
++/* STM32H7_ADC_CALFACT - bit fields */
++#define STM32H7_CALFACT_D_SHIFT               16
++#define STM32H7_CALFACT_D_MASK                GENMASK(26, 16)
++#define STM32H7_CALFACT_S_SHIFT               0
++#define STM32H7_CALFACT_S_MASK                GENMASK(10, 0)
++
++/* STM32H7_ADC_CALFACT2 - bit fields */
++#define STM32H7_LINCALFACT_SHIFT      0
++#define STM32H7_LINCALFACT_MASK               GENMASK(29, 0)
++
++/* STM32H7_ADC_CSR - bit fields */
++#define STM32H7_EOC_SLV                       BIT(18)
++#define STM32H7_EOC_MST                       BIT(2)
++
++/* STM32H7_ADC_CCR - bit fields */
++#define STM32H7_PRESC_SHIFT           18
++#define STM32H7_PRESC_MASK            GENMASK(21, 18)
++#define STM32H7_CKMODE_SHIFT          16
++#define STM32H7_CKMODE_MASK           GENMASK(17, 16)
++
+ /**
+  * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
+  * @base:             control registers base cpu addr
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 04be8bd951be..e59cbc9ad4f6 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -40,113 +40,6 @@
+ 
+ #include "stm32-adc-core.h"
+ 
+-/* STM32F4 - Registers for each ADC instance */
+-#define STM32F4_ADC_SR                        0x00
+-#define STM32F4_ADC_CR1                       0x04
+-#define STM32F4_ADC_CR2                       0x08
+-#define STM32F4_ADC_SMPR1             0x0C
+-#define STM32F4_ADC_SMPR2             0x10
+-#define STM32F4_ADC_HTR                       0x24
+-#define STM32F4_ADC_LTR                       0x28
+-#define STM32F4_ADC_SQR1              0x2C
+-#define STM32F4_ADC_SQR2              0x30
+-#define STM32F4_ADC_SQR3              0x34
+-#define STM32F4_ADC_JSQR              0x38
+-#define STM32F4_ADC_JDR1              0x3C
+-#define STM32F4_ADC_JDR2              0x40
+-#define STM32F4_ADC_JDR3              0x44
+-#define STM32F4_ADC_JDR4              0x48
+-#define STM32F4_ADC_DR                        0x4C
+-
+-/* STM32F4_ADC_SR - bit fields */
+-#define STM32F4_STRT                  BIT(4)
+-#define STM32F4_EOC                   BIT(1)
+-
+-/* STM32F4_ADC_CR1 - bit fields */
+-#define STM32F4_RES_SHIFT             24
+-#define STM32F4_RES_MASK              GENMASK(25, 24)
+-#define STM32F4_SCAN                  BIT(8)
+-#define STM32F4_EOCIE                 BIT(5)
+-
+-/* STM32F4_ADC_CR2 - bit fields */
+-#define STM32F4_SWSTART                       BIT(30)
+-#define STM32F4_EXTEN_SHIFT           28
+-#define STM32F4_EXTEN_MASK            GENMASK(29, 28)
+-#define STM32F4_EXTSEL_SHIFT          24
+-#define STM32F4_EXTSEL_MASK           GENMASK(27, 24)
+-#define STM32F4_EOCS                  BIT(10)
+-#define STM32F4_DDS                   BIT(9)
+-#define STM32F4_DMA                   BIT(8)
+-#define STM32F4_ADON                  BIT(0)
+-
+-/* STM32H7 - Registers for each ADC instance */
+-#define STM32H7_ADC_ISR                       0x00
+-#define STM32H7_ADC_IER                       0x04
+-#define STM32H7_ADC_CR                        0x08
+-#define STM32H7_ADC_CFGR              0x0C
+-#define STM32H7_ADC_SMPR1             0x14
+-#define STM32H7_ADC_SMPR2             0x18
+-#define STM32H7_ADC_PCSEL             0x1C
+-#define STM32H7_ADC_SQR1              0x30
+-#define STM32H7_ADC_SQR2              0x34
+-#define STM32H7_ADC_SQR3              0x38
+-#define STM32H7_ADC_SQR4              0x3C
+-#define STM32H7_ADC_DR                        0x40
+-#define STM32H7_ADC_CALFACT           0xC4
+-#define STM32H7_ADC_CALFACT2          0xC8
+-
+-/* STM32H7_ADC_ISR - bit fields */
+-#define STM32H7_EOC                   BIT(2)
+-#define STM32H7_ADRDY                 BIT(0)
+-
+-/* STM32H7_ADC_IER - bit fields */
+-#define STM32H7_EOCIE                 STM32H7_EOC
+-
+-/* STM32H7_ADC_CR - bit fields */
+-#define STM32H7_ADCAL                 BIT(31)
+-#define STM32H7_ADCALDIF              BIT(30)
+-#define STM32H7_DEEPPWD                       BIT(29)
+-#define STM32H7_ADVREGEN              BIT(28)
+-#define STM32H7_LINCALRDYW6           BIT(27)
+-#define STM32H7_LINCALRDYW5           BIT(26)
+-#define STM32H7_LINCALRDYW4           BIT(25)
+-#define STM32H7_LINCALRDYW3           BIT(24)
+-#define STM32H7_LINCALRDYW2           BIT(23)
+-#define STM32H7_LINCALRDYW1           BIT(22)
+-#define STM32H7_ADCALLIN              BIT(16)
+-#define STM32H7_BOOST                 BIT(8)
+-#define STM32H7_ADSTP                 BIT(4)
+-#define STM32H7_ADSTART                       BIT(2)
+-#define STM32H7_ADDIS                 BIT(1)
+-#define STM32H7_ADEN                  BIT(0)
+-
+-/* STM32H7_ADC_CFGR bit fields */
+-#define STM32H7_EXTEN_SHIFT           10
+-#define STM32H7_EXTEN_MASK            GENMASK(11, 10)
+-#define STM32H7_EXTSEL_SHIFT          5
+-#define STM32H7_EXTSEL_MASK           GENMASK(9, 5)
+-#define STM32H7_RES_SHIFT             2
+-#define STM32H7_RES_MASK              GENMASK(4, 2)
+-#define STM32H7_DMNGT_SHIFT           0
+-#define STM32H7_DMNGT_MASK            GENMASK(1, 0)
+-
+-enum stm32h7_adc_dmngt {
+-      STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
+-      STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
+-      STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
+-      STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
+-};
+-
+-/* STM32H7_ADC_CALFACT - bit fields */
+-#define STM32H7_CALFACT_D_SHIFT               16
+-#define STM32H7_CALFACT_D_MASK                GENMASK(26, 16)
+-#define STM32H7_CALFACT_S_SHIFT               0
+-#define STM32H7_CALFACT_S_MASK                GENMASK(10, 0)
+-
+-/* STM32H7_ADC_CALFACT2 - bit fields */
+-#define STM32H7_LINCALFACT_SHIFT      0
+-#define STM32H7_LINCALFACT_MASK               GENMASK(29, 0)
+-
+ /* Number of linear calibration shadow registers / LINCALRDYW control bits */
+ #define STM32H7_LINCALFACT_NUM                6
+ 
+diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
+index 91efbf0f19f9..da45049de97e 100644
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -743,7 +743,7 @@ capi_poll(struct file *file, poll_table *wait)
+ 
+       poll_wait(file, &(cdev->recvwait), wait);
+       mask = POLLOUT | POLLWRNORM;
+-      if (!skb_queue_empty(&cdev->recvqueue))
++      if (!skb_queue_empty_lockless(&cdev->recvqueue))
+               mask |= POLLIN | POLLRDNORM;
+       return mask;
+ }
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index acf64d4cd94c..434e6dced6b7 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1431,7 +1431,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port,
+               loc = B53_EG_MIR_CTL;
+ 
+       b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+-      reg &= ~MIRROR_MASK;
+       reg |= BIT(port);
+       b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+ 
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 0132921f408a..604c5abc08eb 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -106,22 +106,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+       unsigned int i;
+       u32 reg, offset;
+ 
+-      if (priv->type == BCM7445_DEVICE_ID)
+-              offset = CORE_STS_OVERRIDE_IMP;
+-      else
+-              offset = CORE_STS_OVERRIDE_IMP2;
+-
+       /* Enable the port memories */
+       reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+       reg &= ~P_TXQ_PSM_VDD(port);
+       core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+ 
+-      /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+-      reg = core_readl(priv, CORE_IMP_CTL);
+-      reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+-      reg &= ~(RX_DIS | TX_DIS);
+-      core_writel(priv, reg, CORE_IMP_CTL);
+-
+       /* Enable forwarding */
+       core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
+ 
+@@ -140,10 +129,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+ 
+       bcm_sf2_brcm_hdr_setup(priv, port);
+ 
+-      /* Force link status for IMP port */
+-      reg = core_readl(priv, offset);
+-      reg |= (MII_SW_OR | LINK_STS);
+-      core_writel(priv, reg, offset);
++      if (port == 8) {
++              if (priv->type == BCM7445_DEVICE_ID)
++                      offset = CORE_STS_OVERRIDE_IMP;
++              else
++                      offset = CORE_STS_OVERRIDE_IMP2;
++
++              /* Force link status for IMP port */
++              reg = core_readl(priv, offset);
++              reg |= (MII_SW_OR | LINK_STS);
++              core_writel(priv, reg, offset);
++
++              /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
++              reg = core_readl(priv, CORE_IMP_CTL);
++              reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
++              reg &= ~(RX_DIS | TX_DIS);
++              core_writel(priv, reg, CORE_IMP_CTL);
++      } else {
++              reg = core_readl(priv, CORE_G_PCTL_PORT(port));
++              reg &= ~(RX_DIS | TX_DIS);
++              core_writel(priv, reg, CORE_G_PCTL_PORT(port));
++      }
+ }
+ 
+ static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index d857df8ebdb4..1cc4fb27c13b 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1985,6 +1985,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
+        */
+       if (priv->internal_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
++              if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
++                      int0_enable |= UMAC_IRQ_PHY_DET_R;
+       } else if (priv->ext_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
+       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+@@ -2608,6 +2610,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
+               bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+       }
+ 
++      if (status & UMAC_IRQ_PHY_DET_R &&
++          priv->dev->phydev->autoneg != AUTONEG_ENABLE)
++              phy_init_hw(priv->dev->phydev);
++
+       /* Link UP/DOWN event */
+       if (status & UMAC_IRQ_LINK_EVENT)
+               phy_mac_interrupt(priv->phydev,
+@@ -2713,8 +2719,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+       }
+ 
+       /* all other interested interrupts handled in bottom half */
+-      status &= (UMAC_IRQ_LINK_EVENT |
+-                 UMAC_IRQ_MPD_R);
++      status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_MPD_R | UMAC_IRQ_PHY_DET_R);
+       if (status) {
+               /* Save irq status for bottom-half processing. */
+               spin_lock_irqsave(&priv->lock, flags);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+index 8441ce3541af..ad4c9f17d77c 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -670,10 +670,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
+       lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
+ }
+ 
+-static void uld_attach(struct adapter *adap, unsigned int uld)
++static int uld_attach(struct adapter *adap, unsigned int uld)
+ {
+-      void *handle;
+       struct cxgb4_lld_info lli;
++      void *handle;
+ 
+       uld_init(adap, &lli);
+       uld_queue_init(adap, uld, &lli);
+@@ -683,7 +683,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
+               dev_warn(adap->pdev_dev,
+                        "could not attach to the %s driver, error %ld\n",
+                        adap->uld[uld].name, PTR_ERR(handle));
+-              return;
++              return PTR_ERR(handle);
+       }
+ 
+       adap->uld[uld].handle = handle;
+@@ -691,23 +691,24 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
+ 
+       if (adap->flags & FULL_INIT_DONE)
+               adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
++
++      return 0;
+ }
+ 
+-/**
+- *    cxgb4_register_uld - register an upper-layer driver
+- *    @type: the ULD type
+- *    @p: the ULD methods
++/* cxgb4_register_uld - register an upper-layer driver
++ * @type: the ULD type
++ * @p: the ULD methods
+  *
+- *    Registers an upper-layer driver with this driver and notifies the ULD
+- *    about any presently available devices that support its type.  Returns
+- *    %-EBUSY if a ULD of the same type is already registered.
++ * Registers an upper-layer driver with this driver and notifies the ULD
++ * about any presently available devices that support its type.  Returns
++ * %-EBUSY if a ULD of the same type is already registered.
+  */
+ int cxgb4_register_uld(enum cxgb4_uld type,
+                      const struct cxgb4_uld_info *p)
+ {
+-      int ret = 0;
+       unsigned int adap_idx = 0;
+       struct adapter *adap;
++      int ret = 0;
+ 
+       if (type >= CXGB4_ULD_MAX)
+               return -EINVAL;
+@@ -741,12 +742,16 @@ int cxgb4_register_uld(enum cxgb4_uld type,
+               if (ret)
+                       goto free_irq;
+               adap->uld[type] = *p;
+-              uld_attach(adap, type);
++              ret = uld_attach(adap, type);
++              if (ret)
++                      goto free_txq;
+               adap_idx++;
+       }
+       mutex_unlock(&uld_mutex);
+       return 0;
+ 
++free_txq:
++      release_sge_txq_uld(adap, type);
+ free_irq:
+       if (adap->flags & FULL_INIT_DONE)
+               quiesce_rx_uld(adap, type);
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 9ed8e4b81530..bfda315a3f1b 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -734,6 +734,18 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+        */
+       nfrags = skb_shinfo(skb)->nr_frags;
+ 
++      /* Setup HW checksumming */
++      csum_vlan = 0;
++      if (skb->ip_summed == CHECKSUM_PARTIAL &&
++          !ftgmac100_prep_tx_csum(skb, &csum_vlan))
++              goto drop;
++
++      /* Add VLAN tag */
++      if (skb_vlan_tag_present(skb)) {
++              csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
++              csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
++      }
++
+       /* Get header len */
+       len = skb_headlen(skb);
+ 
+@@ -760,19 +772,6 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+       if (nfrags == 0)
+               f_ctl_stat |= FTGMAC100_TXDES0_LTS;
+       txdes->txdes3 = cpu_to_le32(map);
+-
+-      /* Setup HW checksumming */
+-      csum_vlan = 0;
+-      if (skb->ip_summed == CHECKSUM_PARTIAL &&
+-          !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+-              goto drop;
+-
+-      /* Add VLAN tag */
+-      if (skb_vlan_tag_present(skb)) {
+-              csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+-              csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+-      }
+-
+       txdes->txdes1 = cpu_to_le32(csum_vlan);
+ 
+       /* Next descriptor */
+diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
+index 1bfe9544b3c1..17cbe8145dcd 100644
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -174,6 +174,7 @@ struct hip04_priv {
+       dma_addr_t rx_phys[RX_DESC_NUM];
+       unsigned int rx_head;
+       unsigned int rx_buf_size;
++      unsigned int rx_cnt_remaining;
+ 
+       struct device_node *phy_node;
+       struct phy_device *phy;
+@@ -487,7 +488,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
+       struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+       struct net_device *ndev = priv->ndev;
+       struct net_device_stats *stats = &ndev->stats;
+-      unsigned int cnt = hip04_recv_cnt(priv);
+       struct rx_desc *desc;
+       struct sk_buff *skb;
+       unsigned char *buf;
+@@ -500,8 +500,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
+ 
+       /* clean up tx descriptors */
+       tx_remaining = hip04_tx_reclaim(ndev, false);
+-
+-      while (cnt && !last) {
++      priv->rx_cnt_remaining += hip04_recv_cnt(priv);
++      while (priv->rx_cnt_remaining && !last) {
+               buf = priv->rx_buf[priv->rx_head];
+               skb = build_skb(buf, priv->rx_buf_size);
+               if (unlikely(!skb)) {
+@@ -547,11 +547,13 @@ refill:
+               hip04_set_recv_desc(priv, phys);
+ 
+               priv->rx_head = RX_NEXT(priv->rx_head);
+-              if (rx >= budget)
++              if (rx >= budget) {
++                      --priv->rx_cnt_remaining;
+                       goto done;
++              }
+ 
+-              if (--cnt == 0)
+-                      cnt = hip04_recv_cnt(priv);
++              if (--priv->rx_cnt_remaining == 0)
++                      priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+       }
+ 
+       if (!(priv->reg_inten & RCV_INT)) {
+@@ -636,6 +638,7 @@ static int hip04_mac_open(struct net_device *ndev)
+       int i;
+ 
+       priv->rx_head = 0;
++      priv->rx_cnt_remaining = 0;
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+       hip04_reset_ppe(priv);
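
The hip04 change fixes lost receive work: cnt used to be a local snapshot of hip04_recv_cnt() (apparently a read-to-clear counter), so frames that had been counted but not yet processed were forgotten whenever the loop bailed out at the NAPI budget. Keeping the remainder in priv->rx_cnt_remaining, adding to it rather than overwriting, and decrementing it on the budget exit keeps the count accurate across polls. A stand-alone sketch of the carry-over pattern (hw_recv_cnt() is an illustrative stand-in):

    /* Sketch: carry unconsumed work across budgeted poll rounds.
     * hw_recv_cnt() models a read-to-clear hardware counter; dropping
     * its return value loses frames, so the remainder must persist. */
    #include <stdio.h>

    static unsigned pending = 5;            /* frames the "hardware" holds */

    static unsigned hw_recv_cnt(void)       /* read-and-clear counter */
    {
            unsigned n = pending;
            pending = 0;
            return n;
    }

    static unsigned rx_cnt_remaining;       /* persists across polls */

    static unsigned poll(unsigned budget)
    {
            unsigned rx = 0;

            rx_cnt_remaining += hw_recv_cnt();      /* add, never overwrite */
            while (rx_cnt_remaining && rx < budget) {
                    rx++;                           /* consume one frame */
                    rx_cnt_remaining--;
            }
            return rx;                      /* leftovers wait for next poll */
    }

    int main(void)
    {
            printf("%u ", poll(3));         /* 3 */
            printf("%u\n", poll(3));        /* 2: nothing forgotten */
            return 0;
    }
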
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 53ca6cf316dc..66e8054a8966 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
+               priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
+ }
+ 
+-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
++static int
++mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
++                               struct resource_allocator *res_alloc,
++                               int vf)
+ {
+-      /* reduce the sink counter */
+-      return (dev->caps.max_counters - 1 -
+-              (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
+-              / MLX4_MAX_PORTS;
++      struct mlx4_active_ports actv_ports;
++      int ports, counters_guaranteed;
++
++      /* For master, only allocate according to the number of phys ports */
++      if (vf == mlx4_master_func_num(dev))
++              return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
++
++      /* calculate real number of ports for the VF */
++      actv_ports = mlx4_get_active_ports(dev, vf);
++      ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
++      counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
++
++      /* If we do not have enough counters for this VF, do not
++       * allocate any for it. '-1' to reduce the sink counter.
++       */
++      if ((res_alloc->res_reserved + counters_guaranteed) >
++          (dev->caps.max_counters - 1))
++              return 0;
++
++      return counters_guaranteed;
+ }
+ 
+ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+@@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i, j;
+       int t;
+-      int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
+ 
+       priv->mfunc.master.res_tracker.slave_list =
+               kzalloc(dev->num_slaves * sizeof(struct slave_list),
+@@ -601,16 +619,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+                               break;
+                       case RES_COUNTER:
+                               res_alloc->quota[t] = dev->caps.max_counters;
+-                              if (t == mlx4_master_func_num(dev))
+-                                      res_alloc->guaranteed[t] =
+-                                              MLX4_PF_COUNTERS_PER_PORT *
+-                                              MLX4_MAX_PORTS;
+-                              else if (t <= max_vfs_guarantee_counter)
+-                                      res_alloc->guaranteed[t] =
+-                                              MLX4_VF_COUNTERS_PER_PORT *
+-                                              MLX4_MAX_PORTS;
+-                              else
+-                                      res_alloc->guaranteed[t] = 0;
++                              res_alloc->guaranteed[t] =
++                                      mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
+                               break;
+                       default:
+                               break;
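
The replacement helper changes the guarantee from one precomputed per-VF ceiling to a per-function computation: the PF gets MLX4_PF_COUNTERS_PER_PORT per physical port, each VF gets MLX4_VF_COUNTERS_PER_PORT per active port, and a VF whose guarantee would push the running reservation past max_counters - 1 (one counter stays reserved as the sink) gets none. A small numeric sketch of that arithmetic; the constants are invented and the reservation is updated inline purely for illustration:

    /* Sketch of the guaranteed-counter math: per active port, capped by
     * what is still unreserved (minus one sink counter). Constants are
     * made up for illustration, not the mlx4 defaults. */
    #include <stdio.h>

    #define VF_COUNTERS_PER_PORT 2
    #define MAX_COUNTERS         10

    static int guaranteed(int active_ports, int *reserved)
    {
            int want = active_ports * VF_COUNTERS_PER_PORT;

            if (*reserved + want > MAX_COUNTERS - 1)   /* keep 1 for the sink */
                    return 0;                          /* over-committed: grant 0 */
            *reserved += want;
            return want;
    }

    int main(void)
    {
            int reserved = 0;
            /* three single-port VFs: 2 + 2 + 2 = 6, fits under 9 */
            for (int vf = 0; vf < 3; vf++)
                    printf("vf%d: %d\n", vf, guaranteed(1, &reserved));
            /* a dual-port VF would need 4 more (6 + 4 = 10 > 9): gets 0 */
            printf("vf3: %d\n", guaranteed(2, &reserved));
            return 0;
    }
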
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index eec7c2ef067a..bf311a3c3e02 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1093,8 +1093,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+       if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
+               return 0;
+ 
+-      if (cq->decmprs_left)
++      if (cq->decmprs_left) {
+               work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
++              if (cq->decmprs_left || work_done >= budget)
++                      goto out;
++      }
+ 
+       cqe = mlx5_cqwq_get_cqe(&cq->wq);
+       if (!cqe) {
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index ffd15f5f836f..6c7a169d906a 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -800,6 +800,13 @@ static const struct usb_device_id products[] = {
+       .driver_info = 0,
+ },
+ 
++/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
++{
++      USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
++                      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++      .driver_info = 0,
++},
++
+ /* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+ {
+       USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 24b994c68bcc..78a12d7b96e8 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3642,10 +3642,14 @@ static int lan78xx_probe(struct usb_interface *intf,
+       /* driver requires remote-wakeup capability during autosuspend. */
+       intf->needs_remote_wakeup = 1;
+ 
++      ret = lan78xx_phy_init(dev);
++      if (ret < 0)
++              goto out4;
++
+       ret = register_netdev(netdev);
+       if (ret != 0) {
+               netif_err(dev, probe, netdev, "couldn't register the device\n");
+-              goto out4;
++              goto out5;
+       }
+ 
+       usb_set_intfdata(intf, dev);
+@@ -3658,14 +3662,10 @@ static int lan78xx_probe(struct usb_interface *intf,
+       pm_runtime_set_autosuspend_delay(&udev->dev,
+                                        DEFAULT_AUTOSUSPEND_DELAY);
+ 
+-      ret = lan78xx_phy_init(dev);
+-      if (ret < 0)
+-              goto out5;
+-
+       return 0;
+ 
+ out5:
+-      unregister_netdev(netdev);
++      phy_disconnect(netdev->phydev);
+ out4:
+       usb_free_urb(dev->urb_intr);
+ out3:
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index c0964281ab98..6a86a03c5e95 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -5324,6 +5324,7 @@ static const struct usb_device_id rtl8152_table[] = {
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214)},
++      {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0xa387)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK,  0x0601)},
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 2fbaa279988e..6d26bbd190dd 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2169,8 +2169,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+               vni = tunnel_id_to_key32(info->key.tun_id);
+               ifindex = 0;
+               dst_cache = &info->dst_cache;
+-              if (info->options_len)
++              if (info->options_len) {
++                      if (info->options_len < sizeof(*md))
++                              goto drop;
+                       md = ip_tunnel_info_opts(info);
++              }
+               ttl = info->key.ttl;
+               tos = info->key.tos;
+               label = info->key.label;
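
This guard (and the matching one added to erspan_fb_xmit() further down in this patch) exists because externally supplied tunnel metadata carries a caller-controlled options_len; casting the option area to struct vxlan_metadata without first checking that at least sizeof(*md) bytes are present would read past the buffer. The generic pattern, as a stand-alone sketch with illustrative types:

    /* Sketch: validate a caller-supplied length before casting an opaque
     * option blob to a fixed-size struct. Types are illustrative. */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct metadata { unsigned int gbp; };  /* stand-in for the tunnel md */

    static const struct metadata *get_opts(const void *buf, size_t options_len)
    {
            if (options_len < sizeof(struct metadata))
                    return NULL;            /* too short: drop, don't read */
            return buf;
    }

    int main(void)
    {
            unsigned char blob[sizeof(struct metadata)];

            memset(blob, 0, sizeof(blob));
            printf("%p %p\n", (void *)get_opts(blob, 1),          /* NULL */
                   (void *)get_opts(blob, sizeof(blob)));         /* valid */
            return 0;
    }
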
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index 7c6aff761800..87650d42682f 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -1002,6 +1002,7 @@ static int __init unittest_data_add(void)
+       of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
+       if (!unittest_data_node) {
+               pr_warn("%s: No tree to attach; not running tests\n", __func__);
++              kfree(unittest_data);
+               return -ENODATA;
+       }
+       of_node_set_flag(unittest_data_node, OF_DETACHED);
+diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+index 4b5cf0e0f16e..951090faa6a9 100644
+--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
++++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+@@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
+       const struct ns2_pin_function *func;
+       const struct ns2_pin_group *grp;
+ 
+-      if (grp_select > pinctrl->num_groups ||
+-              func_select > pinctrl->num_functions)
++      if (grp_select >= pinctrl->num_groups ||
++              func_select >= pinctrl->num_functions)
+               return -EINVAL;
+ 
+       func = &pinctrl->functions[func_select];
+diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
+index 63a6881c7078..971ae892c611 100644
+--- a/drivers/platform/x86/pmc_atom.c
++++ b/drivers/platform/x86/pmc_atom.c
+@@ -475,6 +475,13 @@ static const struct dmi_system_id critclk_systems[] = {
+                       DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
+               },
+       },
++      {
++              .ident = "SIMATIC IPC227E",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
++                      DMI_MATCH(DMI_PRODUCT_VERSION, "6ES7647-8B"),
++              },
++      },
+       { /*sentinel*/ }
+ };
+ 
+diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
+index 659e516455be..4f205366d8ae 100644
+--- a/drivers/regulator/pfuze100-regulator.c
++++ b/drivers/regulator/pfuze100-regulator.c
+@@ -632,7 +632,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
+ 
+               /* SW2~SW4 high bit check and modify the voltage value table */
+               if (i >= sw_check_start && i <= sw_check_end) {
+-                      regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
++                      ret = regmap_read(pfuze_chip->regmap,
++                                              desc->vsel_reg, &val);
++                      if (ret) {
++                              dev_err(&client->dev, "Fails to read from the register.\n");
++                              return ret;
++                      }
++
+                       if (val & sw_hi) {
+                               if (pfuze_chip->chip_id == PFUZE3000) {
+                                       desc->volt_table = pfuze3000_sw2hi;
+diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
+index d2f994298753..6d17357b3a24 100644
+--- a/drivers/regulator/ti-abb-regulator.c
++++ b/drivers/regulator/ti-abb-regulator.c
+@@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
+       while (timeout++ <= abb->settling_time) {
+               status = ti_abb_check_txdone(abb);
+               if (status)
+-                      break;
++                      return 0;
+ 
+               udelay(1);
+       }
+ 
+-      if (timeout > abb->settling_time) {
+-              dev_warn_ratelimited(dev,
+-                                   "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+-                                   __func__, timeout, readl(abb->int_base));
+-              return -ETIMEDOUT;
+-      }
+-
+-      return 0;
++      dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
++                           __func__, timeout, readl(abb->int_base));
++      return -ETIMEDOUT;
+ }
+ 
+ /**
+@@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb)
+ 
+               status = ti_abb_check_txdone(abb);
+               if (!status)
+-                      break;
++                      return 0;
+ 
+               udelay(1);
+       }
+ 
+-      if (timeout > abb->settling_time) {
+-              dev_warn_ratelimited(dev,
+-                                   "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+-                                   __func__, timeout, readl(abb->int_base));
+-              return -ETIMEDOUT;
+-      }
+-
+-      return 0;
++      dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
++                           __func__, timeout, readl(abb->int_base));
++      return -ETIMEDOUT;
+ }
+ 
+ /**
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 41366339b950..881906dc33b8 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -966,7 +966,7 @@ config SCSI_SNI_53C710
+ 
+ config 53C700_LE_ON_BE
+       bool
+-      depends on SCSI_LASI700
++      depends on SCSI_LASI700 || SCSI_SNI_53C710
+       default y
+ 
+ config SCSI_STEX
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 41f5f6410163..135376ee2cbf 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -523,6 +523,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+       unsigned int tpg_desc_tbl_off;
+       unsigned char orig_transition_tmo;
+       unsigned long flags;
++      bool transitioning_sense = false;
+ 
+       if (!pg->expiry) {
+               unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
+@@ -567,13 +568,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+                       goto retry;
+               }
+               /*
+-               * Retry on ALUA state transition or if any
+-               * UNIT ATTENTION occurred.
++               * If the array returns with 'ALUA state transition'
++               * sense code here it cannot return RTPG data during
++               * transition. So set the state to 'transitioning' directly.
+                */
+               if (sense_hdr.sense_key == NOT_READY &&
+-                  sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
+-                      err = SCSI_DH_RETRY;
+-              else if (sense_hdr.sense_key == UNIT_ATTENTION)
++                  sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
++                      transitioning_sense = true;
++                      goto skip_rtpg;
++              }
++              /*
++               * Retry on any other UNIT ATTENTION occurred.
++               */
++              if (sense_hdr.sense_key == UNIT_ATTENTION)
+                       err = SCSI_DH_RETRY;
+               if (err == SCSI_DH_RETRY &&
+                   pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
+@@ -661,7 +668,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+               off = 8 + (desc[7] * 4);
+       }
+ 
++ skip_rtpg:
+       spin_lock_irqsave(&pg->lock, flags);
++      if (transitioning_sense)
++              pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
++
+       sdev_printk(KERN_INFO, sdev,
+                   "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
+                   ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
+diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
+index 1f9a087daf69..3102a75984d3 100644
+--- a/drivers/scsi/sni_53c710.c
++++ b/drivers/scsi/sni_53c710.c
+@@ -78,10 +78,8 @@ static int snirm710_probe(struct platform_device *dev)
+ 
+       base = res->start;
+       hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+-      if (!hostdata) {
+-              dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
++      if (!hostdata)
+               return -ENOMEM;
+-      }
+ 
+       hostdata->dev = &dev->dev;
+       dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 84742125f773..92b52d2314b5 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1151,27 +1151,6 @@ passthrough_parse_cdb(struct se_cmd *cmd,
+       struct se_device *dev = cmd->se_dev;
+       unsigned int size;
+ 
+-      /*
+-       * Clear a lun set in the cdb if the initiator talking to use spoke
+-       * and old standards version, as we can't assume the underlying device
+-       * won't choke up on it.
+-       */
+-      switch (cdb[0]) {
+-      case READ_10: /* SBC - RDProtect */
+-      case READ_12: /* SBC - RDProtect */
+-      case READ_16: /* SBC - RDProtect */
+-      case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+-      case VERIFY: /* SBC - VRProtect */
+-      case VERIFY_16: /* SBC - VRProtect */
+-      case WRITE_VERIFY: /* SBC - VRProtect */
+-      case WRITE_VERIFY_12: /* SBC - VRProtect */
+-      case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+-              break;
+-      default:
+-              cdb[1] &= 0x1f; /* clear logical unit number */
+-              break;
+-      }
+-
+       /*
+        * For REPORT LUNS we always need to emulate the response, for everything
+        * else, pass it up.
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 7b7ab10a9db1..600bb838c15b 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1210,6 +1210,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
+ struct cifsInodeInfo {
+       bool can_cache_brlcks;
+       struct list_head llist; /* locks held by this inode */
++      /*
++       * NOTE: Some code paths call down_read(lock_sem) twice, so
++       * we must always use cifs_down_write() instead of down_write()
++       * for this semaphore to avoid deadlocks.
++       */
+       struct rw_semaphore lock_sem;   /* protect the fields above */
+       /* BB add in lists for dirty pages i.e. write caching info for oplock */
+       struct list_head openFileList;
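
The NOTE added above describes the deadlock shape: some CIFS paths take down_read(lock_sem) twice in the same task, and with a queueing rwsem a writer that arrives between the two read acquisitions blocks the second down_read() forever. The cifs_down_write() helper introduced later in this patch (fs/cifs/file.c) avoids joining the wait queue at all: it spins on down_write_trylock() with a 10 ms sleep. A stand-alone pthreads analogue (a sketch only; POSIX rwlock fairness rules differ from kernel rwsems):

    /* Sketch: a writer that never blocks in the lock's wait queue, so a
     * task already holding the lock for read can re-acquire it for read
     * without deadlocking behind a queued writer. Build with -lpthread. */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_rwlock_t lock_sem = PTHREAD_RWLOCK_INITIALIZER;

    static void cifs_style_down_write(pthread_rwlock_t *sem)
    {
            while (pthread_rwlock_trywrlock(sem) != 0)
                    usleep(10 * 1000);      /* back off instead of queueing */
    }

    int main(void)
    {
            pthread_rwlock_rdlock(&lock_sem);   /* first read acquisition */
            /* a writer in another thread would spin in
             * cifs_style_down_write() here rather than queue ... */
            pthread_rwlock_rdlock(&lock_sem);   /* ... so this cannot deadlock */
            puts("re-acquired for read");
            pthread_rwlock_unlock(&lock_sem);
            pthread_rwlock_unlock(&lock_sem);
            cifs_style_down_write(&lock_sem);   /* now the writer gets in */
            pthread_rwlock_unlock(&lock_sem);
            return 0;
    }
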
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index ccdb42f71b2e..3a7fb8e750e9 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -149,6 +149,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
+                            struct file_lock *flock, const unsigned int xid);
+ extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
+ 
++extern void cifs_down_write(struct rw_semaphore *sem);
+ extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
+                                             struct file *file,
+                                             struct tcon_link *tlink,
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 71a960da7cce..40f22932343c 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -280,6 +280,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode)
+       return has_locks;
+ }
+ 
++void
++cifs_down_write(struct rw_semaphore *sem)
++{
++      while (!down_write_trylock(sem))
++              msleep(10);
++}
++
+ struct cifsFileInfo *
+ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+                 struct tcon_link *tlink, __u32 oplock)
+@@ -305,7 +312,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+       INIT_LIST_HEAD(&fdlocks->locks);
+       fdlocks->cfile = cfile;
+       cfile->llist = fdlocks;
+-      down_write(&cinode->lock_sem);
++      cifs_down_write(&cinode->lock_sem);
+       list_add(&fdlocks->llist, &cinode->llist);
+       up_write(&cinode->lock_sem);
+ 
+@@ -457,7 +464,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+        * Delete any outstanding lock records. We'll lose them when the file
+        * is closed anyway.
+        */
+-      down_write(&cifsi->lock_sem);
++      cifs_down_write(&cifsi->lock_sem);
+       list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
+               list_del(&li->llist);
+               cifs_del_lock_waiters(li);
+@@ -1011,7 +1018,7 @@ static void
+ cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
+ {
+       struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
+-      down_write(&cinode->lock_sem);
++      cifs_down_write(&cinode->lock_sem);
+       list_add_tail(&lock->llist, &cfile->llist->locks);
+       up_write(&cinode->lock_sem);
+ }
+@@ -1033,7 +1040,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
+ 
+ try_again:
+       exist = false;
+-      down_write(&cinode->lock_sem);
++      cifs_down_write(&cinode->lock_sem);
+ 
+       exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
+                                       lock->type, &conf_lock, CIFS_LOCK_OP);
+@@ -1055,7 +1062,7 @@ try_again:
+                                       (lock->blist.next == &lock->blist));
+               if (!rc)
+                       goto try_again;
+-              down_write(&cinode->lock_sem);
++              cifs_down_write(&cinode->lock_sem);
+               list_del_init(&lock->blist);
+       }
+ 
+@@ -1108,7 +1115,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
+               return rc;
+ 
+ try_again:
+-      down_write(&cinode->lock_sem);
++      cifs_down_write(&cinode->lock_sem);
+       if (!cinode->can_cache_brlcks) {
+               up_write(&cinode->lock_sem);
+               return rc;
+@@ -1314,7 +1321,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
+       int rc = 0;
+ 
+       /* we are going to update can_cache_brlcks here - need a write access */
+-      down_write(&cinode->lock_sem);
++      cifs_down_write(&cinode->lock_sem);
+       if (!cinode->can_cache_brlcks) {
+               up_write(&cinode->lock_sem);
+               return rc;
+@@ -1505,7 +1512,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+       if (!buf)
+               return -ENOMEM;
+ 
+-      down_write(&cinode->lock_sem);
++      cifs_down_write(&cinode->lock_sem);
+       for (i = 0; i < 2; i++) {
+               cur = buf;
+               num = 0;
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 1add404618f0..2c809233084b 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -139,7 +139,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+ 
+       cur = buf;
+ 
+-      down_write(&cinode->lock_sem);
++      cifs_down_write(&cinode->lock_sem);
+       list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
+               if (flock->fl_start > li->offset ||
+                   (flock->fl_start + length) <
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index b041f94678de..79d3dab45ceb 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -313,6 +313,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
+       return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
+ }
+ 
++/**
++ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
++ * @gfp_flags: gfp_flags to test
++ *
++ * Test whether @gfp_flags indicates that the allocation is from the
++ * %current context and allowed to sleep.
++ *
++ * An allocation being allowed to block doesn't mean it owns the %current
++ * context.  When direct reclaim path tries to allocate memory, the
++ * allocation context is nested inside whatever %current was doing at the
++ * time of the original allocation.  The nested allocation may be allowed
++ * to block but modifying anything %current owns can corrupt the outer
++ * context's expectations.
++ *
++ * %true result from this function indicates that the allocation context
++ * can sleep and use anything that's associated with %current.
++ */
++static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
++{
++      return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
++              __GFP_DIRECT_RECLAIM;
++}
++
+ #ifdef CONFIG_HIGHMEM
+ #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+ #else
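
gfpflags_normal_context() boils down to a two-bit test: the caller must be allowed to block (__GFP_DIRECT_RECLAIM set) and must not be a reclaim-context allocation (__GFP_MEMALLOC clear), since reclaim-initiated allocations run nested inside whatever %current was doing. A tiny compilable illustration; the flag values are stand-ins, only the logic mirrors the helper:

    /* Sketch of the bit test in gfpflags_normal_context(): flag values
     * are stand-ins, the set/clear logic mirrors the kernel helper. */
    #include <stdbool.h>
    #include <stdio.h>

    #define GFP_DIRECT_RECLAIM 0x1u  /* caller may sleep */
    #define GFP_MEMALLOC       0x2u  /* emergency/reclaim-context allocation */

    static bool normal_context(unsigned flags)
    {
            return (flags & (GFP_DIRECT_RECLAIM | GFP_MEMALLOC))
                    == GFP_DIRECT_RECLAIM;
    }

    int main(void)
    {
            printf("%d\n", normal_context(GFP_DIRECT_RECLAIM));  /* 1: normal */
            printf("%d\n", normal_context(0));                   /* 0: atomic */
            printf("%d\n", normal_context(GFP_DIRECT_RECLAIM |
                                          GFP_MEMALLOC));        /* 0: nested in reclaim */
            return 0;
    }
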
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index fdb0cd0699b6..ec00d9264e5c 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1228,7 +1228,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
+       return skb->hash;
+ }
+ 
+-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
++__u32 skb_get_hash_perturb(const struct sk_buff *skb,
++                         const siphash_key_t *perturb);
+ 
+ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+ {
+@@ -1345,6 +1346,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
+       return list->next == (const struct sk_buff *) list;
+ }
+ 
++/**
++ *    skb_queue_empty_lockless - check if a queue is empty
++ *    @list: queue head
++ *
++ *    Returns true if the queue is empty, false otherwise.
++ *    This variant can be used in lockless contexts.
++ */
++static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
++{
++      return READ_ONCE(list->next) == (const struct sk_buff *) list;
++}
++
++
+ /**
+  *    skb_queue_is_last - check if skb is the last entry in the queue
+  *    @list: queue head
+@@ -1709,9 +1723,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
+                               struct sk_buff *prev, struct sk_buff *next,
+                               struct sk_buff_head *list)
+ {
+-      newsk->next = next;
+-      newsk->prev = prev;
+-      next->prev  = prev->next = newsk;
++      /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
++      WRITE_ONCE(newsk->next, next);
++      WRITE_ONCE(newsk->prev, prev);
++      WRITE_ONCE(next->prev, newsk);
++      WRITE_ONCE(prev->next, newsk);
+       list->qlen++;
+ }
+ 
+@@ -1722,11 +1738,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
+       struct sk_buff *first = list->next;
+       struct sk_buff *last = list->prev;
+ 
+-      first->prev = prev;
+-      prev->next = first;
++      WRITE_ONCE(first->prev, prev);
++      WRITE_ONCE(prev->next, first);
+ 
+-      last->next = next;
+-      next->prev = last;
++      WRITE_ONCE(last->next, next);
++      WRITE_ONCE(next->prev, last);
+ }
+ 
+ /**
+@@ -1867,8 +1883,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+       next       = skb->next;
+       prev       = skb->prev;
+       skb->next  = skb->prev = NULL;
+-      next->prev = prev;
+-      prev->next = next;
++      WRITE_ONCE(next->prev, prev);
++      WRITE_ONCE(prev->next, next);
+ }
+ 
+ /**
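
The skbuff.h hunks pair the new lockless reader with annotated writers: skb_queue_empty_lockless() uses READ_ONCE(), and __skb_insert()/__skb_queue_splice()/__skb_unlink() now publish the link pointers with WRITE_ONCE(), so the poll paths converted later in this patch can test emptiness without the queue lock and without data races on the pointer itself. In C11 terms the pairing is relaxed atomic stores and loads; a minimal sketch (not the kernel's list types):

    /* Sketch: lockless emptiness check paired with atomic publication,
     * a C11 analogue of the READ_ONCE()/WRITE_ONCE() pairing above. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct node { _Atomic(struct node *) next; };

    static struct node head;                 /* empty: head.next == &head */

    static void queue_init(void)
    {
            atomic_store_explicit(&head.next, &head, memory_order_relaxed);
    }

    static void publish(struct node *n)      /* writer holds the queue lock */
    {
            atomic_store_explicit(&head.next, n, memory_order_relaxed);
    }

    static bool queue_empty_lockless(void)   /* reader holds no lock */
    {
            return atomic_load_explicit(&head.next, memory_order_relaxed)
                    == &head;
    }

    int main(void)
    {
            struct node n;

            queue_init();
            printf("%d ", queue_empty_lockless());   /* 1 */
            publish(&n);
            printf("%d\n", queue_empty_lockless());  /* 0 */
            return 0;
    }
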
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index 71c72a939bf8..c86fcadccbd7 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -134,7 +134,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
+ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-      sk->sk_napi_id = skb->napi_id;
++      WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ #endif
+ }
+ 
+@@ -143,8 +143,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
+                                       const struct sk_buff *skb)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-      if (!sk->sk_napi_id)
+-              sk->sk_napi_id = skb->napi_id;
++      if (!READ_ONCE(sk->sk_napi_id))
++              WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+ #endif
+ }
+ 
+diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
+index 22aba321282d..227dc0a84172 100644
+--- a/include/net/flow_dissector.h
++++ b/include/net/flow_dissector.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/in6.h>
++#include <linux/siphash.h>
+ #include <uapi/linux/if_ether.h>
+ 
+ /**
+@@ -229,7 +230,7 @@ struct flow_dissector {
+ struct flow_keys {
+       struct flow_dissector_key_control control;
+ #define FLOW_KEYS_HASH_START_FIELD basic
+-      struct flow_dissector_key_basic basic;
++      struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
+       struct flow_dissector_key_tags tags;
+       struct flow_dissector_key_vlan vlan;
+       struct flow_dissector_key_keyid keyid;
+diff --git a/include/net/fq.h b/include/net/fq.h
+index 6d8521a30c5c..2c7687902789 100644
+--- a/include/net/fq.h
++++ b/include/net/fq.h
+@@ -70,7 +70,7 @@ struct fq {
+       struct list_head backlogs;
+       spinlock_t lock;
+       u32 flows_cnt;
+-      u32 perturbation;
++      siphash_key_t   perturbation;
+       u32 limit;
+       u32 memory_limit;
+       u32 memory_usage;
+diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
+index ac1a2317941e..46903e23eab9 100644
+--- a/include/net/fq_impl.h
++++ b/include/net/fq_impl.h
+@@ -105,7 +105,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq,
+ 
+       lockdep_assert_held(&fq->lock);
+ 
+-      hash = skb_get_hash_perturb(skb, fq->perturbation);
++      hash = skb_get_hash_perturb(skb, &fq->perturbation);
+       idx = reciprocal_scale(hash, fq->flows_cnt);
+       flow = &fq->flows[idx];
+ 
+@@ -255,7 +255,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
+       INIT_LIST_HEAD(&fq->backlogs);
+       spin_lock_init(&fq->lock);
+       fq->flows_cnt = max_t(u32, flows_cnt, 1);
+-      fq->perturbation = prandom_u32();
++      get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
+       fq->quantum = 300;
+       fq->limit = 8192;
+       fq->memory_limit = 16 << 20; /* 16 MBytes */
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 60eef7f1ac05..7ec4d0bd8d12 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -916,8 +916,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
+ {
+       int cpu = raw_smp_processor_id();
+ 
+-      if (unlikely(sk->sk_incoming_cpu != cpu))
+-              sk->sk_incoming_cpu = cpu;
++      if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
++              WRITE_ONCE(sk->sk_incoming_cpu, cpu);
+ }
+ 
+ static inline void sock_rps_record_flow_hash(__u32 hash)
+@@ -2131,12 +2131,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+  * sk_page_frag - return an appropriate page_frag
+  * @sk: socket
+  *
+- * If socket allocation mode allows current thread to sleep, it means its
+- * safe to use the per task page_frag instead of the per socket one.
++ * Use the per task page_frag instead of the per socket one for
++ * optimization when we know that we're in the normal context and own
++ * everything that's associated with %current.
++ *
++ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
++ * inside other socket operations and end up recursing into sk_page_frag()
++ * while it's already in use.
+  */
+ static inline struct page_frag *sk_page_frag(struct sock *sk)
+ {
+-      if (gfpflags_allow_blocking(sk->sk_allocation))
++      if (gfpflags_normal_context(sk->sk_allocation))
+               return &current->task_frag;
+ 
+       return &sk->sk_frag;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 32ba789c544c..bbf8b32fc69e 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -432,10 +432,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+        * its already queued (either by us or someone else) and will get the
+        * wakeup due to that.
+        *
+-       * This cmpxchg() implies a full barrier, which pairs with the write
+-       * barrier implied by the wakeup in wake_up_q().
++       * In order to ensure that a pending wakeup will observe our pending
++       * state, even in the failed case, an explicit smp_mb() must be used.
+        */
+-      if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
++      smp_mb__before_atomic();
++      if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
+               return;
+ 
+       get_task_struct(task);
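
The wake_q change makes an ordering assumption explicit: cmpxchg() only guarantees full ordering when it succeeds, but wake_q_add() needs its earlier stores to be visible to a concurrent waker even when the cmpxchg fails because the task was already queued. Hence the explicit smp_mb__before_atomic() followed by cmpxchg_relaxed(). A rough C11 analogue (kernel barriers do not map one-to-one onto C11 fences, so treat this as a sketch of the shape):

    /* Sketch: C11 analogue of smp_mb__before_atomic() + cmpxchg_relaxed().
     * The fence orders the state store before the CAS, whether or not
     * the CAS succeeds. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int state;     /* "condition" a waker must observe */
    static atomic_int queued;    /* 0 = not queued, 1 = queued */

    static int try_queue(void)
    {
            atomic_store_explicit(&state, 1, memory_order_relaxed);

            /* full fence: the store above is visible before the CAS below,
             * even on the failure path */
            atomic_thread_fence(memory_order_seq_cst);

            int expected = 0;
            return atomic_compare_exchange_strong_explicit(
                    &queued, &expected, 1,
                    memory_order_relaxed, memory_order_relaxed);
    }

    int main(void)
    {
            int first = try_queue();
            int second = try_queue();

            printf("%d %d\n", first, second);   /* 1 0 */
            return 0;
    }
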
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 9e812c782a37..0fd2d26d4c6e 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -667,7 +667,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
+               mask |= POLLHUP;
+ 
+       /* readable? */
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       /* writable? */
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index b216e697deac..b48d54783e5d 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -460,7 +460,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+       if (sk->sk_state == BT_LISTEN)
+               return bt_accept_poll(sk);
+ 
+-      if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++      if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+               mask |= POLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ 
+@@ -470,7 +470,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+       if (sk->sk_shutdown == SHUTDOWN_MASK)
+               mask |= POLLHUP;
+ 
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       if (sk->sk_state == BT_CLOSED)
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 632d5a416d97..df936d2f58bd 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -953,7 +953,7 @@ static unsigned int caif_poll(struct file *file,
+               mask |= POLLRDHUP;
+ 
+       /* readable? */
+-      if (!skb_queue_empty(&sk->sk_receive_queue) ||
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+               (sk->sk_shutdown & RCV_SHUTDOWN))
+               mask |= POLLIN | POLLRDNORM;
+ 
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index dcb333e95702..85fcca395fad 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+       if (error)
+               goto out_err;
+ 
+-      if (sk->sk_receive_queue.prev != skb)
++      if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
+               goto out;
+ 
+       /* Socket shut down? */
+@@ -281,7 +281,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
+                       break;
+ 
+               sk_busy_loop(sk, flags & MSG_DONTWAIT);
+-      } while (sk->sk_receive_queue.prev != *last);
++      } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
+ 
+       error = -EAGAIN;
+ 
+@@ -844,7 +844,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
+       mask = 0;
+ 
+       /* exceptional events? */
+-      if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++      if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+               mask |= POLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ 
+@@ -854,7 +854,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
+               mask |= POLLHUP;
+ 
+       /* readable? */
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       /* Connection-based need to check for termination and startup */
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 97569d3e1937..7822defa5a5d 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1450,11 +1450,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
+ 
+ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
+ {
+-      struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
++      struct ethtool_wolinfo wol;
+ 
+       if (!dev->ethtool_ops->get_wol)
+               return -EOPNOTSUPP;
+ 
++      memset(&wol, 0, sizeof(struct ethtool_wolinfo));
++      wol.cmd = ETHTOOL_GWOL;
+       dev->ethtool_ops->get_wol(dev, &wol);
+ 
+       if (copy_to_user(useraddr, &wol, sizeof(wol)))
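
The ethtool change is likely closing an information leak: a designated initializer zero-initializes the named and remaining members, but it need not zero padding bytes, and copy_to_user() copies every byte of the struct, padding included. memset() guarantees the whole object is zeroed first. A small sketch of the difference (member layout and the 2 trailing padding bytes assume a common ABI):

    /* Sketch: a designated initializer zeroes members but not necessarily
     * padding; memset() zeroes every byte that will be copied out. */
    #include <stdio.h>
    #include <string.h>

    struct wolinfo {
            unsigned int  cmd;
            unsigned int  supported;
            unsigned int  wolopts;
            unsigned char sopass[6];    /* typically followed by 2 padding bytes */
    };

    int main(void)
    {
            struct wolinfo a = { .cmd = 5 };    /* padding contents unspecified */
            struct wolinfo b;

            memset(&b, 0, sizeof(b));           /* padding guaranteed zero */
            b.cmd = 5;

            printf("sizeof=%zu members=%zu\n", sizeof(b),
                   sizeof(b.cmd) + sizeof(b.supported) +
                   sizeof(b.wolopts) + sizeof(b.sopass));   /* e.g. 20 vs 18 */
            (void)a;
            return 0;
    }
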
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index e2e716003ede..7be5c20a93a5 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -889,45 +889,34 @@ out_bad:
+ }
+ EXPORT_SYMBOL(__skb_flow_dissect);
+ 
+-static u32 hashrnd __read_mostly;
++static siphash_key_t hashrnd __read_mostly;
+ static __always_inline void __flow_hash_secret_init(void)
+ {
+       net_get_random_once(&hashrnd, sizeof(hashrnd));
+ }
+ 
+-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
+-                                           u32 keyval)
++static const void *flow_keys_hash_start(const struct flow_keys *flow)
+ {
+-      return jhash2(words, length, keyval);
+-}
+-
+-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
+-{
+-      const void *p = flow;
+-
+-      BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
+-      return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
++      BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
++      return &flow->FLOW_KEYS_HASH_START_FIELD;
+ }
+ 
+ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
+ {
+-      size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+-      BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
+-      BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
+-                   sizeof(*flow) - sizeof(flow->addrs));
++      size_t len = offsetof(typeof(*flow), addrs) - FLOW_KEYS_HASH_OFFSET;
+ 
+       switch (flow->control.addr_type) {
+       case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+-              diff -= sizeof(flow->addrs.v4addrs);
++              len += sizeof(flow->addrs.v4addrs);
+               break;
+       case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+-              diff -= sizeof(flow->addrs.v6addrs);
++              len += sizeof(flow->addrs.v6addrs);
+               break;
+       case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+-              diff -= sizeof(flow->addrs.tipcaddrs);
++              len += sizeof(flow->addrs.tipcaddrs);
+               break;
+       }
+-      return (sizeof(*flow) - diff) / sizeof(u32);
++      return len;
+ }
+ 
+ __be32 flow_get_u32_src(const struct flow_keys *flow)
+@@ -993,14 +982,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
+       }
+ }
+ 
+-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
++static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
++                                      const siphash_key_t *keyval)
+ {
+       u32 hash;
+ 
+       __flow_hash_consistentify(keys);
+ 
+-      hash = __flow_hash_words(flow_keys_hash_start(keys),
+-                               flow_keys_hash_length(keys), keyval);
++      hash = siphash(flow_keys_hash_start(keys),
++                     flow_keys_hash_length(keys), keyval);
+       if (!hash)
+               hash = 1;
+ 
+@@ -1010,12 +1000,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+ u32 flow_hash_from_keys(struct flow_keys *keys)
+ {
+       __flow_hash_secret_init();
+-      return __flow_hash_from_keys(keys, hashrnd);
++      return __flow_hash_from_keys(keys, &hashrnd);
+ }
+ EXPORT_SYMBOL(flow_hash_from_keys);
+ 
+ static inline u32 ___skb_get_hash(const struct sk_buff *skb,
+-                                struct flow_keys *keys, u32 keyval)
++                                struct flow_keys *keys,
++                                const siphash_key_t *keyval)
+ {
+       skb_flow_dissect_flow_keys(skb, keys,
+                                  FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+@@ -1063,7 +1054,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
+                          NULL, 0, 0, 0,
+                          FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+ 
+-      return __flow_hash_from_keys(&keys, hashrnd);
++      return __flow_hash_from_keys(&keys, &hashrnd);
+ }
+ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+ 
+@@ -1083,13 +1074,14 @@ void __skb_get_hash(struct sk_buff *skb)
+ 
+       __flow_hash_secret_init();
+ 
+-      hash = ___skb_get_hash(skb, &keys, hashrnd);
++      hash = ___skb_get_hash(skb, &keys, &hashrnd);
+ 
+       __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+ }
+ EXPORT_SYMBOL(__skb_get_hash);
+ 
+-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
++__u32 skb_get_hash_perturb(const struct sk_buff *skb,
++                         const siphash_key_t *perturb)
+ {
+       struct flow_keys keys;
+ 
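
The flow-dissector conversion above replaces jhash (a non-cryptographic hash seeded with one random u32) with SipHash, a keyed PRF; the usual motivation for this swap is that an attacker who can observe jhash outputs can reconstruct the seed and force flow-table collisions, whereas SipHash is designed to keep its key secret. The hashed length also becomes a byte count covering only the address family actually present rather than the whole key block. A stand-alone sketch of that shape (toy_keyed_hash() is explicitly not SipHash, and the struct is simplified):

    /* Sketch: keyed flow hashing over only the bytes valid for the
     * address family in use. toy_keyed_hash() is a stand-in for SipHash:
     * illustrative only, not collision-resistant. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct flow_keys {
            uint16_t addr_type;             /* 4 or 6 */
            uint32_t ports;
            union {                         /* last, like the kernel's layout */
                    uint32_t v4addrs[2];
                    uint32_t v6addrs[8];
            } addrs;
    };

    static uint64_t key[2];                 /* secret; fill from a real RNG once */

    static uint64_t toy_keyed_hash(const void *p, size_t len)
    {
            const unsigned char *b = p;
            uint64_t h = key[0];

            while (len--)
                    h = (h ^ *b++) * 0x100000001b3ULL;  /* FNV step, NOT SipHash */
            return h ^ key[1];
    }

    static uint32_t flow_hash(const struct flow_keys *f)
    {
            size_t len = offsetof(struct flow_keys, addrs);

            len += (f->addr_type == 6) ? sizeof(f->addrs.v6addrs)
                                       : sizeof(f->addrs.v4addrs);
            return (uint32_t)toy_keyed_hash(f, len);    /* hash only valid bytes */
    }

    int main(void)
    {
            struct flow_keys f;

            memset(&f, 0, sizeof(f));
            key[0] = 0x0123456789abcdefULL;             /* demo key, not random */
            key[1] = 0xfedcba9876543210ULL;
            f.addr_type = 4;
            f.ports = 80;
            printf("%08x\n", flow_hash(&f));
            return 0;
    }
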
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 5f466db916ee..7ccbcd853cbc 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1039,7 +1039,7 @@ set_rcvbuf:
+               break;
+ 
+       case SO_INCOMING_CPU:
+-              sk->sk_incoming_cpu = val;
++              WRITE_ONCE(sk->sk_incoming_cpu, val);
+               break;
+ 
+       case SO_CNX_ADVICE:
+@@ -1351,7 +1351,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+               break;
+ 
+       case SO_INCOMING_CPU:
+-              v.val = sk->sk_incoming_cpu;
++              v.val = READ_ONCE(sk->sk_incoming_cpu);
+               break;
+ 
+       case SO_MEMINFO:
+@@ -3381,7 +3381,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
+ {
+       struct sock *sk = p;
+ 
+-      return !skb_queue_empty(&sk->sk_receive_queue) ||
++      return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+              sk_busy_loop_timeout(sk, start_time);
+ }
+ EXPORT_SYMBOL(sk_busy_loop_end);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 8e08cea6f178..176bddacc16e 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -121,7 +121,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+                                                   inet->inet_daddr,
+                                                   inet->inet_sport,
+                                                   inet->inet_dport);
+-      inet->inet_id = dp->dccps_iss ^ jiffies;
++      inet->inet_id = prandom_u32();
+ 
+       err = dccp_connect(sk);
+       rt = NULL;
+@@ -417,7 +417,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+       RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
+       newinet->mc_index  = inet_iif(skb);
+       newinet->mc_ttl    = ip_hdr(skb)->ttl;
+-      newinet->inet_id   = jiffies;
++      newinet->inet_id   = prandom_u32();
+ 
+       if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
+               goto put_and_exit;
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 045d8a176279..0796355e74c1 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -62,7 +62,7 @@ static struct dsa_switch_tree *dsa_add_dst(u32 tree)
+               return NULL;
+       dst->tree = tree;
+       INIT_LIST_HEAD(&dst->list);
+-      list_add_tail(&dsa_switch_trees, &dst->list);
++      list_add_tail(&dst->list, &dsa_switch_trees);
+       kref_init(&dst->refcount);
+ 
+       return dst;
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index f915abff1350..d3eddfd13875 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -75,7 +75,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
+       inet->inet_dport = usin->sin_port;
+       sk->sk_state = TCP_ESTABLISHED;
+       sk_set_txhash(sk);
+-      inet->inet_id = jiffies;
++      inet->inet_id = prandom_u32();
+ 
+       sk_dst_set(sk, &rt->dst);
+       err = 0;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 24b066c32e06..1f26627c7fad 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -193,7 +193,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
+                       if (sk->sk_bound_dev_if)
+                               score += 4;
+               }
+-              if (sk->sk_incoming_cpu == raw_smp_processor_id())
++              if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+                       score++;
+       }
+       return score;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 347be2ea78d4..71ff2531d973 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -592,6 +592,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+               truncate = true;
+       }
+ 
++      if (tun_info->options_len < sizeof(*md))
++              goto err_free_rt;
++
+       md = ip_tunnel_info_opts(tun_info);
+       if (!md)
+               goto err_free_rt;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index c1f59a53f68f..8f07655718f3 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -574,7 +574,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+       }
+       /* This barrier is coupled with smp_wmb() in tcp_reset() */
+       smp_rmb();
+-      if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++      if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+               mask |= POLLERR;
+ 
+       return mask;
+@@ -1787,7 +1787,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+       if (unlikely(flags & MSG_ERRQUEUE))
+               return inet_recv_error(sk, msg, len, addr_len);
+ 
+-      if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
++      if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
+           (sk->sk_state == TCP_ESTABLISHED))
+               sk_busy_loop(sk, nonblock);
+ 
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 0569718e3656..44a41ac2b0ca 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -245,7 +245,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+                                                inet->inet_daddr);
+       }
+ 
+-      inet->inet_id = tp->write_seq ^ jiffies;
++      inet->inet_id = prandom_u32();
+ 
+       if (tcp_fastopen_defer_connect(sk, &err))
+               return err;
+@@ -1368,7 +1368,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
+       inet_csk(newsk)->icsk_ext_hdr_len = 0;
+       if (inet_opt)
+               inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
+-      newinet->inet_id = newtp->write_seq ^ jiffies;
++      newinet->inet_id = prandom_u32();
+ 
+       if (!dst) {
+               dst = inet_csk_route_child_sock(sk, newsk, req);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 54343dc29cb4..ab3f272a0884 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -419,7 +419,7 @@ static int compute_score(struct sock *sk, struct net *net,
+                       score += 4;
+       }
+ 
+-      if (sk->sk_incoming_cpu == raw_smp_processor_id())
++      if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+               score++;
+       return score;
+ }
+@@ -1195,6 +1195,20 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
+               scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
+ }
+ 
++static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
++{
++      /* We come here after udp_lib_checksum_complete() returned 0.
++       * This means that __skb_checksum_complete() might have
++       * set skb->csum_valid to 1.
++       * On 64bit platforms, we can set csum_unnecessary
++       * to true, but only if the skb is not shared.
++       */
++#if BITS_PER_LONG == 64
++      if (!skb_shared(skb))
++              udp_skb_scratch(skb)->csum_unnecessary = true;
++#endif
++}
++
+ static int udp_skb_truesize(struct sk_buff *skb)
+ {
+       return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
+@@ -1430,10 +1444,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
+                       *total += skb->truesize;
+                       kfree_skb(skb);
+               } else {
+-                      /* the csum related bits could be changed, refresh
+-                       * the scratch area
+-                       */
+-                      udp_set_dev_scratch(skb);
++                      udp_skb_csum_unnecessary_set(skb);
+                       break;
+               }
+       }
+@@ -1457,7 +1468,7 @@ static int first_packet_length(struct sock *sk)
+ 
+       spin_lock_bh(&rcvq->lock);
+       skb = __first_packet_length(sk, rcvq, &total);
+-      if (!skb && !skb_queue_empty(sk_queue)) {
++      if (!skb && !skb_queue_empty_lockless(sk_queue)) {
+               spin_lock(&sk_queue->lock);
+               skb_queue_splice_tail_init(sk_queue, rcvq);
+               spin_unlock(&sk_queue->lock);
+@@ -1532,7 +1543,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
+                               return skb;
+                       }
+ 
+-                      if (skb_queue_empty(sk_queue)) {
++                      if (skb_queue_empty_lockless(sk_queue)) {
+                               spin_unlock_bh(&queue->lock);
+                               goto busy_check;
+                       }
+@@ -1559,7 +1570,7 @@ busy_check:
+                               break;
+ 
+                       sk_busy_loop(sk, flags & MSG_DONTWAIT);
+-              } while (!skb_queue_empty(sk_queue));
++              } while (!skb_queue_empty_lockless(sk_queue));
+ 
+               /* sk_queue is empty, reader_queue may contain peeked packets */
+       } while (timeo &&
+@@ -2539,7 +2550,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
+       unsigned int mask = datagram_poll(file, sock, wait);
+       struct sock *sk = sock->sk;
+ 
+-      if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
++      if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       sock_rps_record_flow(sk);
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 6dc93ac28261..228983a5531b 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -118,7 +118,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
+                       if (sk->sk_bound_dev_if)
+                               score++;
+               }
+-              if (sk->sk_incoming_cpu == raw_smp_processor_id())
++              if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+                       score++;
+       }
+       return score;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 9f7bfeb90fb0..a2ba7356fa65 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -170,7 +170,7 @@ static int compute_score(struct sock *sk, struct net *net,
+                       score++;
+       }
+ 
+-      if (sk->sk_incoming_cpu == raw_smp_processor_id())
++      if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+               score++;
+ 
+       return score;
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index a51bfba19b9e..694a43c05eb9 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -567,11 +567,11 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
+       if (sk->sk_state == LLCP_LISTEN)
+               return llcp_accept_poll(sk);
+ 
+-      if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++      if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+               mask |= POLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ 
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       if (sk->sk_state == LLCP_CLOSED)
+diff --git a/net/phonet/socket.c b/net/phonet/socket.c
+index 1b050dd17393..a1df36f3bb6e 100644
+--- a/net/phonet/socket.c
++++ b/net/phonet/socket.c
+@@ -352,9 +352,9 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
+ 
+       if (sk->sk_state == TCP_CLOSE)
+               return POLLERR;
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+-      if (!skb_queue_empty(&pn->ctrlreq_queue))
++      if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
+               mask |= POLLPRI;
+       if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
+               return POLLHUP;
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index fa256f8038af..c73475c3a464 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -4,11 +4,11 @@
+  * Copyright (C) 2013 Nandita Dukkipati <[email protected]>
+  */
+ 
+-#include <linux/jhash.h>
+ #include <linux/jiffies.h>
+ #include <linux/module.h>
+ #include <linux/skbuff.h>
+ #include <linux/vmalloc.h>
++#include <linux/siphash.h>
+ #include <net/pkt_sched.h>
+ #include <net/sock.h>
+ 
+@@ -125,7 +125,7 @@ struct wdrr_bucket {
+ 
+ struct hhf_sched_data {
+       struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
+-      u32                perturbation;   /* hash perturbation */
++      siphash_key_t      perturbation;   /* hash perturbation */
+       u32                quantum;        /* psched_mtu(qdisc_dev(sch)); */
+       u32                drop_overlimit; /* number of times max qdisc packet
+                                           * limit was hit
+@@ -263,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
+       }
+ 
+       /* Get hashed flow-id of the skb. */
+-      hash = skb_get_hash_perturb(skb, q->perturbation);
++      hash = skb_get_hash_perturb(skb, &q->perturbation);
+ 
+       /* Check if this packet belongs to an already established HH flow. */
+       flow_pos = hash & HHF_BIT_MASK;
+@@ -578,7 +578,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+ 
+       sch->limit = 1000;
+       q->quantum = psched_mtu(qdisc_dev(sch));
+-      q->perturbation = prandom_u32();
++      get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+       INIT_LIST_HEAD(&q->new_buckets);
+       INIT_LIST_HEAD(&q->old_buckets);
+ 
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index cc39e170b4aa..04f15e0aeaa8 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -22,7 +22,7 @@
+ #include <linux/errno.h>
+ #include <linux/skbuff.h>
+ #include <linux/random.h>
+-#include <linux/jhash.h>
++#include <linux/siphash.h>
+ #include <net/ip.h>
+ #include <net/pkt_sched.h>
+ #include <net/pkt_cls.h>
+@@ -49,7 +49,7 @@ struct sfb_bucket {
+  * (Section 4.4 of SFB reference : moving hash functions)
+  */
+ struct sfb_bins {
+-      u32               perturbation; /* jhash perturbation */
++      siphash_key_t     perturbation; /* siphash key */
+       struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
+ };
+ 
+@@ -221,7 +221,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da
+ 
+ static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
+ {
+-      q->bins[slot].perturbation = prandom_u32();
++      get_random_bytes(&q->bins[slot].perturbation,
++                       sizeof(q->bins[slot].perturbation));
+ }
+ 
+ static void sfb_swap_slot(struct sfb_sched_data *q)
+@@ -317,9 +318,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               /* If using external classifiers, get result and record it. */
+               if (!sfb_classify(skb, fl, &ret, &salt))
+                       goto other_drop;
+-              sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
++              sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
+       } else {
+-              sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
++              sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
+       }
+ 
+ 
+@@ -355,7 +356,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+               /* Inelastic flow */
+               if (q->double_buffering) {
+                       sfbhash = skb_get_hash_perturb(skb,
+-                          q->bins[slot].perturbation);
++                          &q->bins[slot].perturbation);
+                       if (!sfbhash)
+                               sfbhash = 1;
+                       sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 3fbf20126045..cbc54ddfe076 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -18,7 +18,7 @@
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/skbuff.h>
+-#include <linux/jhash.h>
++#include <linux/siphash.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <net/netlink.h>
+@@ -121,7 +121,7 @@ struct sfq_sched_data {
+       u8              headdrop;
+       u8              maxdepth;       /* limit of packets per flow */
+ 
+-      u32             perturbation;
++      siphash_key_t   perturbation;
+       u8              cur_depth;      /* depth of longest slot */
+       u8              flags;
+       unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
+@@ -160,7 +160,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
+ static unsigned int sfq_hash(const struct sfq_sched_data *q,
+                            const struct sk_buff *skb)
+ {
+-      return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
++      return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
+ }
+ 
+ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
+@@ -609,9 +609,11 @@ static void sfq_perturbation(unsigned long arg)
+       struct Qdisc *sch = (struct Qdisc *)arg;
+       struct sfq_sched_data *q = qdisc_priv(sch);
+       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
++      siphash_key_t nkey;
+ 
++      get_random_bytes(&nkey, sizeof(nkey));
+       spin_lock(root_lock);
+-      q->perturbation = prandom_u32();
++      q->perturbation = nkey;
+       if (!q->filter_list && q->tail)
+               sfq_rehash(sch);
+       spin_unlock(root_lock);
+@@ -690,7 +692,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+       del_timer(&q->perturb_timer);
+       if (q->perturb_period) {
+               mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+-              q->perturbation = prandom_u32();
++              get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+       }
+       sch_tree_unlock(sch);
+       kfree(p);
+@@ -746,7 +748,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
+       q->quantum = psched_mtu(qdisc_dev(sch));
+       q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
+       q->perturb_period = 0;
+-      q->perturbation = prandom_u32();
++      get_random_bytes(&q->perturbation, sizeof(q->perturbation));
+ 
+       if (opt) {
+               int err = sfq_change(sch, opt);
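
[One detail worth noting in the sfq_perturbation() hunk above: the fresh key is drawn into a stack variable (nkey) before the root lock is taken, so the lock is held only for a small struct copy. A minimal pthread analogue of that shape, with illustrative names rather than kernel API:

#include <pthread.h>
#include <sys/random.h>

typedef struct { unsigned long long k[2]; } key128_t;	/* stand-in for siphash_key_t */

static key128_t perturbation;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void rekey(void)
{
	key128_t nkey;

	/* Draw the replacement key before taking the lock... */
	getrandom(&nkey, sizeof(nkey), 0);

	/* ...so the critical section is nothing but the copy. */
	pthread_mutex_lock(&lock);
	perturbation = nkey;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	rekey();
	return 0;
}
]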
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 4045d203b7d4..09cda66d0567 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7371,7 +7371,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
+       mask = 0;
+ 
+       /* Is there any exceptional events?  */
+-      if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++      if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+               mask |= POLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+       if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -7380,7 +7380,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
+               mask |= POLLHUP;
+ 
+       /* Is it readable?  Reconsider this code with TCP-style support.  */
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       /* The association is either gone or not ready.  */
+@@ -7716,7 +7716,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
+               if (sk_can_busy_loop(sk)) {
+                       sk_busy_loop(sk, noblock);
+ 
+-                      if (!skb_queue_empty(&sk->sk_receive_queue))
++                      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+                               continue;
+               }
+ 
+@@ -8136,7 +8136,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+       newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
+       newinet->inet_dport = htons(asoc->peer.port);
+       newinet->pmtudisc = inet->pmtudisc;
+-      newinet->inet_id = asoc->next_tsn ^ jiffies;
++      newinet->inet_id = prandom_u32();
+ 
+       newinet->uc_ttl = inet->uc_ttl;
+       newinet->mc_loop = 1;
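
[The skb_queue_empty_lockless() conversions here, and in the tipc, af_unix and vsock hunks below, all fix the same class of data race: poll() inspects the receive and error queues without taking the queue lock, so the emptiness test must be a single marked load that the compiler cannot tear or refetch. A userspace sketch of the idea; in the kernel this is a one-line READ_ONCE() comparison, and the volatile cast below plays that role.

#include <stdbool.h>

struct node { struct node *next; };
struct queue { struct node head; };	/* circular list: empty <=> head.next == &head */

static bool queue_empty_lockless(const struct queue *q)
{
	/* Force exactly one load of head.next, as READ_ONCE() does. */
	const struct node *next = *(const struct node * volatile *)&q->head.next;

	return next == &q->head;
}
]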
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index f13fb607c563..21929ba196eb 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -714,14 +714,14 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
+               /* fall thru' */
+       case TIPC_LISTEN:
+       case TIPC_CONNECTING:
+-              if (!skb_queue_empty(&sk->sk_receive_queue))
++              if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+                       mask |= (POLLIN | POLLRDNORM);
+               break;
+       case TIPC_OPEN:
+               if (!tsk->cong_link_cnt)
+                       mask |= POLLOUT;
+               if (tipc_sk_type_connectionless(sk) &&
+-                  (!skb_queue_empty(&sk->sk_receive_queue)))
++                  (!skb_queue_empty_lockless(&sk->sk_receive_queue)))
+                       mask |= (POLLIN | POLLRDNORM);
+               break;
+       case TIPC_DISCONNECTING:
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2adfcc6dec5a..4de9dfd14d09 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2665,7 +2665,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
+               mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+ 
+       /* readable? */
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       /* Connection-based need to check for termination and startup */
+@@ -2693,7 +2693,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+       mask = 0;
+ 
+       /* exceptional events? */
+-      if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
++      if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+               mask |= POLLERR |
+                       (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+ 
+@@ -2703,7 +2703,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+               mask |= POLLHUP;
+ 
+       /* readable? */
+-      if (!skb_queue_empty(&sk->sk_receive_queue))
++      if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               mask |= POLLIN | POLLRDNORM;
+ 
+       /* Connection-based need to check for termination and startup */
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 148c949cdfe7..1939b77e98b7 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -880,7 +880,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
+                * the queue and write as long as the socket isn't shutdown for
+                * sending.
+                */
+-              if (!skb_queue_empty(&sk->sk_receive_queue) ||
++              if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+                   (sk->sk_shutdown & RCV_SHUTDOWN)) {
+                       mask |= POLLIN | POLLRDNORM;
+               }
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index d632a0511d62..158ce68bc9bf 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1169,8 +1169,7 @@ static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len)
+       }
+ 
+       if (in) {
+-              if (in & WMFW_CTL_FLAG_READABLE)
+-                      out |= rd;
++              out |= rd;
+               if (in & WMFW_CTL_FLAG_WRITEABLE)
+                       out |= wr;
+               if (in & WMFW_CTL_FLAG_VOLATILE)
+diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
+index 66fc13a2396a..0e07e3dea7de 100644
+--- a/sound/soc/rockchip/rockchip_i2s.c
++++ b/sound/soc/rockchip/rockchip_i2s.c
+@@ -676,7 +676,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
+       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "Could not register PCM\n");
+-              return ret;
++              goto err_suspend;
+       }
+ 
+       return 0;
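
[The rockchip_i2s hunk is a standard probe-unwind fix: the failing devm_snd_dmaengine_pcm_register() call happens after earlier resources were enabled, so returning directly would leak them; routing the error through the driver's existing err_suspend label undoes those steps. A self-contained sketch of the idiom, where every function is a hypothetical stub rather than the driver's API:

#include <stdio.h>

static int acquire_clock(void) { puts("clock on"); return 0; }	/* stub */
static void release_clock(void) { puts("clock off"); }		/* stub */
static int register_pcm(void) { return -1; /* simulate failure */ }

static int example_probe(void)
{
	int ret = acquire_clock();

	if (ret)
		return ret;	/* nothing to undo yet */

	ret = register_pcm();
	if (ret)
		goto err_suspend;	/* was a bare "return ret" before the fix */

	return 0;

err_suspend:
	release_clock();	/* undo acquisitions in reverse order */
	return ret;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}
]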
+diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
+index 32e64a8a6443..bec7a2f1fb4d 100644
+--- a/tools/perf/builtin-c2c.c
++++ b/tools/perf/builtin-c2c.c
+@@ -2454,6 +2454,7 @@ static int build_cl_output(char *cl_sort, bool no_source)
+       bool add_sym   = false;
+       bool add_dso   = false;
+       bool add_src   = false;
++      int ret = 0;
+ 
+       if (!buf)
+               return -ENOMEM;
+@@ -2472,7 +2473,8 @@ static int build_cl_output(char *cl_sort, bool no_source)
+                       add_dso = true;
+               } else if (strcmp(tok, "offset")) {
+                       pr_err("unrecognized sort token: %s\n", tok);
+-                      return -EINVAL;
++                      ret = -EINVAL;
++                      goto err;
+               }
+       }
+ 
+@@ -2495,13 +2497,15 @@ static int build_cl_output(char *cl_sort, bool no_source)
+               add_sym ? "symbol," : "",
+               add_dso ? "dso," : "",
+               add_src ? "cl_srcline," : "",
+-              "node") < 0)
+-              return -ENOMEM;
++              "node") < 0) {
++              ret = -ENOMEM;
++              goto err;
++      }
+ 
+       c2c.show_src = add_src;
+-
++err:
+       free(buf);
+-      return 0;
++      return ret;
+ }
+ 
+ static int setup_coalesce(const char *coalesce, bool no_source)
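
[The builtin-c2c hunks above convert build_cl_output() to a single-exit shape: buf, which used to leak on the -EINVAL and -ENOMEM paths, is now freed at one err: label that success and failure both reach. A compact illustration of that shape; parse_token() is a made-up stand-in for the real token handling:

#include <stdlib.h>
#include <string.h>

static int parse_token(const char *tok)
{
	return strcmp(tok, "offset") == 0 ? 0 : -1;	/* stand-in check */
}

static int build_output(const char *sort)
{
	char *buf = strdup(sort);
	int ret = 0;

	if (!buf)
		return -1;

	for (char *tok = strtok(buf, ","); tok; tok = strtok(NULL, ","))
		if (parse_token(tok)) {
			ret = -1;
			goto err;	/* was "return -1", leaking buf */
		}
err:
	free(buf);	/* runs on every path */
	return ret;
}

int main(void)
{
	return build_output("offset,offset");
}
]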
+diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
+index 9e693ce4b73b..ce786f363476 100644
+--- a/tools/perf/builtin-kmem.c
++++ b/tools/perf/builtin-kmem.c
+@@ -687,6 +687,7 @@ static char *compact_gfp_flags(char *gfp_flags)
+                       new = realloc(new_flags, len + strlen(cpt) + 2);
+                       if (new == NULL) {
+                               free(new_flags);
++                              free(orig_flags);
+                               return NULL;
+                       }
+ 
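
[The builtin-kmem hunk plugs a leak in compact_gfp_flags(): on realloc() failure the partially built output was freed, but the strdup()ed working copy never was. A minimal reconstruction of the corrected pattern, simplified from the perf code rather than copied from it:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *compact(const char *flags)
{
	char *orig = strdup(flags);	/* working copy consumed by strtok() */
	char *out = NULL;
	size_t len = 0;

	if (!orig)
		return NULL;

	for (char *tok = strtok(orig, "|"); tok; tok = strtok(NULL, "|")) {
		char *tmp = realloc(out, len + strlen(tok) + 2);

		if (!tmp) {
			free(out);
			free(orig);	/* the call the fix adds */
			return NULL;
		}
		out = tmp;
		len += sprintf(out + len, "%s|", tok);
	}
	free(orig);
	return out;
}

int main(void)
{
	char *s = compact("GFP_KERNEL|GFP_ATOMIC");

	puts(s ? s : "(alloc failed)");
	free(s);
	return 0;
}
]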
+diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
+index fe3230c55986..fb7a59ed759e 100644
+--- a/tools/testing/selftests/net/reuseport_dualstack.c
++++ b/tools/testing/selftests/net/reuseport_dualstack.c
+@@ -129,7 +129,7 @@ static void test(int *rcv_fds, int count, int proto)
+ {
+       struct epoll_event ev;
+       int epfd, i, test_fd;
+-      uint16_t test_family;
++      int test_family;
+       socklen_t len;
+ 
+       epfd = epoll_create(1);
+@@ -146,6 +146,7 @@ static void test(int *rcv_fds, int count, int proto)
+       send_from_v4(proto);
+ 
+       test_fd = receive_once(epfd, proto);
++      len = sizeof(test_family);
+       if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+               error(1, errno, "failed to read socket domain");
+       if (test_family != AF_INET)
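
[The reuseport_dualstack fix corrects two classic getsockopt() mistakes at once: SO_DOMAIN yields a full int, so a uint16_t destination can come back truncated or garbled depending on endianness, and the optlen argument is in/out and must be initialized to the buffer size before the call. A sketch of the corrected call:

#include <sys/socket.h>

static int sock_domain(int fd)
{
	int domain;			/* was uint16_t: too small for SO_DOMAIN */
	socklen_t len = sizeof(domain);	/* was left uninitialized */

	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len))
		return -1;
	return domain;			/* AF_INET, AF_INET6, ... */
}
]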
+diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
+index bf315bcbe663..fae8c52cf8f0 100644
+--- a/tools/testing/selftests/powerpc/mm/Makefile
++++ b/tools/testing/selftests/powerpc/mm/Makefile
+@@ -3,6 +3,7 @@ noarg:
+       $(MAKE) -C ../
+ 
+ TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao
++TEST_GEN_PROGS_EXTENDED := tlbie_test
+ TEST_GEN_FILES := tempfile
+ 
+ include ../../lib.mk
+@@ -14,3 +15,4 @@ $(OUTPUT)/prot_sao: ../utils.c
+ $(OUTPUT)/tempfile:
+       dd if=/dev/zero of=$@ bs=64k count=1
+ 
++$(OUTPUT)/tlbie_test: LDLIBS += -lpthread
+diff --git a/tools/testing/selftests/powerpc/mm/tlbie_test.c b/tools/testing/selftests/powerpc/mm/tlbie_test.c
+new file mode 100644
+index 000000000000..f85a0938ab25
+--- /dev/null
++++ b/tools/testing/selftests/powerpc/mm/tlbie_test.c
+@@ -0,0 +1,734 @@
++// SPDX-License-Identifier: GPL-2.0
++
++/*
++ * Copyright 2019, Nick Piggin, Gautham R. Shenoy, Aneesh Kumar K.V, IBM Corp.
++ */
++
++/*
++ *
++ * Test the tlbie/mtpidr race. We have 4 threads doing a
++ * flush/load/compare/store sequence in a loop. The same threads also run a
++ * context-switch task that does sched_yield() in a loop.
++ *
++ * The snapshot thread marks the mmap area PROT_READ in between, makes a copy
++ * and copies it back to the original area. This helps us detect whether any
++ * store continued to happen after we marked the memory PROT_READ.
++ */
++
++#define _GNU_SOURCE
++#include <stdio.h>
++#include <sys/mman.h>
++#include <sys/types.h>
++#include <sys/wait.h>
++#include <sys/ipc.h>
++#include <sys/shm.h>
++#include <sys/stat.h>
++#include <sys/time.h>
++#include <linux/futex.h>
++#include <unistd.h>
++#include <asm/unistd.h>
++#include <string.h>
++#include <stdlib.h>
++#include <fcntl.h>
++#include <sched.h>
++#include <time.h>
++#include <stdarg.h>
++#include <sched.h>
++#include <pthread.h>
++#include <signal.h>
++#include <sys/prctl.h>
++
++static inline void dcbf(volatile unsigned int *addr)
++{
++      __asm__ __volatile__ ("dcbf %y0; sync" : : "Z"(*(unsigned char *)addr) : "memory");
++}
++
++static void err_msg(char *msg)
++{
++
++      time_t now;
++      time(&now);
++      printf("=================================\n");
++      printf("    Error: %s\n", msg);
++      printf("    %s", ctime(&now));
++      printf("=================================\n");
++      exit(1);
++}
++
++static char *map1;
++static char *map2;
++static pid_t rim_process_pid;
++
++/*
++ * A "rim-sequence" is defined to be the sequence of the following
++ * operations performed on a memory word:
++ *    1) FLUSH the contents of that word.
++ *    2) LOAD the contents of that word.
++ *    3) COMPARE the contents of that word with the content that was
++ *               previously stored at that word
++ *    4) STORE new content into that word.
++ *
++ * The threads in this test that perform the rim-sequence are termed
++ * as rim_threads.
++ */
++
++/*
++ * A "corruption" is defined to be the failed COMPARE operation in a
++ * rim-sequence.
++ *
++ * A rim_thread that detects a corruption informs all the other
++ * rim_threads and the mem_snapshot thread about it.
++ */
++static volatile unsigned int corruption_found;
++
++/*
++ * This defines the maximum number of rim_threads in this test.
++ *
++ * The THREAD_ID_BITS denote the number of bits required
++ * to represent the thread_ids [0..MAX_THREADS - 1].
++ * We are being a bit paranoid here and set it to 8 bits,
++ * though 6 bits suffice.
++ *
++ */
++#define MAX_THREADS           64
++#define THREAD_ID_BITS                8
++#define THREAD_ID_MASK                ((1 << THREAD_ID_BITS) - 1)
++static unsigned int rim_thread_ids[MAX_THREADS];
++static pthread_t rim_threads[MAX_THREADS];
++
++
++/*
++ * Each rim_thread works on an exclusive "chunk" of size
++ * RIM_CHUNK_SIZE.
++ *
++ * The ith rim_thread works on the ith chunk.
++ *
++ * The ith chunk begins at
++ * map1 + (i * RIM_CHUNK_SIZE)
++ */
++#define RIM_CHUNK_SIZE        1024
++#define BITS_PER_BYTE                 8
++#define WORD_SIZE                     (sizeof(unsigned int))
++#define WORD_BITS             (WORD_SIZE * BITS_PER_BYTE)
++#define WORDS_PER_CHUNK               (RIM_CHUNK_SIZE/WORD_SIZE)
++
++static inline char *compute_chunk_start_addr(unsigned int thread_id)
++{
++      char *chunk_start;
++
++      chunk_start = (char *)((unsigned long)map1 +
++                             (thread_id * RIM_CHUNK_SIZE));
++
++      return chunk_start;
++}
++
++/*
++ * The "word-offset" of a word-aligned address inside a chunk, is
++ * defined to be the number of words that precede the address in that
++ * chunk.
++ *
++ * WORD_OFFSET_BITS denote the number of bits required to represent
++ * the word-offsets of all the word-aligned addresses of a chunk.
++ */
++#define WORD_OFFSET_BITS      (__builtin_ctz(WORDS_PER_CHUNK))
++#define WORD_OFFSET_MASK      ((1 << WORD_OFFSET_BITS) - 1)
++
++static inline unsigned int compute_word_offset(char *start, unsigned int *addr)
++{
++      unsigned int delta_bytes, ret;
++      delta_bytes = (unsigned long)addr - (unsigned long)start;
++
++      ret = delta_bytes/WORD_SIZE;
++
++      return ret;
++}
++
++/*
++ * A "sweep" is defined to be the sequential execution of the
++ * rim-sequence by a rim_thread on its chunk one word at a time,
++ * starting from the first word of its chunk and ending with the last
++ * word of its chunk.
++ *
++ * Each sweep of a rim_thread is uniquely identified by a sweep_id.
++ * SWEEP_ID_BITS denote the number of bits required to represent
++ * the sweep_ids of rim_threads.
++ *
++ * As to why SWEEP_ID_BITS are computed as a function of THREAD_ID_BITS,
++ * WORD_OFFSET_BITS, and WORD_BITS, see the "store-pattern" below.
++ */
++#define SWEEP_ID_BITS         (WORD_BITS - (THREAD_ID_BITS + WORD_OFFSET_BITS))
++#define SWEEP_ID_MASK         ((1 << SWEEP_ID_BITS) - 1)
++
++/*
++ * A "store-pattern" is the word-pattern that is stored into a word
++ * location in the 4)STORE step of the rim-sequence.
++ *
++ * In the store-pattern, we shall encode:
++ *
++ *      - The thread-id of the rim_thread performing the store
++ *        (The most significant THREAD_ID_BITS)
++ *
++ *      - The word-offset of the address into which the store is being
++ *        performed (The next WORD_OFFSET_BITS)
++ *
++ *      - The sweep_id of the current sweep in which the store is
++ *        being performed. (The lower SWEEP_ID_BITS)
++ *
++ * Store Pattern: 32 bits
++ * |------------------|--------------------|---------------------------------|
++ * |    Thread id     |  Word offset       |         sweep_id                |
++ * |------------------|--------------------|---------------------------------|
++ *    THREAD_ID_BITS     WORD_OFFSET_BITS          SWEEP_ID_BITS
++ *
++ * In the store pattern, the (Thread-id + Word-offset) uniquely identify the
++ * address to which the store is being performed i.e,
++ *    address == map1 +
++ *              (Thread-id * RIM_CHUNK_SIZE) + (Word-offset * WORD_SIZE)
++ *
++ * And the sweep_id in the store pattern identifies the time when the
++ * store was performed by the rim_thread.
++ *
++ * We shall use this property in the 3)COMPARE step of the
++ * rim-sequence.
++ */
++#define SWEEP_ID_SHIFT        0
++#define WORD_OFFSET_SHIFT     (SWEEP_ID_BITS)
++#define THREAD_ID_SHIFT               (WORD_OFFSET_BITS + SWEEP_ID_BITS)
++
++/*
++ * Compute the store pattern for a given thread with id @tid, at
++ * location @addr in the sweep identified by @sweep_id
++ */
++static inline unsigned int compute_store_pattern(unsigned int tid,
++                                               unsigned int *addr,
++                                               unsigned int sweep_id)
++{
++      unsigned int ret = 0;
++      char *start = compute_chunk_start_addr(tid);
++      unsigned int word_offset = compute_word_offset(start, addr);
++
++      ret += (tid & THREAD_ID_MASK) << THREAD_ID_SHIFT;
++      ret += (word_offset & WORD_OFFSET_MASK) << WORD_OFFSET_SHIFT;
++      ret += (sweep_id & SWEEP_ID_MASK) << SWEEP_ID_SHIFT;
++      return ret;
++}
++
++/* Extract the thread-id from the given store-pattern */
++static inline unsigned int extract_tid(unsigned int pattern)
++{
++      unsigned int ret;
++
++      ret = (pattern >> THREAD_ID_SHIFT) & THREAD_ID_MASK;
++      return ret;
++}
++
++/* Extract the word-offset from the given store-pattern */
++static inline unsigned int extract_word_offset(unsigned int pattern)
++{
++      unsigned int ret;
++
++      ret = (pattern >> WORD_OFFSET_SHIFT) & WORD_OFFSET_MASK;
++
++      return ret;
++}
++
++/* Extract the sweep-id from the given store-pattern */
++static inline unsigned int extract_sweep_id(unsigned int pattern)
++
++{
++      unsigned int ret;
++
++      ret = (pattern >> SWEEP_ID_SHIFT) & SWEEP_ID_MASK;
++
++      return ret;
++}
++
++/************************************************************
++ *                                                          *
++ *          Logging the output of the verification          *
++ *                                                          *
++ ************************************************************/
++#define LOGDIR_NAME_SIZE 100
++static char logdir[LOGDIR_NAME_SIZE];
++
++static FILE *fp[MAX_THREADS];
++static const char logfilename[] ="Thread-%02d-Chunk";
++
++static inline void start_verification_log(unsigned int tid,
++                                        unsigned int *addr,
++                                        unsigned int cur_sweep_id,
++                                        unsigned int prev_sweep_id)
++{
++      FILE *f;
++      char logfile[30];
++      char path[LOGDIR_NAME_SIZE + 30];
++      char separator[2] = "/";
++      char *chunk_start = compute_chunk_start_addr(tid);
++      unsigned int size = RIM_CHUNK_SIZE;
++
++      sprintf(logfile, logfilename, tid);
++      strcpy(path, logdir);
++      strcat(path, separator);
++      strcat(path, logfile);
++      f = fopen(path, "w");
++
++      if (!f) {
++              err_msg("Unable to create logfile\n");
++      }
++
++      fp[tid] = f;
++
++      fprintf(f, "----------------------------------------------------------\n");
++      fprintf(f, "PID                = %d\n", rim_process_pid);
++      fprintf(f, "Thread id          = %02d\n", tid);
++      fprintf(f, "Chunk Start Addr   = 0x%016lx\n", (unsigned long)chunk_start);
++      fprintf(f, "Chunk Size         = %d\n", size);
++      fprintf(f, "Next Store Addr    = 0x%016lx\n", (unsigned long)addr);
++      fprintf(f, "Current sweep-id   = 0x%08x\n", cur_sweep_id);
++      fprintf(f, "Previous sweep-id  = 0x%08x\n", prev_sweep_id);
++      fprintf(f, "----------------------------------------------------------\n");
++}
++
++static inline void log_anamoly(unsigned int tid, unsigned int *addr,
++                             unsigned int expected, unsigned int observed)
++{
++      FILE *f = fp[tid];
++
++      fprintf(f, "Thread %02d: Addr 0x%lx: Expected 0x%x, Observed 0x%x\n",
++              tid, (unsigned long)addr, expected, observed);
++      fprintf(f, "Thread %02d: Expected Thread id   = %02d\n", tid, extract_tid(expected));
++      fprintf(f, "Thread %02d: Observed Thread id   = %02d\n", tid, extract_tid(observed));
++      fprintf(f, "Thread %02d: Expected Word offset = %03d\n", tid, extract_word_offset(expected));
++      fprintf(f, "Thread %02d: Observed Word offset = %03d\n", tid, extract_word_offset(observed));
++      fprintf(f, "Thread %02d: Expected sweep-id    = 0x%x\n", tid, extract_sweep_id(expected));
++      fprintf(f, "Thread %02d: Observed sweep-id    = 0x%x\n", tid, extract_sweep_id(observed));
++      fprintf(f, "----------------------------------------------------------\n");
++}
++
++static inline void end_verification_log(unsigned int tid, unsigned nr_anamolies)
++{
++      FILE *f = fp[tid];
++      char logfile[30];
++      char path[LOGDIR_NAME_SIZE + 30];
++      char separator[] = "/";
++
++      fclose(f);
++
++      sprintf(logfile, logfilename, tid);
++      strcpy(path, logdir);
++      strcat(path, separator);
++      strcat(path, logfile);
++
++      if (nr_anamolies == 0) {
++              remove(path);   /* path must be built before this remove() */
++              return;
++      }
++
++      printf("Thread %02d chunk has %d corrupted words. For details check %s\n",
++              tid, nr_anamolies, path);
++}
++
++/*
++ * When a COMPARE step of a rim-sequence fails, the rim_thread informs
++ * everyone else via the shared corruption_found variable. On seeing
++ * this, every thread verifies the content of its chunk as follows.
++ *
++ * Suppose a thread identified with @tid was about to store (but not
++ * yet stored) to @next_store_addr in its current sweep identified
++ * @cur_sweep_id. Let @prev_sweep_id indicate the previous sweep_id.
++ *
++ * This implies that for all the addresses @addr < @next_store_addr,
++ * Thread @tid has already performed a store as part of its current
++ * sweep. Hence we expect the content of such @addr to be:
++ *    |-------------------------------------------------|
++ *    | tid   | word_offset(addr) |    cur_sweep_id     |
++ *    |-------------------------------------------------|
++ *
++ * Since Thread @tid is yet to perform stores on address
++ * @next_store_addr and above, we expect the content of such an
++ * address @addr to be:
++ *    |-------------------------------------------------|
++ *    | tid   | word_offset(addr) |    prev_sweep_id    |
++ *    |-------------------------------------------------|
++ *
++ * The verifier function @verify_chunk does this verification and logs
++ * any anamolies that it finds.
++ */
++static void verify_chunk(unsigned int tid, unsigned int *next_store_addr,
++                unsigned int cur_sweep_id,
++                unsigned int prev_sweep_id)
++{
++      unsigned int *iter_ptr;
++      unsigned int size = RIM_CHUNK_SIZE;
++      unsigned int expected;
++      unsigned int observed;
++      char *chunk_start = compute_chunk_start_addr(tid);
++
++      int nr_anamolies = 0;
++
++      start_verification_log(tid, next_store_addr,
++                             cur_sweep_id, prev_sweep_id);
++
++      for (iter_ptr = (unsigned int *)chunk_start;
++           (unsigned long)iter_ptr < (unsigned long)chunk_start + size;
++           iter_ptr++) {
++              unsigned int expected_sweep_id;
++
++              if (iter_ptr < next_store_addr) {
++                      expected_sweep_id = cur_sweep_id;
++              } else {
++                      expected_sweep_id = prev_sweep_id;
++              }
++
++              expected = compute_store_pattern(tid, iter_ptr, expected_sweep_id);
++
++              dcbf((volatile unsigned int*)iter_ptr); //Flush before reading
++              observed = *iter_ptr;
++
++              if (observed != expected) {
++                      nr_anamolies++;
++                      log_anamoly(tid, iter_ptr, expected, observed);
++              }
++      }
++
++      end_verification_log(tid, nr_anamolies);
++}
++
++static void set_pthread_cpu(pthread_t th, int cpu)
++{
++      cpu_set_t run_cpu_mask;
++      struct sched_param param;
++
++      CPU_ZERO(&run_cpu_mask);
++      CPU_SET(cpu, &run_cpu_mask);
++      pthread_setaffinity_np(th, sizeof(cpu_set_t), &run_cpu_mask);
++
++      param.sched_priority = 1;
++      if (0 && sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
++              /* haven't reproduced with this setting, it kills random preemption which may be a factor */
++              fprintf(stderr, "could not set SCHED_FIFO, run as root?\n");
++      }
++}
++
++static void set_mycpu(int cpu)
++{
++      cpu_set_t run_cpu_mask;
++      struct sched_param param;
++
++      CPU_ZERO(&run_cpu_mask);
++      CPU_SET(cpu, &run_cpu_mask);
++      sched_setaffinity(0, sizeof(cpu_set_t), &run_cpu_mask);
++
++      param.sched_priority = 1;
++      if (0 && sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
++              fprintf(stderr, "could not set SCHED_FIFO, run as root?\n");
++      }
++}
++
++static volatile int segv_wait;
++
++static void segv_handler(int signo, siginfo_t *info, void *extra)
++{
++      while (segv_wait) {
++              sched_yield();
++      }
++
++}
++
++static void set_segv_handler(void)
++{
++      struct sigaction sa;
++
++      sa.sa_flags = SA_SIGINFO;
++      sa.sa_sigaction = segv_handler;
++
++      if (sigaction(SIGSEGV, &sa, NULL) == -1) {
++              perror("sigaction");
++              exit(EXIT_FAILURE);
++      }
++}
++
++int timeout = 0;
++/*
++ * This function is executed by every rim_thread.
++ *
++ * This function performs sweeps over the exclusive chunks of the
++ * rim_threads executing the rim-sequence one word at a time.
++ */
++static void *rim_fn(void *arg)
++{
++      unsigned int tid = *((unsigned int *)arg);
++
++      int size = RIM_CHUNK_SIZE;
++      char *chunk_start = compute_chunk_start_addr(tid);
++
++      unsigned int prev_sweep_id;
++      unsigned int cur_sweep_id = 0;
++
++      /* word access */
++      unsigned int pattern = cur_sweep_id;
++      unsigned int *pattern_ptr = &pattern;
++      unsigned int *w_ptr, read_data;
++
++      set_segv_handler();
++
++      /*
++       * Let us initialize the chunk:
++       *
++       * Each word-aligned address addr in the chunk,
++       * is initialized to :
++       *    |-------------------------------------------------|
++       *    | tid   | word_offset(addr) |         0           |
++       *    |-------------------------------------------------|
++       */
++      for (w_ptr = (unsigned int *)chunk_start;
++           (unsigned long)w_ptr < (unsigned long)(chunk_start) + size;
++           w_ptr++) {
++
++              *pattern_ptr = compute_store_pattern(tid, w_ptr, cur_sweep_id);
++              *w_ptr = *pattern_ptr;
++      }
++
++      while (!corruption_found && !timeout) {
++              prev_sweep_id = cur_sweep_id;
++              cur_sweep_id = cur_sweep_id + 1;
++
++              for (w_ptr = (unsigned int *)chunk_start;
++                   (unsigned long)w_ptr < (unsigned long)(chunk_start) + size;
++                   w_ptr++)  {
++                      unsigned int old_pattern;
++
++                      /*
++                       * Compute the pattern that we would have
++                       * stored at this location in the previous
++                       * sweep.
++                       */
++                      old_pattern = compute_store_pattern(tid, w_ptr, prev_sweep_id);
++
++                      /*
++                       * FLUSH:Ensure that we flush the contents of
++                       *       the cache before loading
++                       */
++                      dcbf((volatile unsigned int*)w_ptr); //Flush
++
++                      /* LOAD: Read the value */
++                      read_data = *w_ptr; //Load
++
++                      /*
++                       * COMPARE: Is it the same as what we had stored
++                       *          in the previous sweep ? It better be!
++                       */
++                      if (read_data != old_pattern) {
++                              /* No it isn't! Tell everyone */
++                              corruption_found = 1;
++                      }
++
++                      /*
++                       * Before performing a store, let us check if
++                       * any rim_thread has found a corruption.
++                       */
++                      if (corruption_found || timeout) {
++                              /*
++                               * Yes. Someone (including us!) has found
++                               * a corruption :(
++                               *
++                               * Let us verify that our chunk is
++                               * correct.
++                               */
++                              /* But first, let us allow the dust to settle down! */
++                              verify_chunk(tid, w_ptr, cur_sweep_id, prev_sweep_id);
++
++                              return 0;
++                      }
++
++                      /*
++                       * Compute the new pattern that we are going
++                       * to write to this location
++                       */
++                      *pattern_ptr = compute_store_pattern(tid, w_ptr, cur_sweep_id);
++
++                      /*
++                       * STORE: Now let us write this pattern into
++                       *        the location
++                       */
++                      *w_ptr = *pattern_ptr;
++              }
++      }
++
++      return NULL;
++}
++
++
++static unsigned long start_cpu = 0;
++static unsigned long nrthreads = 4;
++
++static pthread_t mem_snapshot_thread;
++
++static void *mem_snapshot_fn(void *arg)
++{
++      int page_size = getpagesize();
++      size_t size = page_size;
++      void *tmp = malloc(size);
++
++      while (!corruption_found && !timeout) {
++              /* Stop memory migration once corruption is found */
++              segv_wait = 1;
++
++              mprotect(map1, size, PROT_READ);
++
++              /*
++               * Load from the working alias (map1). Loading from map2
++               * also fails.
++               */
++              memcpy(tmp, map1, size);
++
++              /*
++               * Stores must go via map2 which has write permissions, but
++               * the corrupted data tends to be seen in the snapshot buffer,
++               * so corruption does not appear to be introduced at the
++               * copy-back via map2 alias here.
++               */
++              memcpy(map2, tmp, size);
++              /*
++               * Before releasing other threads, must ensure the copy
++               * Before releasing other threads, must ensure the copy
++               * back to the original area is complete (hence the sync).
++              asm volatile("sync" ::: "memory");
++              mprotect(map1, size, PROT_READ|PROT_WRITE);
++              asm volatile("sync" ::: "memory");
++              segv_wait = 0;
++
++              usleep(1); /* This value makes a big difference */
++      }
++
++      return 0;
++}
++
++void alrm_sighandler(int sig)
++{
++      timeout = 1;
++}
++
++int main(int argc, char *argv[])
++{
++      int c;
++      int page_size = getpagesize();
++      time_t now;
++      int i, dir_error;
++      pthread_attr_t attr;
++      key_t shm_key = (key_t) getpid();
++      int shmid, run_time = 20 * 60;
++      struct sigaction sa_alrm;
++
++      snprintf(logdir, LOGDIR_NAME_SIZE,
++               "/tmp/logdir-%u", (unsigned int)getpid());
++      while ((c = getopt(argc, argv, "r:hn:l:t:")) != -1) {
++              switch(c) {
++              case 'r':
++                      start_cpu = strtoul(optarg, NULL, 10);
++                      break;
++              case 'h':
++                      printf("%s [-r <start_cpu>] [-n <nrthreads>] [-l <logdir>] [-t <timeout>]\n", argv[0]);
++                      exit(0);
++                      break;
++              case 'n':
++                      nrthreads = strtoul(optarg, NULL, 10);
++                      break;
++              case 'l':
++                      strncpy(logdir, optarg, LOGDIR_NAME_SIZE - 1);
++                      break;
++              case 't':
++                      run_time = strtoul(optarg, NULL, 10);
++                      break;
++              default:
++                      printf("invalid option\n");
++                      exit(0);
++                      break;
++              }
++      }
++
++      if (nrthreads > MAX_THREADS)
++              nrthreads = MAX_THREADS;
++
++      shmid = shmget(shm_key, page_size, IPC_CREAT|0666);
++      if (shmid < 0) {
++              err_msg("Failed shmget\n");
++      }
++
++      map1 = shmat(shmid, NULL, 0);
++      if (map1 == (void *) -1) {
++              err_msg("Failed shmat");
++      }
++
++      map2 = shmat(shmid, NULL, 0);
++      if (map2 == (void *) -1) {
++              err_msg("Failed shmat");
++      }
++
++      dir_error = mkdir(logdir, 0755);
++
++      if (dir_error) {
++              err_msg("Failed mkdir");
++      }
++
++      printf("start_cpu list:%lu\n", start_cpu);
++      printf("number of worker threads:%lu + 1 snapshot thread\n", nrthreads);
++      printf("Allocated address:0x%016lx + secondary map:0x%016lx\n", (unsigned long)map1, (unsigned long)map2);
++      printf("logdir at : %s\n", logdir);
++      printf("Timeout: %d seconds\n", run_time);
++
++      time(&now);
++      printf("=================================\n");
++      printf("     Starting Test\n");
++      printf("     %s", ctime(&now));
++      printf("=================================\n");
++
++      for (i = 0; i < nrthreads; i++) {
++              if (1 && !fork()) {
++                      prctl(PR_SET_PDEATHSIG, SIGKILL);
++                      set_mycpu(start_cpu + i);
++                      for (;;)
++                              sched_yield();
++                      exit(0);
++              }
++      }
++
++
++      sa_alrm.sa_handler = &alrm_sighandler;
++      sigemptyset(&sa_alrm.sa_mask);
++      sa_alrm.sa_flags = 0;
++
++      if (sigaction(SIGALRM, &sa_alrm, 0) == -1) {
++              err_msg("Failed signal handler registration\n");
++      }
++
++      alarm(run_time);
++
++      pthread_attr_init(&attr);
++      for (i = 0; i < nrthreads; i++) {
++              rim_thread_ids[i] = i;
++              pthread_create(&rim_threads[i], &attr, rim_fn, &rim_thread_ids[i]);
++              set_pthread_cpu(rim_threads[i], start_cpu + i);
++      }
++
++      pthread_create(&mem_snapshot_thread, &attr, mem_snapshot_fn, map1);
++      set_pthread_cpu(mem_snapshot_thread, start_cpu + i);
++
++
++      pthread_join(mem_snapshot_thread, NULL);
++      for (i = 0; i < nrthreads; i++) {
++              pthread_join(rim_threads[i], NULL);
++      }
++
++      if (!timeout) {
++              time(&now);
++              printf("=================================\n");
++              printf("      Data Corruption Detected\n");
++              printf("      %s", ctime(&now));
++              printf("      See logfiles in %s\n", logdir);
++              printf("=================================\n");
++              return 1;
++      }
++      return 0;
++}
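
[For readers tracing the tlbie_test logic: the 32-bit store pattern packs the writing thread's id, the word offset inside its chunk, and the sweep id into disjoint bit fields, which is what lets verify_chunk() name the exact store a corrupted word should have come from. A standalone check of that packing; the constants mirror the test's defaults (8 thread-id bits, 1024-byte chunks of 4-byte words), and this block is an illustration, not part of the patch:

#include <assert.h>
#include <stdio.h>

#define THREAD_ID_BITS   8
#define WORD_OFFSET_BITS 8	/* __builtin_ctz(1024 / 4) */
#define SWEEP_ID_BITS    (32 - THREAD_ID_BITS - WORD_OFFSET_BITS)

static unsigned int encode(unsigned int tid, unsigned int off, unsigned int sweep)
{
	return (tid << (WORD_OFFSET_BITS + SWEEP_ID_BITS)) |
	       (off << SWEEP_ID_BITS) |
	       (sweep & ((1u << SWEEP_ID_BITS) - 1));
}

int main(void)
{
	unsigned int p = encode(3, 17, 42);

	assert((p >> (WORD_OFFSET_BITS + SWEEP_ID_BITS)) == 3);
	assert(((p >> SWEEP_ID_BITS) & ((1u << WORD_OFFSET_BITS) - 1)) == 17);
	assert((p & ((1u << SWEEP_ID_BITS) - 1)) == 42);
	printf("pattern = 0x%08x\n", p);
	return 0;
}
]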
