commit:     e2bc25d301e5de34a242bb37fa33d5059b97a5cf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 20 11:20:47 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 20 11:20:47 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e2bc25d3

Linux patch 4.4.224

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1223_linux-4.4.224.patch | 3600 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3604 insertions(+)

diff --git a/0000_README b/0000_README
index ea662bf..fb38e65 100644
--- a/0000_README
+++ b/0000_README
@@ -935,6 +935,10 @@ Patch:  1222_linux-4.4.223.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.223
 
+Patch:  1223_linux-4.4.224.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.224
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1223_linux-4.4.224.patch b/1223_linux-4.4.224.patch
new file mode 100644
index 0000000..4dcf48f
--- /dev/null
+++ b/1223_linux-4.4.224.patch
@@ -0,0 +1,3600 @@
+diff --git a/Makefile b/Makefile
+index 6b88acb0b9b1..f381af71fa32 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 223
++SUBLEVEL = 224
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+@@ -631,7 +631,6 @@ ARCH_CFLAGS :=
+ include arch/$(SRCARCH)/Makefile
+ 
+ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
+-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+@@ -651,6 +650,7 @@ endif
+ 
+ # Tell gcc to never replace conditional load with a non-conditional one
+ KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
++KBUILD_CFLAGS += $(call cc-option,-fno-allow-store-data-races)
+ 
+ # check for 'asm goto'
+ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+@@ -796,6 +796,17 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
+ # disable stringop warnings in gcc 8+
+ KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+ 
++# We'll want to enable this eventually, but it's not going away for 5.7 at least
++KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
++KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
++KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
++
++# Another good warning that we'll want to enable eventually
++KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
++
++# Enabled with W=2, disabled by default as noisy
++KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
++
+ # disable invalid "can't wrap" optimizations for signed / pointers
+ KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
+ 
+diff --git a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+index 7c869fe3c30b..3baf5c4eec5b 100644
+--- a/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
++++ b/arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
+@@ -81,8 +81,8 @@
+       imx27-phycard-s-rdk {
+               pinctrl_i2c1: i2c1grp {
+                       fsl,pins = <
+-                              MX27_PAD_I2C2_SDA__I2C2_SDA 0x0
+-                              MX27_PAD_I2C2_SCL__I2C2_SCL 0x0
++                              MX27_PAD_I2C_DATA__I2C_DATA 0x0
++                              MX27_PAD_I2C_CLK__I2C_CLK 0x0
+                       >;
+               };
+ 
+diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
+index e14cb1438216..2c43e12eb99a 100644
+--- a/arch/arm/boot/dts/r8a7740.dtsi
++++ b/arch/arm/boot/dts/r8a7740.dtsi
+@@ -461,7 +461,7 @@
+               cpg_clocks: cpg_clocks@e6150000 {
+                       compatible = "renesas,r8a7740-cpg-clocks";
+                       reg = <0xe6150000 0x10000>;
+-                      clocks = <&extal1_clk>, <&extalr_clk>;
++                      clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>;
+                       #clock-cells = <1>;
+                       clock-output-names = "system", "pllc0", "pllc1",
+                                            "pllc2", "r",
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 575c9afeba9b..217b60246cbb 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -331,7 +331,8 @@ sysenter_past_esp:
+        * Return back to the vDSO, which will pop ecx and edx.
+        * Don't bother with DS and ES (they already contain __USER_DS).
+        */
+-      ENABLE_INTERRUPTS_SYSEXIT
++      sti
++      sysexit
+ 
+ .pushsection .fixup, "ax"
+ 2:    movl    $0, PT_FS(%esp)
+@@ -554,11 +555,6 @@ ENTRY(native_iret)
+       iret
+       _ASM_EXTABLE(native_iret, iret_exc)
+ END(native_iret)
+-
+-ENTRY(native_irq_enable_sysexit)
+-      sti
+-      sysexit
+-END(native_irq_enable_sysexit)
+ #endif
+ 
+ ENTRY(overflow)
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 3d1ec41ae09a..20370c6db74b 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -6,8 +6,6 @@
+ #ifndef _ASM_X86_MACH_DEFAULT_APM_H
+ #define _ASM_X86_MACH_DEFAULT_APM_H
+ 
+-#include <asm/nospec-branch.h>
+-
+ #ifdef APM_ZERO_SEGS
+ #     define APM_DO_ZERO_SEGS \
+               "pushl %%ds\n\t" \
+@@ -33,7 +31,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+        * N.B. We do NOT need a cld after the BIOS call
+        * because we always save and restore the flags.
+        */
+-      firmware_restrict_branch_speculation_start();
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+@@ -46,7 +43,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+                 "=S" (*esi)
+               : "a" (func), "b" (ebx_in), "c" (ecx_in)
+               : "memory", "cc");
+-      firmware_restrict_branch_speculation_end();
+ }
+ 
+ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+@@ -59,7 +55,6 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+        * N.B. We do NOT need a cld after the BIOS call
+        * because we always save and restore the flags.
+        */
+-      firmware_restrict_branch_speculation_start();
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+@@ -72,7 +67,6 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+                 "=S" (si)
+               : "a" (func), "b" (ebx_in), "c" (ecx_in)
+               : "memory", "cc");
+-      firmware_restrict_branch_speculation_end();
+       return error;
+ }
+ 
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index c759b3cca663..b4c5099cafee 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -938,13 +938,6 @@ extern void default_banner(void);
+       push %ecx; push %edx;                           \
+       call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
+       pop %edx; pop %ecx
+-
+-#define ENABLE_INTERRUPTS_SYSEXIT                                     \
+-      PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
+-                CLBR_NONE,                                            \
+-                jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+-
+-
+ #else /* !CONFIG_X86_32 */
+ 
+ /*
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 3d44191185f8..cc0e5a666c9e 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -162,15 +162,6 @@ struct pv_cpu_ops {
+ 
+       u64 (*read_pmc)(int counter);
+ 
+-#ifdef CONFIG_X86_32
+-      /*
+-       * Atomically enable interrupts and return to userspace.  This
+-       * is only used in 32-bit kernels.  64-bit kernels use
+-       * usergs_sysret32 instead.
+-       */
+-      void (*irq_enable_sysexit)(void);
+-#endif
+-
+       /*
+        * Switch to usermode gs and return to 64-bit usermode using
+        * sysret.  Only used in 64-bit kernels to return to 64-bit
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 58505f01962f..743bd2d77e51 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -54,8 +54,13 @@
+ /*
+  * Initialize the stackprotector canary value.
+  *
+- * NOTE: this must only be called from functions that never return,
++ * NOTE: this must only be called from functions that never return
+  * and it must always be inlined.
++ *
++ * In addition, it should be called from a compilation unit for which
++ * stack protector is disabled. Alternatively, the caller should not end
++ * with a function call which gets tail-call optimized as that would
++ * lead to checking a modified canary value.
+  */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index 052c9c3026cc..dfdbe01ef9f2 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -239,6 +239,7 @@
+ #include <asm/olpc.h>
+ #include <asm/paravirt.h>
+ #include <asm/reboot.h>
++#include <asm/nospec-branch.h>
+ 
+ #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
+ extern int (*console_blank_hook)(int);
+@@ -613,11 +614,13 @@ static long __apm_bios_call(void *_call)
+       gdt[0x40 / 8] = bad_bios_desc;
+ 
+       apm_irq_save(flags);
++      firmware_restrict_branch_speculation_start();
+       APM_DO_SAVE_SEGS;
+       apm_bios_call_asm(call->func, call->ebx, call->ecx,
+                         &call->eax, &call->ebx, &call->ecx, &call->edx,
+                         &call->esi);
+       APM_DO_RESTORE_SEGS;
++      firmware_restrict_branch_speculation_end();
+       apm_irq_restore(flags);
+       gdt[0x40 / 8] = save_desc_40;
+       put_cpu();
+@@ -689,10 +692,12 @@ static long __apm_bios_call_simple(void *_call)
+       gdt[0x40 / 8] = bad_bios_desc;
+ 
+       apm_irq_save(flags);
++      firmware_restrict_branch_speculation_start();
+       APM_DO_SAVE_SEGS;
+       error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
+                                        &call->eax);
+       APM_DO_RESTORE_SEGS;
++      firmware_restrict_branch_speculation_end();
+       apm_irq_restore(flags);
+       gdt[0x40 / 8] = save_desc_40;
+       put_cpu();
+diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
+index 439df975bc7a..84a7524b202c 100644
+--- a/arch/x86/kernel/asm-offsets.c
++++ b/arch/x86/kernel/asm-offsets.c
+@@ -65,9 +65,6 @@ void common(void) {
+       OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
+       OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
+       OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+-#ifdef CONFIG_X86_32
+-      OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+-#endif
+       OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
+       OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+ #endif
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 632195b41688..2cd05f34c0b6 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -168,9 +168,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+               ret = paravirt_patch_ident_64(insnbuf, len);
+ 
+       else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+-#ifdef CONFIG_X86_32
+-               type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
+-#endif
+                type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
+                type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
+               /* If operation requires a jmp, then jmp */
+@@ -226,7 +223,6 @@ static u64 native_steal_clock(int cpu)
+ 
+ /* These are in entry.S */
+ extern void native_iret(void);
+-extern void native_irq_enable_sysexit(void);
+ extern void native_usergs_sysret32(void);
+ extern void native_usergs_sysret64(void);
+ 
+@@ -385,9 +381,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
+ 
+       .load_sp0 = native_load_sp0,
+ 
+-#if defined(CONFIG_X86_32)
+-      .irq_enable_sysexit = native_irq_enable_sysexit,
+-#endif
+ #ifdef CONFIG_X86_64
+ #ifdef CONFIG_IA32_EMULATION
+       .usergs_sysret32 = native_usergs_sysret32,
+diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
+index c89f50a76e97..158dc0650d5d 100644
+--- a/arch/x86/kernel/paravirt_patch_32.c
++++ b/arch/x86/kernel/paravirt_patch_32.c
+@@ -5,7 +5,6 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
+ DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
+ DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
+ DEF_NATIVE(pv_cpu_ops, iret, "iret");
+-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
+ DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
+ DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
+ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
+@@ -46,7 +45,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+               PATCH_SITE(pv_irq_ops, restore_fl);
+               PATCH_SITE(pv_irq_ops, save_fl);
+               PATCH_SITE(pv_cpu_ops, iret);
+-              PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
+               PATCH_SITE(pv_mmu_ops, read_cr2);
+               PATCH_SITE(pv_mmu_ops, read_cr3);
+               PATCH_SITE(pv_mmu_ops, write_cr3);
+diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
+index 0677bf8d3a42..03c6a8cf33c4 100644
+--- a/arch/x86/kernel/paravirt_patch_64.c
++++ b/arch/x86/kernel/paravirt_patch_64.c
+@@ -12,7 +12,6 @@ DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
+ DEF_NATIVE(pv_cpu_ops, clts, "clts");
+ DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
+ 
+-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
+ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
+ DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
+ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index c017f1c71560..0512af683871 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -243,6 +243,14 @@ static void notrace start_secondary(void *unused)
+ 
+       wmb();
+       cpu_startup_entry(CPUHP_ONLINE);
++
++      /*
++       * Prevent tail call to cpu_startup_entry() because the stack protector
++       * guard has been changed a couple of function calls up, in
++       * boot_init_stack_canary() and must not be checked before tail calling
++       * another function.
++       */
++      prevent_tail_call_optimization();
+ }
+ 
+ void __init smp_store_boot_cpu_info(void)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3adc255e69cb..aac60d1605ff 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2941,7 +2941,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
+       unsigned bank_num = mcg_cap & 0xff, bank;
+ 
+       r = -EINVAL;
+-      if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
++      if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
+               goto out;
+       if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
+               goto out;
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 82fd84d5e1aa..79aff24eed65 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1240,10 +1240,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ 
+       .iret = xen_iret,
+ #ifdef CONFIG_X86_64
+-      .usergs_sysret32 = xen_sysret32,
+       .usergs_sysret64 = xen_sysret64,
+-#else
+-      .irq_enable_sysexit = xen_sysexit,
+ #endif
+ 
+       .load_tr_desc = paravirt_nop,
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 29e50d1229bc..ee48506ca151 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -116,6 +116,7 @@ asmlinkage __visible void cpu_bringup_and_idle(int cpu)
+ #endif
+       cpu_bringup();
+       cpu_startup_entry(CPUHP_ONLINE);
++      prevent_tail_call_optimization();
+ }
+ 
+ static void xen_smp_intr_free(unsigned int cpu)
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index fd92a64d748e..feb6d40a0860 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -34,20 +34,6 @@ check_events:
+       pop %eax
+       ret
+ 
+-/*
+- * We can't use sysexit directly, because we're not running in ring0.
+- * But we can easily fake it up using iret.  Assuming xen_sysexit is
+- * jumped to with a standard stack frame, we can just strip it back to
+- * a standard iret frame and use iret.
+- */
+-ENTRY(xen_sysexit)
+-      movl PT_EAX(%esp), %eax                 /* Shouldn't be necessary? */
+-      orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
+-      lea PT_EIP(%esp), %esp
+-
+-      jmp xen_iret
+-ENDPROC(xen_sysexit)
+-
+ /*
+  * This is run where a normal iret would be run, with the same stack setup:
+  *    8: eflags
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 1399423f3418..4140b070f2e9 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -139,9 +139,6 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long);
+ 
+ /* These are not functions, and cannot be called normally */
+ __visible void xen_iret(void);
+-#ifdef CONFIG_X86_32
+-__visible void xen_sysexit(void);
+-#endif
+ __visible void xen_sysret32(void);
+ __visible void xen_sysret64(void);
+ __visible void xen_adjust_exception_frame(void);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 7662f97dded6..dc4119a1e122 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -719,6 +719,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ 
+       kobject_init(&q->kobj, &blk_queue_ktype);
+ 
++#ifdef CONFIG_BLK_DEV_IO_TRACE
++      mutex_init(&q->blk_trace_mutex);
++#endif
+       mutex_init(&q->sysfs_lock);
+       spin_lock_init(&q->__queue_lock);
+ 
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index a07ca3488d96..c1c654319287 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -481,6 +481,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+       struct blk_mq_hw_ctx *hctx;
+       int i;
+ 
++      /*
++       * Avoid potential races with things like queue removal.
++       */
++      if (!percpu_ref_tryget(&q->q_usage_counter))
++              return;
+ 
+       queue_for_each_hw_ctx(q, hctx, i) {
+               struct blk_mq_tags *tags = hctx->tags;
+@@ -497,7 +502,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+               bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+                     false);
+       }
+-
++      blk_queue_exit(q);
+ }
+ 
+ static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index b5633501f181..e027b8ed6030 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -628,6 +628,22 @@ static void blk_mq_rq_timer(unsigned long priv)
+       };
+       int i;
+ 
++      /* A deadlock might occur if a request is stuck requiring a
++       * timeout at the same time a queue freeze is waiting
++       * completion, since the timeout code would not be able to
++       * acquire the queue reference here.
++       *
++       * That's why we don't use blk_queue_enter here; instead, we use
++       * percpu_ref_tryget directly, because we need to be able to
++       * obtain a reference even in the short window between the queue
++       * starting to freeze, by dropping the first reference in
++       * blk_mq_freeze_queue_start, and the moment the last request is
++       * consumed, marked by the instant q_usage_counter reaches
++       * zero.
++       */
++      if (!percpu_ref_tryget(&q->q_usage_counter))
++              return;
++
+       blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
+ 
+       if (data.next_set) {
+@@ -642,6 +658,7 @@ static void blk_mq_rq_timer(unsigned long priv)
+                               blk_mq_tag_idle(hctx);
+               }
+       }
++      blk_queue_exit(q);
+ }
+ 
+ /*
+diff --git a/block/blk-timeout.c b/block/blk-timeout.c
+index aa40aa93381b..2bc03df554a6 100644
+--- a/block/blk-timeout.c
++++ b/block/blk-timeout.c
+@@ -134,6 +134,8 @@ void blk_rq_timed_out_timer(unsigned long data)
+       struct request *rq, *tmp;
+       int next_set = 0;
+ 
++      if (blk_queue_enter(q, GFP_NOWAIT))
++              return;
+       spin_lock_irqsave(q->queue_lock, flags);
+ 
+       list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+@@ -143,6 +145,7 @@ void blk_rq_timed_out_timer(unsigned long data)
+               mod_timer(&q->timeout, round_jiffies_up(next));
+ 
+       spin_unlock_irqrestore(q->queue_lock, flags);
++      blk_queue_exit(q);
+ }
+ 
+ /**
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index d38a382b09eb..fc3d4fec8ddd 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -377,7 +377,7 @@ out_put_alg:
+       return inst;
+ }
+ 
+-static void free(struct crypto_instance *inst)
++static void free_inst(struct crypto_instance *inst)
+ {
+       crypto_drop_spawn(crypto_instance_ctx(inst));
+       kfree(inst);
+@@ -386,7 +386,7 @@ static void free(struct crypto_instance *inst)
+ static struct crypto_template crypto_tmpl = {
+       .name = "lrw",
+       .alloc = alloc,
+-      .free = free,
++      .free = free_inst,
+       .module = THIS_MODULE,
+ };
+ 
+diff --git a/crypto/xts.c b/crypto/xts.c
+index f6fd43f100c8..4ee09c440d12 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -334,7 +334,7 @@ out_put_alg:
+       return inst;
+ }
+ 
+-static void free(struct crypto_instance *inst)
++static void free_inst(struct crypto_instance *inst)
+ {
+       crypto_drop_spawn(crypto_instance_ctx(inst));
+       kfree(inst);
+@@ -343,7 +343,7 @@ static void free(struct crypto_instance *inst)
+ static struct crypto_template crypto_tmpl = {
+       .name = "xts",
+       .alloc = alloc,
+-      .free = free,
++      .free = free_inst,
+       .module = THIS_MODULE,
+ };
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 8c5503c0bad7..0936b68eff80 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -289,17 +289,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+               DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
+               },
+       },
+-      {
+-      /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
+-      /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
+-      .callback = video_detect_force_native,
+-      .ident = "HP Pavilion dv6",
+-      .matches = {
+-              DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+-              DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
+-              },
+-      },
+-
+       { },
+ };
+ 
+diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
+index 3df0422607d5..ac9aede1bfbe 100644
+--- a/drivers/dma/mmp_tdma.c
++++ b/drivers/dma/mmp_tdma.c
+@@ -364,6 +364,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
+               gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
+                               size);
+       tdmac->desc_arr = NULL;
++      if (tdmac->status == DMA_ERROR)
++              tdmac->status = DMA_COMPLETE;
+ 
+       return;
+ }
+diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
+index 113605f6fe20..32517003e118 100644
+--- a/drivers/dma/pch_dma.c
++++ b/drivers/dma/pch_dma.c
+@@ -877,6 +877,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
+       }
+ 
+       pci_set_master(pdev);
++      pd->dma.dev = &pdev->dev;
+ 
+       err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
+       if (err) {
+@@ -892,7 +893,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
+               goto err_free_irq;
+       }
+ 
+-      pd->dma.dev = &pdev->dev;
+ 
+       INIT_LIST_HEAD(&pd->dma.channels);
+ 
+diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
+index 7fbcc35e8ad3..c89c10055641 100644
+--- a/drivers/gpu/drm/qxl/qxl_image.c
++++ b/drivers/gpu/drm/qxl/qxl_image.c
+@@ -210,7 +210,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
+               break;
+       default:
+               DRM_ERROR("unsupported image bit depth\n");
+-              return -EINVAL; /* TODO: cleanup */
++              qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
++              return -EINVAL;
+       }
+       image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+       image->u.bitmap.x = width;
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 68835de07e07..a8349100854e 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -293,9 +293,9 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
+       fl6.saddr = src_in->sin6_addr;
+       fl6.flowi6_oif = addr->bound_dev_if;
+ 
+-      ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
+-      if (ret < 0)
+-              goto put;
++      dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
++      if (IS_ERR(dst))
++              return PTR_ERR(dst);
+ 
+       if (ipv6_addr_any(&fl6.saddr)) {
+               ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 348828271cb0..ecd461ee6dbe 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2156,6 +2156,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+       int send_size;
+       int header_size;
+       int spc;
++      int err;
+       int i;
+ 
+       if (wr->wr.opcode != IB_WR_SEND)
+@@ -2190,7 +2191,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+ 
+       sqp->ud_header.lrh.virtual_lane    = 0;
+       sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+-      ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
++      err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
++      if (err)
++              return err;
+       sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
+       if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
+               sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
+@@ -2423,9 +2426,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
+       }
+       sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+       if (!sqp->qp.ibqp.qp_num)
+-              ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
++              err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
++                                       &pkey);
+       else
+-              ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
++              err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
++                                       &pkey);
++      if (err)
++              return err;
++
+       sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
+       sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
+       sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index ffd88af80de3..bf39ce88360a 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -945,19 +945,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+                */
+               priv->dev->broadcast[8] = priv->pkey >> 8;
+               priv->dev->broadcast[9] = priv->pkey & 0xff;
+-
+-              /*
+-               * Update the broadcast address in the priv->broadcast object,
+-               * in case it already exists, otherwise no one will do that.
+-               */
+-              if (priv->broadcast) {
+-                      spin_lock_irq(&priv->lock);
+-                      memcpy(priv->broadcast->mcmember.mgid.raw,
+-                             priv->dev->broadcast + 4,
+-                      sizeof(union ib_gid));
+-                      spin_unlock_irq(&priv->lock);
+-              }
+-
+               return 0;
+       }
+ 
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 9b9793333816..3fd1cba0c7ec 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1708,7 +1708,7 @@ static int enic_open(struct net_device *netdev)
+ {
+       struct enic *enic = netdev_priv(netdev);
+       unsigned int i;
+-      int err;
++      int err, ret;
+ 
+       err = enic_request_intr(enic);
+       if (err) {
+@@ -1766,10 +1766,9 @@ static int enic_open(struct net_device *netdev)
+ 
+ err_out_free_rq:
+       for (i = 0; i < enic->rq_count; i++) {
+-              err = vnic_rq_disable(&enic->rq[i]);
+-              if (err)
+-                      return err;
+-              vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
++              ret = vnic_rq_disable(&enic->rq[i]);
++              if (!ret)
++                      vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+       }
+       enic_dev_notify_unset(enic);
+ err_out_free_intr:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index dd4e6ea9e0e1..af7f97791320 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -266,7 +266,7 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+  * @data: word read from the Shadow RAM
+  *
+- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
++ * Reads one 16 bit word from the Shadow RAM using the AdminQ
+  **/
+ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+                                        u16 *data)
+@@ -280,27 +280,49 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ }
+ 
+ /**
+- * i40e_read_nvm_word - Reads Shadow RAM
++ * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+  * @hw: pointer to the HW structure
+  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+  * @data: word read from the Shadow RAM
+  *
+- * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
++ * Reads one 16 bit word from the Shadow RAM.
++ *
++ * Do not use this function except in cases where the nvm lock is already
++ * taken via i40e_acquire_nvm().
++ **/
++static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
++                                      u16 offset, u16 *data)
++{
++      i40e_status ret_code = 0;
++
++      if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
++              ret_code = i40e_read_nvm_word_aq(hw, offset, data);
++      else
++              ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
++      return ret_code;
++}
++
++/**
++ * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
++ * @hw: pointer to the HW structure
++ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
++ * @data: word read from the Shadow RAM
++ *
++ * Reads one 16 bit word from the Shadow RAM.
+  **/
+ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+                              u16 *data)
+ {
+-      enum i40e_status_code ret_code = 0;
++      i40e_status ret_code = 0;
+ 
+       ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+-      if (!ret_code) {
+-              if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+-                      ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+-              } else {
+-                      ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+-              }
+-              i40e_release_nvm(hw);
+-      }
++      if (ret_code)
++              return ret_code;
++
++      ret_code = __i40e_read_nvm_word(hw, offset, data);
++
++      i40e_release_nvm(hw);
++
+       return ret_code;
+ }
+ 
+@@ -393,31 +415,25 @@ read_nvm_buffer_aq_exit:
+ }
+ 
+ /**
+- * i40e_read_nvm_buffer - Reads Shadow RAM buffer
++ * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
+  * @hw: pointer to the HW structure
+  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+  * @words: (in) number of words to read; (out) number of words actually read
+  * @data: words read from the Shadow RAM
+  *
+  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+- * method. The buffer read is preceded by the NVM ownership take
+- * and followed by the release.
++ * method.
+  **/
+-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+-                               u16 *words, u16 *data)
++static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
++                                        u16 offset, u16 *words,
++                                        u16 *data)
+ {
+-      enum i40e_status_code ret_code = 0;
++      i40e_status ret_code = 0;
+ 
+-      if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+-              ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+-              if (!ret_code) {
+-                      ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+-                                                         data);
+-                      i40e_release_nvm(hw);
+-              }
+-      } else {
++      if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
++              ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
++      else
+               ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+-      }
+       return ret_code;
+ }
+ 
+@@ -499,15 +515,15 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+       data = (u16 *)vmem.va;
+ 
+       /* read pointer to VPD area */
+-      ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
++      ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+       if (ret_code) {
+               ret_code = I40E_ERR_NVM_CHECKSUM;
+               goto i40e_calc_nvm_checksum_exit;
+       }
+ 
+       /* read pointer to PCIe Alt Auto-load module */
+-      ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+-                                    &pcie_alt_module);
++      ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
++                                      &pcie_alt_module);
+       if (ret_code) {
+               ret_code = I40E_ERR_NVM_CHECKSUM;
+               goto i40e_calc_nvm_checksum_exit;
+@@ -521,7 +537,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+               if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+                       u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+ 
+-                      ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
++                      ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
+                       if (ret_code) {
+                               ret_code = I40E_ERR_NVM_CHECKSUM;
+                               goto i40e_calc_nvm_checksum_exit;
+@@ -593,14 +609,19 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+       u16 checksum_sr = 0;
+       u16 checksum_local = 0;
+ 
++      /* We must acquire the NVM lock in order to correctly synchronize the
++       * NVM accesses across multiple PFs. Without doing so it is possible
++       * for one of the PFs to read invalid data potentially indicating that
++       * the checksum is invalid.
++       */
++      ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
++      if (ret_code)
++              return ret_code;
+       ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
++      __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
++      i40e_release_nvm(hw);
+       if (ret_code)
+-              goto i40e_validate_nvm_checksum_exit;
+-
+-      /* Do not use i40e_read_nvm_word() because we do not want to take
+-       * the synchronization semaphores twice here.
+-       */
+-      i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
++              return ret_code;
+ 
+       /* Verify read checksum from EEPROM is the same as
+        * calculated checksum
+@@ -612,7 +633,6 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+       if (checksum)
+               *checksum = checksum_local;
+ 
+-i40e_validate_nvm_checksum_exit:
+       return ret_code;
+ }
+ 
+@@ -958,6 +978,7 @@ retry:
+               break;
+ 
+       case I40E_NVMUPD_CSUM_CON:
++              /* Assumes the caller has acquired the nvm */
+               status = i40e_update_nvm_checksum(hw);
+               if (status) {
+                       *perrno = hw->aq.asq_last_status ?
+@@ -971,6 +992,7 @@ retry:
+               break;
+ 
+       case I40E_NVMUPD_CSUM_LCB:
++              /* Assumes the caller has acquired the nvm */
+               status = i40e_update_nvm_checksum(hw);
+               if (status) {
+                       *perrno = hw->aq.asq_last_status ?
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+index bb9d583e5416..6caa2ab0ad74 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+@@ -282,8 +282,6 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+ void i40e_release_nvm(struct i40e_hw *hw);
+ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+                                        u16 *data);
+-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+-                                         u16 *words, u16 *data);
+ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
+ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+                                                u16 *checksum);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index f8ac0e69d14b..b774ba64bd4b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -2295,6 +2295,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
+ 
+               if (!err || err == -ENOSPC) {
+                       priv->def_counter[port] = idx;
++                      err = 0;
+               } else if (err == -ENOENT) {
+                       err = 0;
+                       continue;
+@@ -2344,7 +2345,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+                                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+               if (!err)
+                       *idx = get_param_l(&out_param);
+-
++              if (WARN_ON(err == -ENOSPC))
++                      err = -EINVAL;
+               return err;
+       }
+       return __mlx4_counter_alloc(dev, idx);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index bf4447581072..e88605de84cc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -933,7 +933,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+       if (err) {
+               dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+                       FW_PRE_INIT_TIMEOUT_MILI);
+-              goto out;
++              goto out_err;
+       }
+ 
+       err = mlx5_cmd_init(dev);
+diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
+index f1dde59c9fa6..374e691b11da 100644
+--- a/drivers/net/ethernet/moxa/moxart_ether.c
++++ b/drivers/net/ethernet/moxa/moxart_ether.c
+@@ -541,7 +541,7 @@ static int moxart_remove(struct platform_device *pdev)
+       struct net_device *ndev = platform_get_drvdata(pdev);
+ 
+       unregister_netdev(ndev);
+-      free_irq(ndev->irq, ndev);
++      devm_free_irq(&pdev->dev, ndev->irq, ndev);
+       moxart_mac_free_memory(ndev);
+       free_netdev(ndev);
+ 
+diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
+index acf3f11e38cc..68d2f31921ff 100644
+--- a/drivers/net/ethernet/natsemi/jazzsonic.c
++++ b/drivers/net/ethernet/natsemi/jazzsonic.c
+@@ -247,13 +247,15 @@ static int jazz_sonic_probe(struct platform_device *pdev)
+               goto out;
+       err = register_netdev(dev);
+       if (err)
+-              goto out1;
++              goto undo_probe1;
+ 
+       printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
+ 
+       return 0;
+ 
+-out1:
++undo_probe1:
++      dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++                        lp->descriptors, lp->descriptors_laddr);
+       release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
+ out:
+       free_netdev(dev);
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 1988bc00de3c..ec13e2ae6d16 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -781,7 +781,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
+               fl6->daddr = geneve->remote.sin6.sin6_addr;
+       }
+ 
+-      if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
++      dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
++                                            NULL);
++      if (IS_ERR(dst)) {
+               netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
+               return ERR_PTR(-ENETUNREACH);
+       }
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index e6f564d50663..847c9fc10f9a 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -1107,7 +1107,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
+               goto out;
+       }
+       dp83640_clock_init(clock, bus);
+-      list_add_tail(&phyter_clocks, &clock->list);
++      list_add_tail(&clock->list, &phyter_clocks);
+ out:
+       mutex_unlock(&phyter_clocks_lock);
+ 
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 4eba646789c3..d31e944b9c24 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -592,6 +592,21 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
+ {
+ }
+ 
++static int kszphy_resume(struct phy_device *phydev)
++{
++      int value;
++
++      mutex_lock(&phydev->lock);
++
++      value = phy_read(phydev, MII_BMCR);
++      phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
++
++      kszphy_config_intr(phydev);
++      mutex_unlock(&phydev->lock);
++
++      return 0;
++}
++
+ static int kszphy_probe(struct phy_device *phydev)
+ {
+       const struct kszphy_type *type = phydev->drv->driver_data;
+@@ -783,7 +798,7 @@ static struct phy_driver ksphy_driver[] = {
+       .ack_interrupt  = kszphy_ack_interrupt,
+       .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+-      .resume         = genphy_resume,
++      .resume         = kszphy_resume,
+       .driver         = { .owner = THIS_MODULE,},
+ }, {
+       .phy_id         = PHY_ID_KSZ8061,
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index e1c17ab5c2d5..8d09d21f4cbf 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -916,10 +916,10 @@ void phy_state_machine(struct work_struct *work)
+               phydev->adjust_link(phydev->attached_dev);
+               break;
+       case PHY_RUNNING:
+-              /* Only register a CHANGE if we are polling and link changed
+-               * since latest checking.
++              /* Only register a CHANGE if we are polling or ignoring
++               * interrupts and link changed since latest checking.
+                */
+-              if (phydev->irq == PHY_POLL) {
++              if (!phy_interrupt_is_valid(phydev)) {
+                       old_link = phydev->link;
+                       err = phy_read_status(phydev);
+                       if (err)
+@@ -1019,13 +1019,8 @@ void phy_state_machine(struct work_struct *work)
+       dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
+               phy_state_to_str(old_state), phy_state_to_str(phydev->state));
+ 
+-      /* Only re-schedule a PHY state machine change if we are polling the
+-       * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
+-       * between states from phy_mac_interrupt()
+-       */
+-      if (phydev->irq == PHY_POLL)
+-              queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+-                                 PHY_STATE_TIME * HZ);
++      queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
++                         PHY_STATE_TIME * HZ);
+ }
+ 
+ void phy_mac_interrupt(struct phy_device *phydev, int new_link)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 752f44a0e3af..d6ae6d3c98ed 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1864,7 +1864,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
+ {
+       struct dst_entry *ndst;
+       struct flowi6 fl6;
+-      int err;
+ 
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_oif = oif;
+@@ -1873,11 +1872,10 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
+       fl6.flowi6_mark = skb->mark;
+       fl6.flowi6_proto = IPPROTO_UDP;
+ 
+-      err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
+-                                       vxlan->vn6_sock->sock->sk,
+-                                       &ndst, &fl6);
+-      if (err < 0)
+-              return ERR_PTR(err);
++      ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, vxlan->vn6_sock->sock->sk,
++                                             &fl6, NULL);
++      if (unlikely(IS_ERR(ndst)))
++              return ERR_PTR(-ENETUNREACH);
+ 
+       *saddr = fl6.saddr;
+       return ndst;
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 60a5e0c63a13..efe68b13704d 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -171,10 +171,11 @@ static struct posix_clock_operations ptp_clock_ops = {
+       .read           = ptp_read,
+ };
+ 
+-static void delete_ptp_clock(struct posix_clock *pc)
++static void ptp_clock_release(struct device *dev)
+ {
+-      struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
++      struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
+ 
++      ptp_cleanup_pin_groups(ptp);
+       mutex_destroy(&ptp->tsevq_mux);
+       mutex_destroy(&ptp->pincfg_mux);
+       ida_simple_remove(&ptp_clocks_map, ptp->index);
+@@ -205,7 +206,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+       }
+ 
+       ptp->clock.ops = ptp_clock_ops;
+-      ptp->clock.release = delete_ptp_clock;
+       ptp->info = info;
+       ptp->devid = MKDEV(major, index);
+       ptp->index = index;
+@@ -214,17 +214,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+       mutex_init(&ptp->pincfg_mux);
+       init_waitqueue_head(&ptp->tsev_wq);
+ 
+-      /* Create a new device in our class. */
+-      ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp,
+-                               "ptp%d", ptp->index);
+-      if (IS_ERR(ptp->dev))
+-              goto no_device;
+-
+-      dev_set_drvdata(ptp->dev, ptp);
+-
+-      err = ptp_populate_sysfs(ptp);
++      err = ptp_populate_pin_groups(ptp);
+       if (err)
+-              goto no_sysfs;
++              goto no_pin_groups;
+ 
+       /* Register a new PPS source. */
+       if (info->pps) {
+@@ -235,13 +227,24 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+               pps.owner = info->owner;
+               ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
+               if (!ptp->pps_source) {
++                      err = -EINVAL;
+                       pr_err("failed to register pps source\n");
+                       goto no_pps;
+               }
+       }
+ 
+-      /* Create a posix clock. */
+-      err = posix_clock_register(&ptp->clock, ptp->devid);
++      /* Initialize a new device of our class in our clock structure. */
++      device_initialize(&ptp->dev);
++      ptp->dev.devt = ptp->devid;
++      ptp->dev.class = ptp_class;
++      ptp->dev.parent = parent;
++      ptp->dev.groups = ptp->pin_attr_groups;
++      ptp->dev.release = ptp_clock_release;
++      dev_set_drvdata(&ptp->dev, ptp);
++      dev_set_name(&ptp->dev, "ptp%d", ptp->index);
++
++      /* Create a posix clock and link it to the device. */
++      err = posix_clock_register(&ptp->clock, &ptp->dev);
+       if (err) {
+               pr_err("failed to create posix clock\n");
+               goto no_clock;
+@@ -253,10 +256,8 @@ no_clock:
+       if (ptp->pps_source)
+               pps_unregister_source(ptp->pps_source);
+ no_pps:
+-      ptp_cleanup_sysfs(ptp);
+-no_sysfs:
+-      device_destroy(ptp_class, ptp->devid);
+-no_device:
++      ptp_cleanup_pin_groups(ptp);
++no_pin_groups:
+       mutex_destroy(&ptp->tsevq_mux);
+       mutex_destroy(&ptp->pincfg_mux);
+ no_slot:
+@@ -274,10 +275,9 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
+       /* Release the clock's resources. */
+       if (ptp->pps_source)
+               pps_unregister_source(ptp->pps_source);
+-      ptp_cleanup_sysfs(ptp);
+-      device_destroy(ptp_class, ptp->devid);
+ 
+       posix_clock_unregister(&ptp->clock);
++
+       return 0;
+ }
+ EXPORT_SYMBOL(ptp_clock_unregister);
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 9c5d41421b65..15346e840caa 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -40,7 +40,7 @@ struct timestamp_event_queue {
+ 
+ struct ptp_clock {
+       struct posix_clock clock;
+-      struct device *dev;
++      struct device dev;
+       struct ptp_clock_info *info;
+       dev_t devid;
+       int index; /* index into clocks.map */
+@@ -54,6 +54,8 @@ struct ptp_clock {
+       struct device_attribute *pin_dev_attr;
+       struct attribute **pin_attr;
+       struct attribute_group pin_attr_group;
++      /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
++      const struct attribute_group *pin_attr_groups[2];
+ };
+ 
+ /*
+@@ -94,8 +96,7 @@ uint ptp_poll(struct posix_clock *pc,
+ 
+ extern const struct attribute_group *ptp_groups[];
+ 
+-int ptp_cleanup_sysfs(struct ptp_clock *ptp);
+-
+-int ptp_populate_sysfs(struct ptp_clock *ptp);
++int ptp_populate_pin_groups(struct ptp_clock *ptp);
++void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
+ 
+ #endif
+diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
+index 302e626fe6b0..731d0423c8aa 100644
+--- a/drivers/ptp/ptp_sysfs.c
++++ b/drivers/ptp/ptp_sysfs.c
+@@ -46,27 +46,6 @@ PTP_SHOW_INT(n_periodic_outputs, n_per_out);
+ PTP_SHOW_INT(n_programmable_pins, n_pins);
+ PTP_SHOW_INT(pps_available, pps);
+ 
+-static struct attribute *ptp_attrs[] = {
+-      &dev_attr_clock_name.attr,
+-      &dev_attr_max_adjustment.attr,
+-      &dev_attr_n_alarms.attr,
+-      &dev_attr_n_external_timestamps.attr,
+-      &dev_attr_n_periodic_outputs.attr,
+-      &dev_attr_n_programmable_pins.attr,
+-      &dev_attr_pps_available.attr,
+-      NULL,
+-};
+-
+-static const struct attribute_group ptp_group = {
+-      .attrs = ptp_attrs,
+-};
+-
+-const struct attribute_group *ptp_groups[] = {
+-      &ptp_group,
+-      NULL,
+-};
+-
+-
+ static ssize_t extts_enable_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+@@ -91,6 +70,7 @@ static ssize_t extts_enable_store(struct device *dev,
+ out:
+       return err;
+ }
++static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
+ 
+ static ssize_t extts_fifo_show(struct device *dev,
+                              struct device_attribute *attr, char *page)
+@@ -124,6 +104,7 @@ out:
+       mutex_unlock(&ptp->tsevq_mux);
+       return cnt;
+ }
++static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
+ 
+ static ssize_t period_store(struct device *dev,
+                           struct device_attribute *attr,
+@@ -151,6 +132,7 @@ static ssize_t period_store(struct device *dev,
+ out:
+       return err;
+ }
++static DEVICE_ATTR(period, 0220, NULL, period_store);
+ 
+ static ssize_t pps_enable_store(struct device *dev,
+                               struct device_attribute *attr,
+@@ -177,6 +159,57 @@ static ssize_t pps_enable_store(struct device *dev,
+ out:
+       return err;
+ }
++static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
++
++static struct attribute *ptp_attrs[] = {
++      &dev_attr_clock_name.attr,
++
++      &dev_attr_max_adjustment.attr,
++      &dev_attr_n_alarms.attr,
++      &dev_attr_n_external_timestamps.attr,
++      &dev_attr_n_periodic_outputs.attr,
++      &dev_attr_n_programmable_pins.attr,
++      &dev_attr_pps_available.attr,
++
++      &dev_attr_extts_enable.attr,
++      &dev_attr_fifo.attr,
++      &dev_attr_period.attr,
++      &dev_attr_pps_enable.attr,
++      NULL
++};
++
++static umode_t ptp_is_attribute_visible(struct kobject *kobj,
++                                      struct attribute *attr, int n)
++{
++      struct device *dev = kobj_to_dev(kobj);
++      struct ptp_clock *ptp = dev_get_drvdata(dev);
++      struct ptp_clock_info *info = ptp->info;
++      umode_t mode = attr->mode;
++
++      if (attr == &dev_attr_extts_enable.attr ||
++          attr == &dev_attr_fifo.attr) {
++              if (!info->n_ext_ts)
++                      mode = 0;
++      } else if (attr == &dev_attr_period.attr) {
++              if (!info->n_per_out)
++                      mode = 0;
++      } else if (attr == &dev_attr_pps_enable.attr) {
++              if (!info->pps)
++                      mode = 0;
++      }
++
++      return mode;
++}
++
++static const struct attribute_group ptp_group = {
++      .is_visible     = ptp_is_attribute_visible,
++      .attrs          = ptp_attrs,
++};
++
++const struct attribute_group *ptp_groups[] = {
++      &ptp_group,
++      NULL
++};
+ 
+ static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
+ {
+@@ -235,40 +268,14 @@ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
+       return count;
+ }
+ 
+-static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
+-static DEVICE_ATTR(fifo,         0444, extts_fifo_show, NULL);
+-static DEVICE_ATTR(period,       0220, NULL, period_store);
+-static DEVICE_ATTR(pps_enable,   0220, NULL, pps_enable_store);
+-
+-int ptp_cleanup_sysfs(struct ptp_clock *ptp)
++int ptp_populate_pin_groups(struct ptp_clock *ptp)
+ {
+-      struct device *dev = ptp->dev;
+-      struct ptp_clock_info *info = ptp->info;
+-
+-      if (info->n_ext_ts) {
+-              device_remove_file(dev, &dev_attr_extts_enable);
+-              device_remove_file(dev, &dev_attr_fifo);
+-      }
+-      if (info->n_per_out)
+-              device_remove_file(dev, &dev_attr_period);
+-
+-      if (info->pps)
+-              device_remove_file(dev, &dev_attr_pps_enable);
+-
+-      if (info->n_pins) {
+-              sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group);
+-              kfree(ptp->pin_attr);
+-              kfree(ptp->pin_dev_attr);
+-      }
+-      return 0;
+-}
+-
+-static int ptp_populate_pins(struct ptp_clock *ptp)
+-{
+-      struct device *dev = ptp->dev;
+       struct ptp_clock_info *info = ptp->info;
+       int err = -ENOMEM, i, n_pins = info->n_pins;
+ 
++      if (!n_pins)
++              return 0;
++
+       ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr),
+                                   GFP_KERNEL);
+       if (!ptp->pin_dev_attr)
+@@ -292,61 +299,18 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
+       ptp->pin_attr_group.name = "pins";
+       ptp->pin_attr_group.attrs = ptp->pin_attr;
+ 
+-      err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group);
+-      if (err)
+-              goto no_group;
++      ptp->pin_attr_groups[0] = &ptp->pin_attr_group;
++
+       return 0;
+ 
+-no_group:
+-      kfree(ptp->pin_attr);
+ no_pin_attr:
+       kfree(ptp->pin_dev_attr);
+ no_dev_attr:
+       return err;
+ }
+ 
+-int ptp_populate_sysfs(struct ptp_clock *ptp)
++void ptp_cleanup_pin_groups(struct ptp_clock *ptp)
+ {
+-      struct device *dev = ptp->dev;
+-      struct ptp_clock_info *info = ptp->info;
+-      int err;
+-
+-      if (info->n_ext_ts) {
+-              err = device_create_file(dev, &dev_attr_extts_enable);
+-              if (err)
+-                      goto out1;
+-              err = device_create_file(dev, &dev_attr_fifo);
+-              if (err)
+-                      goto out2;
+-      }
+-      if (info->n_per_out) {
+-              err = device_create_file(dev, &dev_attr_period);
+-              if (err)
+-                      goto out3;
+-      }
+-      if (info->pps) {
+-              err = device_create_file(dev, &dev_attr_pps_enable);
+-              if (err)
+-                      goto out4;
+-      }
+-      if (info->n_pins) {
+-              err = ptp_populate_pins(ptp);
+-              if (err)
+-                      goto out5;
+-      }
+-      return 0;
+-out5:
+-      if (info->pps)
+-              device_remove_file(dev, &dev_attr_pps_enable);
+-out4:
+-      if (info->n_per_out)
+-              device_remove_file(dev, &dev_attr_period);
+-out3:
+-      if (info->n_ext_ts)
+-              device_remove_file(dev, &dev_attr_fifo);
+-out2:
+-      if (info->n_ext_ts)
+-              device_remove_file(dev, &dev_attr_extts_enable);
+-out1:
+-      return err;
++      kfree(ptp->pin_attr);
++      kfree(ptp->pin_dev_attr);
+ }
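
The ptp_sysfs.c hunks above replace per-attribute device_create_file()/device_remove_file() calls with one static attribute list and an .is_visible callback, so optional attributes are simply hidden on clocks that lack the corresponding capability. The sketch below shows that general pattern in isolation; the demo_* names are invented for illustration and are not part of the patch.

  /*
   * Minimal sketch of the attribute-group idiom: declare everything
   * statically, let .is_visible() decide per device what shows up.
   */
  #include <linux/device.h>
  #include <linux/sysfs.h>

  struct demo_priv {
          bool has_feature_x;
  };

  static ssize_t feature_x_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
  {
          return sprintf(buf, "%d\n", 1);
  }
  static DEVICE_ATTR_RO(feature_x);

  static struct attribute *demo_attrs[] = {
          &dev_attr_feature_x.attr,
          NULL
  };

  static umode_t demo_attr_visible(struct kobject *kobj,
                                   struct attribute *attr, int n)
  {
          struct device *dev = kobj_to_dev(kobj);
          struct demo_priv *priv = dev_get_drvdata(dev);

          if (attr == &dev_attr_feature_x.attr && !priv->has_feature_x)
                  return 0;       /* attribute not created for this device */
          return attr->mode;
  }

  static const struct attribute_group demo_group = {
          .is_visible     = demo_attr_visible,
          .attrs          = demo_attrs,
  };

  const struct attribute_group *demo_groups[] = { &demo_group, NULL };

Handing such a group array to the driver core (for example via device_create_with_groups()) lets the core create and remove the files itself, which is what makes the hand-rolled populate/cleanup code above removable.
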
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 0fdc8c417035..b4fbcf4cade8 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1982,7 +1982,7 @@ static enum blk_eh_timer_return 
iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ 
+       ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
+ 
+-      spin_lock(&session->frwd_lock);
++      spin_lock_bh(&session->frwd_lock);
+       task = (struct iscsi_task *)sc->SCp.ptr;
+       if (!task) {
+               /*
+@@ -2109,7 +2109,7 @@ static enum blk_eh_timer_return 
iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ done:
+       if (task)
+               task->last_timeout = jiffies;
+-      spin_unlock(&session->frwd_lock);
++      spin_unlock_bh(&session->frwd_lock);
+       ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+                    "timer reset" : "shutdown or nh");
+       return rc;
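
The libiscsi hunk switches the command-timeout handler to the _bh lock variants. The general rule it reflects, sketched here with a generic lock rather than the session's frwd_lock: when a spinlock is also taken from softirq context, process-context holders have to disable bottom halves, otherwise a softirq arriving on the same CPU can spin forever on the lock they hold.

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(demo_lock);
  static unsigned long demo_events;

  /* process context, e.g. an error-handling or ioctl path */
  static void demo_process_path(void)
  {
          spin_lock_bh(&demo_lock);       /* _bh: keep softirqs off this CPU */
          demo_events++;
          spin_unlock_bh(&demo_lock);
  }

  /* softirq context, e.g. an RX completion */
  static void demo_softirq_path(void)
  {
          spin_lock(&demo_lock);          /* already running in BH context */
          demo_events++;
          spin_unlock(&demo_lock);
  }
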
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 41a646696bab..0772804dbc27 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -364,8 +364,8 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
+       srb_t *sp = (srb_t *)ptr;
+       struct srb_iocb *abt = &sp->u.iocb_cmd;
+ 
+-      del_timer(&sp->u.iocb_cmd.timer);
+-      complete(&abt->u.abt.comp);
++      if (del_timer(&sp->u.iocb_cmd.timer))
++              complete(&abt->u.abt.comp);
+ }
+ 
+ static int
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 63e21ca538f1..f5c66caad56b 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -706,8 +706,10 @@ sg_write(struct file *filp, const char __user *buf, 
size_t count, loff_t * ppos)
+       hp->flags = input_size; /* structure abuse ... */
+       hp->pack_id = old_hdr.pack_id;
+       hp->usr_ptr = NULL;
+-      if (__copy_from_user(cmnd, buf, cmd_size))
++      if (__copy_from_user(cmnd, buf, cmd_size)) {
++              sg_remove_request(sfp, srp);
+               return -EFAULT;
++      }
+       /*
+        * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
+       * but it is possible that the app intended SG_DXFER_TO_DEV, because 
there
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index 87a0e47eeae6..4edd38d03b93 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -180,9 +180,11 @@ static inline u32 rx_max(struct dw_spi *dws)
+ 
+ static void dw_writer(struct dw_spi *dws)
+ {
+-      u32 max = tx_max(dws);
++      u32 max;
+       u16 txw = 0;
+ 
++      spin_lock(&dws->buf_lock);
++      max = tx_max(dws);
+       while (max--) {
+               /* Set the tx word if the transfer's original "tx" is not null 
*/
+               if (dws->tx_end - dws->len) {
+@@ -194,13 +196,16 @@ static void dw_writer(struct dw_spi *dws)
+               dw_write_io_reg(dws, DW_SPI_DR, txw);
+               dws->tx += dws->n_bytes;
+       }
++      spin_unlock(&dws->buf_lock);
+ }
+ 
+ static void dw_reader(struct dw_spi *dws)
+ {
+-      u32 max = rx_max(dws);
++      u32 max;
+       u16 rxw;
+ 
++      spin_lock(&dws->buf_lock);
++      max = rx_max(dws);
+       while (max--) {
+               rxw = dw_read_io_reg(dws, DW_SPI_DR);
+               /* Care rx only if the transfer's original "rx" is not null */
+@@ -212,6 +217,7 @@ static void dw_reader(struct dw_spi *dws)
+               }
+               dws->rx += dws->n_bytes;
+       }
++      spin_unlock(&dws->buf_lock);
+ }
+ 
+ static void int_error_stop(struct dw_spi *dws, const char *msg)
+@@ -284,6 +290,7 @@ static int dw_spi_transfer_one(struct spi_master *master,
+ {
+       struct dw_spi *dws = spi_master_get_devdata(master);
+       struct chip_data *chip = spi_get_ctldata(spi);
++      unsigned long flags;
+       u8 imask = 0;
+       u16 txlevel = 0;
+       u16 clk_div;
+@@ -291,12 +298,13 @@ static int dw_spi_transfer_one(struct spi_master *master,
+       int ret;
+ 
+       dws->dma_mapped = 0;
+-
++      spin_lock_irqsave(&dws->buf_lock, flags);
+       dws->tx = (void *)transfer->tx_buf;
+       dws->tx_end = dws->tx + transfer->len;
+       dws->rx = transfer->rx_buf;
+       dws->rx_end = dws->rx + transfer->len;
+       dws->len = transfer->len;
++      spin_unlock_irqrestore(&dws->buf_lock, flags);
+ 
+       spi_enable_chip(dws, 0);
+ 
+@@ -488,6 +496,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
+       dws->dma_inited = 0;
+       dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
+       snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
++      spin_lock_init(&dws->buf_lock);
+ 
+       ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, master);
+       if (ret < 0) {
+diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
+index 35589a270468..d05b216ea3f8 100644
+--- a/drivers/spi/spi-dw.h
++++ b/drivers/spi/spi-dw.h
+@@ -117,6 +117,7 @@ struct dw_spi {
+       size_t                  len;
+       void                    *tx;
+       void                    *tx_end;
++      spinlock_t              buf_lock;
+       void                    *rx;
+       void                    *rx_end;
+       int                     dma_mapped;
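
The spi-dw hunks introduce dws->buf_lock so a new transfer's buffer pointers are published atomically with respect to the interrupt handler that fills and drains the FIFO. A compressed sketch of that locking shape, with invented xfer_* names and the hardware access reduced to comments:

  #include <linux/interrupt.h>
  #include <linux/spinlock.h>
  #include <linux/types.h>

  struct xfer_state {
          spinlock_t      buf_lock;       /* spin_lock_init() at probe time */
          const u8        *tx, *tx_end;
  };

  /* process context: publish the new buffer pointers in one critical section */
  static void xfer_prepare(struct xfer_state *xs, const u8 *tx, size_t len)
  {
          unsigned long flags;

          spin_lock_irqsave(&xs->buf_lock, flags);
          xs->tx = tx;
          xs->tx_end = tx + len;
          spin_unlock_irqrestore(&xs->buf_lock, flags);
  }

  /* hard-IRQ context: consume under the same lock */
  static irqreturn_t xfer_irq(int irq, void *data)
  {
          struct xfer_state *xs = data;

          spin_lock(&xs->buf_lock);       /* IRQs already disabled here */
          while (xs->tx < xs->tx_end) {
                  /* write *xs->tx to the TX FIFO here */
                  xs->tx++;
          }
          spin_unlock(&xs->buf_lock);
          return IRQ_HANDLED;
  }
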
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index d193e95f5889..5052c11a2520 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -260,6 +260,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct 
config_item *item,
+       char *name;
+       int ret;
+ 
++      if (strlen(page) < len)
++              return -EOVERFLOW;
++
+       name = kstrdup(page, GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
+diff --git a/drivers/usb/gadget/legacy/audio.c 
b/drivers/usb/gadget/legacy/audio.c
+index 685cf3b4b78f..0fbe38d5d739 100644
+--- a/drivers/usb/gadget/legacy/audio.c
++++ b/drivers/usb/gadget/legacy/audio.c
+@@ -249,8 +249,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
+               struct usb_descriptor_header *usb_desc;
+ 
+               usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+-              if (!usb_desc)
++              if (!usb_desc) {
++                      status = -ENOMEM;
+                       goto fail;
++              }
+               usb_otg_descriptor_init(cdev->gadget, usb_desc);
+               otg_desc[0] = usb_desc;
+               otg_desc[1] = NULL;
+diff --git a/drivers/usb/gadget/legacy/cdc2.c 
b/drivers/usb/gadget/legacy/cdc2.c
+index d70e7d43241a..2bac77a6f4b6 100644
+--- a/drivers/usb/gadget/legacy/cdc2.c
++++ b/drivers/usb/gadget/legacy/cdc2.c
+@@ -183,8 +183,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
+               struct usb_descriptor_header *usb_desc;
+ 
+               usb_desc = usb_otg_descriptor_alloc(gadget);
+-              if (!usb_desc)
++              if (!usb_desc) {
++                      status = -ENOMEM;
+                       goto fail1;
++              }
+               usb_otg_descriptor_init(gadget, usb_desc);
+               otg_desc[0] = usb_desc;
+               otg_desc[1] = NULL;
+diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
+index cc3ffacbade1..0d45eb497063 100644
+--- a/drivers/usb/gadget/legacy/ncm.c
++++ b/drivers/usb/gadget/legacy/ncm.c
+@@ -162,8 +162,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
+               struct usb_descriptor_header *usb_desc;
+ 
+               usb_desc = usb_otg_descriptor_alloc(gadget);
+-              if (!usb_desc)
++              if (!usb_desc) {
++                      status = -ENOMEM;
+                       goto fail;
++              }
+               usb_otg_descriptor_init(gadget, usb_desc);
+               otg_desc[0] = usb_desc;
+               otg_desc[1] = NULL;
+diff --git a/drivers/usb/gadget/udc/net2272.c 
b/drivers/usb/gadget/udc/net2272.c
+index 553922c3be85..285e21ffa711 100644
+--- a/drivers/usb/gadget/udc/net2272.c
++++ b/drivers/usb/gadget/udc/net2272.c
+@@ -2670,6 +2670,8 @@ net2272_plat_probe(struct platform_device *pdev)
+  err_req:
+       release_mem_region(base, len);
+  err:
++      kfree(dev);
++
+       return ret;
+ }
+ 
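
The gadget (audio/cdc2/ncm) and net2272 hunks are all error-path fixes of the same kind: a failure jump must carry a meaningful error code, and the unwind labels must free exactly what was allocated before the jump. A generic sketch of that convention, not taken from any of these drivers:

  #include <linux/errno.h>
  #include <linux/slab.h>

  struct demo_dev { int dummy; };

  static int demo_probe(void)
  {
          struct demo_dev *dev;
          void *desc;
          int ret;

          dev = kzalloc(sizeof(*dev), GFP_KERNEL);
          if (!dev)
                  return -ENOMEM;

          desc = kzalloc(64, GFP_KERNEL);
          if (!desc) {
                  ret = -ENOMEM;          /* set the code *before* jumping */
                  goto err_free_dev;
          }

          /* ... further setup; any failure sets ret and unwinds ... */
          kfree(desc);
          return 0;

  err_free_dev:
          kfree(dev);                     /* the net2272 fix adds this kind of kfree */
          return ret;
  }
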
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 2220c1b9df10..c2c50d5d4ad1 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -1162,8 +1162,8 @@ static void garmin_read_process(struct garmin_data 
*garmin_data_p,
+                  send it directly to the tty port */
+               if (garmin_data_p->flags & FLAGS_QUEUING) {
+                       pkt_add(garmin_data_p, data, data_length);
+-              } else if (bulk_data ||
+-                         getLayerId(data) == GARMIN_LAYERID_APPL) {
++              } else if (bulk_data || (data_length >= sizeof(u32) &&
++                              getLayerId(data) == GARMIN_LAYERID_APPL)) {
+ 
+                       spin_lock_irqsave(&garmin_data_p->lock, flags);
+                       garmin_data_p->flags |= APP_RESP_SEEN;
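
The garmin_gps hunk refuses to read a 32-bit layer id out of a packet that is shorter than 4 bytes. The shape of that check, sketched with hypothetical demo_* helpers (getLayerId() itself is the driver's own accessor):

  #include <linux/types.h>
  #include <asm/unaligned.h>

  /* assumes the caller has already verified that at least 4 bytes are present */
  static inline u32 demo_layer_id(const u8 *buf)
  {
          return get_unaligned_le32(buf);
  }

  static bool demo_is_app_layer(const u8 *buf, size_t len, u32 app_layer_id)
  {
          if (len < sizeof(u32))
                  return false;           /* too short to carry a layer id at all */
          return demo_layer_id(buf) == app_layer_id;
  }
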
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index fb6dc16c754a..06916ddc3159 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
+       {DEVICE_SWI(0x413c, 0x81b3)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE 
Mobile Broadband Card (rev3) */
+       {DEVICE_SWI(0x413c, 0x81b5)},   /* Dell Wireless 5811e QDL */
+       {DEVICE_SWI(0x413c, 0x81b6)},   /* Dell Wireless 5811e QDL */
++      {DEVICE_SWI(0x413c, 0x81cc)},   /* Dell Wireless 5816e */
+       {DEVICE_SWI(0x413c, 0x81cf)},   /* Dell Wireless 5819 */
+       {DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
+       {DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
+diff --git a/drivers/usb/storage/unusual_uas.h 
b/drivers/usb/storage/unusual_uas.h
+index 9aad6825947c..cb0af57aad6e 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -40,6 +40,13 @@
+  * and don't forget to CC: the USB development list 
<linux-...@vger.kernel.org>
+  */
+ 
++/* Reported-by: Julian Groß <julia...@posteo.de> */
++UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
++              "LaCie",
++              "2Big Quadra USB3",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_NO_REPORT_OPCODES),
++
+ /*
+  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+  * commands in UAS mode.  Observed with the 1.28 firmware; are there others?
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index f1f32e55d877..164e5fedd7b6 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1097,6 +1097,18 @@ static int load_elf_binary(struct linux_binprm *bprm)
+       current->mm->start_stack = bprm->p;
+ 
+       if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
++              /*
++               * For architectures with ELF randomization, when executing
++               * a loader directly (i.e. no interpreter listed in ELF
++               * headers), move the brk area out of the mmap region
++               * (since it grows up, and may collide early with the stack
++               * growing down), and into the unused ELF_ET_DYN_BASE region.
++               */
++              if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
++                  loc->elf_ex.e_type == ET_DYN && !interpreter)
++                      current->mm->brk = current->mm->start_brk =
++                              ELF_ET_DYN_BASE;
++
+               current->mm->brk = current->mm->start_brk =
+                       arch_randomize_brk(current->mm);
+ #ifdef compat_brk_randomized
+diff --git a/fs/char_dev.c b/fs/char_dev.c
+index f1f3bb812799..9154a2d7b195 100644
+--- a/fs/char_dev.c
++++ b/fs/char_dev.c
+@@ -472,6 +472,85 @@ int cdev_add(struct cdev *p, dev_t dev, unsigned count)
+       return 0;
+ }
+ 
++/**
++ * cdev_set_parent() - set the parent kobject for a char device
++ * @p: the cdev structure
++ * @kobj: the kobject to take a reference to
++ *
++ * cdev_set_parent() sets a parent kobject which will be referenced
++ * appropriately so the parent is not freed before the cdev. This
++ * should be called before cdev_add.
++ */
++void cdev_set_parent(struct cdev *p, struct kobject *kobj)
++{
++      WARN_ON(!kobj->state_initialized);
++      p->kobj.parent = kobj;
++}
++
++/**
++ * cdev_device_add() - add a char device and its corresponding
++ *    struct device, linking them together
++ * @dev: the device structure
++ * @cdev: the cdev structure
++ *
++ * cdev_device_add() adds the char device represented by @cdev to the system,
++ * just as cdev_add does. It then adds @dev to the system using device_add().
++ * The dev_t for the char device will be taken from the struct device which
++ * needs to be initialized first. This helper function correctly takes a
++ * reference to the parent device so the parent will not get released until
++ * all references to the cdev are released.
++ *
++ * This helper uses dev->devt for the device number. If it is not set
++ * it will not add the cdev and it will be equivalent to device_add.
++ *
++ * This function should be used whenever the struct cdev and the
++ * struct device are members of the same structure whose lifetime is
++ * managed by the struct device.
++ *
++ * NOTE: Callers must assume that userspace was able to open the cdev and
++ * can call cdev fops callbacks at any time, even if this function fails.
++ */
++int cdev_device_add(struct cdev *cdev, struct device *dev)
++{
++      int rc = 0;
++
++      if (dev->devt) {
++              cdev_set_parent(cdev, &dev->kobj);
++
++              rc = cdev_add(cdev, dev->devt, 1);
++              if (rc)
++                      return rc;
++      }
++
++      rc = device_add(dev);
++      if (rc)
++              cdev_del(cdev);
++
++      return rc;
++}
++
++/**
++ * cdev_device_del() - inverse of cdev_device_add
++ * @dev: the device structure
++ * @cdev: the cdev structure
++ *
++ * cdev_device_del() is a helper function to call cdev_del and device_del.
++ * It should be used whenever cdev_device_add is used.
++ *
++ * If dev->devt is not set it will not remove the cdev and will be equivalent
++ * to device_del.
++ *
++ * NOTE: This guarantees that associated sysfs callbacks are not running
++ * or runnable, however any cdevs already open will remain and their fops
++ * will still be callable even after this function returns.
++ */
++void cdev_device_del(struct cdev *cdev, struct device *dev)
++{
++      device_del(dev);
++      if (dev->devt)
++              cdev_del(cdev);
++}
++
+ static void cdev_unmap(dev_t dev, unsigned count)
+ {
+       kobj_unmap(cdev_map, dev, count);
+@@ -483,6 +562,10 @@ static void cdev_unmap(dev_t dev, unsigned count)
+  *
+  * cdev_del() removes @p from the system, possibly freeing the structure
+  * itself.
++ *
++ * NOTE: This guarantees that cdev device will no longer be able to be
++ * opened, however any cdevs already open will remain and their fops will
++ * still be callable even after cdev_del returns.
+  */
+ void cdev_del(struct cdev *p)
+ {
+@@ -571,5 +654,8 @@ EXPORT_SYMBOL(cdev_init);
+ EXPORT_SYMBOL(cdev_alloc);
+ EXPORT_SYMBOL(cdev_del);
+ EXPORT_SYMBOL(cdev_add);
++EXPORT_SYMBOL(cdev_set_parent);
++EXPORT_SYMBOL(cdev_device_add);
++EXPORT_SYMBOL(cdev_device_del);
+ EXPORT_SYMBOL(__register_chrdev);
+ EXPORT_SYMBOL(__unregister_chrdev);
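
The fs/char_dev.c additions backport the cdev_device_add()/cdev_device_del() helpers that the posix-clock conversion further down relies on. A sketch of how a driver that embeds both the cdev and the struct device in one structure would use them; the demo_* names are invented and the error handling is reduced to the essentials:

  #include <linux/cdev.h>
  #include <linux/device.h>
  #include <linux/fs.h>
  #include <linux/module.h>
  #include <linux/slab.h>

  struct demo_clock {
          struct device dev;
          struct cdev cdev;
  };

  static const struct file_operations demo_fops = {
          .owner = THIS_MODULE,
  };

  static void demo_release(struct device *dev)
  {
          kfree(container_of(dev, struct demo_clock, dev));
  }

  static int demo_create(dev_t devt)
  {
          struct demo_clock *dc;
          int err;

          dc = kzalloc(sizeof(*dc), GFP_KERNEL);
          if (!dc)
                  return -ENOMEM;

          device_initialize(&dc->dev);
          dc->dev.devt = devt;
          dc->dev.release = demo_release;
          dev_set_name(&dc->dev, "demo%u", MINOR(devt));

          cdev_init(&dc->cdev, &demo_fops);
          dc->cdev.owner = THIS_MODULE;

          /* links the cdev to the device's kobject, then registers both */
          err = cdev_device_add(&dc->cdev, &dc->dev);
          if (err)
                  put_device(&dc->dev);   /* demo_release() frees dc */
          return err;
  }

Because cdev_set_parent() makes the device's kobject the parent of the cdev's, the containing structure cannot be freed while the character device is still referenced, which is the lifetime problem the posix-clock and PTP changes in this patch are closing.
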
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index b9b8f19dce0e..fa07f7cb85a5 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -184,6 +184,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int 
smb_command)
+        * reconnect the same SMB session
+        */
+       mutex_lock(&ses->session_mutex);
++
++      /*
++       * Recheck after acquiring the mutex. If another thread is negotiating
++       * and the server never sends an answer, the socket will be closed
++       * and tcpStatus set to reconnect.
++       */
++      if (server->tcpStatus == CifsNeedReconnect) {
++              rc = -EHOSTDOWN;
++              mutex_unlock(&ses->session_mutex);
++              goto out;
++      }
++
+       rc = cifs_negotiate_protocol(0, ses);
+       if (rc == 0 && ses->need_reconnect)
+               rc = cifs_setup_session(0, ses, nls_codepage);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index c9793ce0d336..21ddfd77966e 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -548,20 +548,21 @@ static bool
+ server_unresponsive(struct TCP_Server_Info *server)
+ {
+       /*
+-       * We need to wait 2 echo intervals to make sure we handle such
++       * We need to wait 3 echo intervals to make sure we handle such
+        * situations right:
+        * 1s  client sends a normal SMB request
+-       * 2s  client gets a response
++       * 3s  client gets a response
+        * 30s echo workqueue job pops, and decides we got a response recently
+        *     and don't need to send another
+        * ...
+        * 65s kernel_recvmsg times out, and we see that we haven't gotten
+        *     a response in >60s.
+        */
+-      if (server->tcpStatus == CifsGood &&
+-          time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) {
++      if ((server->tcpStatus == CifsGood ||
++          server->tcpStatus == CifsNeedNegotiate) &&
++          time_after(jiffies, server->lstrp + 3 * SMB_ECHO_INTERVAL)) {
+               cifs_dbg(VFS, "Server %s has not responded in %d seconds. 
Reconnecting...\n",
+-                       server->hostname, (2 * SMB_ECHO_INTERVAL) / HZ);
++                       server->hostname, (3 * SMB_ECHO_INTERVAL) / HZ);
+               cifs_reconnect(server);
+               wake_up(&server->response_q);
+               return true;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index d4472a494758..4ffd5e177288 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -249,6 +249,18 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon 
*tcon)
+        * the same SMB session
+        */
+       mutex_lock(&tcon->ses->session_mutex);
++
++      /*
++       * Recheck after acquiring the mutex. If another thread is negotiating
++       * and the server never sends an answer, the socket will be closed
++       * and tcpStatus set to reconnect.
++       */
++      if (server->tcpStatus == CifsNeedReconnect) {
++              rc = -EHOSTDOWN;
++              mutex_unlock(&tcon->ses->session_mutex);
++              goto out;
++      }
++
+       rc = cifs_negotiate_protocol(0, tcon->ses);
+       if (!rc && tcon->ses->need_reconnect) {
+               rc = cifs_setup_session(0, tcon->ses, nls_codepage);
+diff --git a/fs/exec.c b/fs/exec.c
+index d882ab7ac6e8..46cc0c072246 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1124,6 +1124,8 @@ int flush_old_exec(struct linux_binprm * bprm)
+        */
+       set_mm_exe_file(bprm->mm, bprm->file);
+ 
++      would_dump(bprm, bprm->file);
++
+       /*
+        * Release all of the old mmap stuff
+        */
+@@ -1632,8 +1634,6 @@ static int do_execveat_common(int fd, struct filename 
*filename,
+       if (retval < 0)
+               goto out;
+ 
+-      would_dump(bprm, bprm->file);
+-
+       retval = exec_binprm(bprm);
+       if (retval < 0)
+               goto out;
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index ccd80f2b3b19..d5055b3adccc 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -152,6 +152,7 @@ static int ext4_protect_reserved_inode(struct super_block 
*sb, u32 ino)
+               return PTR_ERR(inode);
+       num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+       while (i < num) {
++              cond_resched();
+               map.m_lblk = i;
+               map.m_len = num - i;
+               n = ext4_map_blocks(NULL, inode, &map, 0);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 4ae5d6ecd727..6e83ea61436a 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -431,7 +431,8 @@ struct request_queue {
+       unsigned int            sg_reserved_size;
+       int                     node;
+ #ifdef CONFIG_BLK_DEV_IO_TRACE
+-      struct blk_trace        *blk_trace;
++      struct blk_trace __rcu  *blk_trace;
++      struct mutex            blk_trace_mutex;
+ #endif
+       /*
+        * for flush operations
+diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
+index afc1343df3c7..e644bfe50019 100644
+--- a/include/linux/blktrace_api.h
++++ b/include/linux/blktrace_api.h
+@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, const char 
*fmt, ...);
+  **/
+ #define blk_add_trace_msg(q, fmt, ...)                                        
\
+       do {                                                            \
+-              struct blk_trace *bt = (q)->blk_trace;                  \
++              struct blk_trace *bt;                                   \
++                                                                      \
++              rcu_read_lock();                                        \
++              bt = rcu_dereference((q)->blk_trace);                   \
+               if (unlikely(bt))                                       \
+                       __trace_note_message(bt, fmt, ##__VA_ARGS__);   \
++              rcu_read_unlock();                                      \
+       } while (0)
+ #define BLK_TN_MAX_MSG                128
+ 
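
The blkdev.h and blktrace_api.h hunks mark q->blk_trace as __rcu and make blk_add_trace_msg() dereference it only inside an RCU read-side critical section. The reader half of that pattern, with generic demo_* names standing in for the blktrace structures:

  #include <linux/rcupdate.h>

  struct demo_cfg {
          int verbosity;
  };

  struct demo_queue {
          struct demo_cfg __rcu *cfg;
  };

  static int demo_get_verbosity(struct demo_queue *q)
  {
          struct demo_cfg *cfg;
          int v = 0;

          rcu_read_lock();
          cfg = rcu_dereference(q->cfg);
          if (cfg)
                  v = cfg->verbosity;     /* only valid inside the read section */
          rcu_read_unlock();
          return v;
  }

The matching updater side (mutex plus grace period) is sketched after the kernel/trace/blktrace.c hunks below.
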
+diff --git a/include/linux/cdev.h b/include/linux/cdev.h
+index f8763615a5f2..408bc09ce497 100644
+--- a/include/linux/cdev.h
++++ b/include/linux/cdev.h
+@@ -4,6 +4,7 @@
+ #include <linux/kobject.h>
+ #include <linux/kdev_t.h>
+ #include <linux/list.h>
++#include <linux/device.h>
+ 
+ struct file_operations;
+ struct inode;
+@@ -26,6 +27,10 @@ void cdev_put(struct cdev *p);
+ 
+ int cdev_add(struct cdev *, dev_t, unsigned);
+ 
++void cdev_set_parent(struct cdev *p, struct kobject *kobj);
++int cdev_device_add(struct cdev *cdev, struct device *dev);
++void cdev_device_del(struct cdev *cdev, struct device *dev);
++
+ void cdev_del(struct cdev *);
+ 
+ void cd_forget(struct inode *);
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 5f8749440c6a..e5d349d65ae9 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -556,4 +556,11 @@ static __always_inline void __write_once_size(volatile 
void *p, void *res, int s
+ # define __kprobes
+ # define nokprobe_inline      inline
+ #endif
++
++/*
++ * This is needed in functions which generate the stack canary, see
++ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
++ */
++#define prevent_tail_call_optimization()      mb()
++
+ #endif /* __LINUX_COMPILER_H */
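
prevent_tail_call_optimization() is only a full memory barrier, but its placement is the point: leaving an instruction after the final call in a function that establishes the stack canary stops the compiler from turning that call into a tail call and reusing the caller's stack frame. A purely illustrative sketch with an invented hand-off function:

  #include <linux/compiler.h>
  #include <linux/kernel.h>

  /* hand off to a stage that never returns (illustration only) */
  static void demo_init_and_handoff(void (*next_stage)(void))
  {
          /* ... per-CPU setup, including the stack canary ... */
          next_stage();                      /* otherwise a tail-call candidate */
          prevent_tail_call_optimization();  /* keeps code, and the frame, after it */
  }
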
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index e79b31f273bd..842a6a28b0e8 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -915,7 +915,7 @@ struct file_handle {
+       __u32 handle_bytes;
+       int handle_type;
+       /* file identifier */
+-      unsigned char f_handle[0];
++      unsigned char f_handle[];
+ };
+ 
+ static inline struct file *get_file(struct file *f)
+diff --git a/include/linux/pnp.h b/include/linux/pnp.h
+index 5df733b8f704..c03a368b5911 100644
+--- a/include/linux/pnp.h
++++ b/include/linux/pnp.h
+@@ -219,10 +219,8 @@ struct pnp_card {
+ #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
+ #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
+ #define to_pnp_card(n) container_of(n, struct pnp_card, dev)
+-#define pnp_for_each_card(card) \
+-      for((card) = global_to_pnp_card(pnp_cards.next); \
+-      (card) != global_to_pnp_card(&pnp_cards); \
+-      (card) = global_to_pnp_card((card)->global_list.next))
++#define pnp_for_each_card(card)       \
++      list_for_each_entry(card, &pnp_cards, global_list)
+ 
+ struct pnp_card_link {
+       struct pnp_card *card;
+@@ -275,14 +273,9 @@ struct pnp_dev {
+ #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
+ #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
+ #define       to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
+-#define pnp_for_each_dev(dev) \
+-      for((dev) = global_to_pnp_dev(pnp_global.next); \
+-      (dev) != global_to_pnp_dev(&pnp_global); \
+-      (dev) = global_to_pnp_dev((dev)->global_list.next))
+-#define card_for_each_dev(card,dev) \
+-      for((dev) = card_to_pnp_dev((card)->devices.next); \
+-      (dev) != card_to_pnp_dev(&(card)->devices); \
+-      (dev) = card_to_pnp_dev((dev)->card_list.next))
++#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, 
global_list)
++#define card_for_each_dev(card, dev)  \
++      list_for_each_entry(dev, &(card)->devices, card_list)
+ #define pnp_dev_name(dev) (dev)->name
+ 
+ static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
+@@ -434,14 +427,10 @@ struct pnp_protocol {
+ };
+ 
+ #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
+-#define protocol_for_each_card(protocol,card) \
+-      for((card) = protocol_to_pnp_card((protocol)->cards.next); \
+-      (card) != protocol_to_pnp_card(&(protocol)->cards); \
+-      (card) = protocol_to_pnp_card((card)->protocol_list.next))
+-#define protocol_for_each_dev(protocol,dev) \
+-      for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
+-      (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
+-      (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
++#define protocol_for_each_card(protocol, card)        \
++      list_for_each_entry(card, &(protocol)->cards, protocol_list)
++#define protocol_for_each_dev(protocol, dev)  \
++      list_for_each_entry(dev, &(protocol)->devices, protocol_list)
+ 
+ extern struct bus_type pnp_bus_type;
+ 
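
The pnp.h rewrite replaces four open-coded iteration macros with list_for_each_entry(), which walks the containing structures directly and needs no manual list_entry() arithmetic. A generic example of the helper, unrelated to PNP's own lists:

  #include <linux/list.h>

  struct demo_node {
          int value;
          struct list_head link;
  };

  static int demo_sum(struct list_head *head)
  {
          struct demo_node *n;
          int sum = 0;

          list_for_each_entry(n, head, link)
                  sum += n->value;
          return sum;
  }
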
+diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
+index 83b22ae9ae12..b39420a0321c 100644
+--- a/include/linux/posix-clock.h
++++ b/include/linux/posix-clock.h
+@@ -104,29 +104,32 @@ struct posix_clock_operations {
+  *
+  * @ops:     Functional interface to the clock
+  * @cdev:    Character device instance for this clock
+- * @kref:    Reference count.
++ * @dev:     Pointer to the clock's device.
+  * @rwsem:   Protects the 'zombie' field from concurrent access.
+  * @zombie:  If 'zombie' is true, then the hardware has disappeared.
+- * @release: A function to free the structure when the reference count reaches
+- *           zero. May be NULL if structure is statically allocated.
+  *
+  * Drivers should embed their struct posix_clock within a private
+  * structure, obtaining a reference to it during callbacks using
+  * container_of().
++ *
++ * Drivers should supply an initialized but not exposed struct device
++ * to posix_clock_register(). It is used to manage the lifetime of the
++ * driver's private structure. Its 'release' field should be set to
++ * a release function for this private structure.
+  */
+ struct posix_clock {
+       struct posix_clock_operations ops;
+       struct cdev cdev;
+-      struct kref kref;
++      struct device *dev;
+       struct rw_semaphore rwsem;
+       bool zombie;
+-      void (*release)(struct posix_clock *clk);
+ };
+ 
+ /**
+  * posix_clock_register() - register a new clock
+- * @clk:   Pointer to the clock. Caller must provide 'ops' and 'release'
+- * @devid: Allocated device id
++ * @clk:   Pointer to the clock. Caller must provide 'ops' field
++ * @dev:   Pointer to the initialized device. Caller must provide
++ *         'release' field
+  *
+  * A clock driver calls this function to register itself with the
+  * clock device subsystem. If 'clk' points to dynamically allocated
+@@ -135,7 +138,7 @@ struct posix_clock {
+  *
+  * Returns zero on success, non-zero otherwise.
+  */
+-int posix_clock_register(struct posix_clock *clk, dev_t devid);
++int posix_clock_register(struct posix_clock *clk, struct device *dev);
+ 
+ /**
+  * posix_clock_unregister() - unregister a clock
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 812cdd8cff22..e5b15a83c8d7 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -64,7 +64,7 @@ struct tty_buffer {
+       int read;
+       int flags;
+       /* Data points here */
+-      unsigned long data[0];
++      unsigned long data[];
+ };
+ 
+ /* Values for .flags field of tty_buffer */
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index af032e5405f6..27a1833c7b00 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -192,8 +192,10 @@ struct ipv6_stub {
+                                const struct in6_addr *addr);
+       int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+                                const struct in6_addr *addr);
+-      int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+-                             struct dst_entry **dst, struct flowi6 *fl6);
++      struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
++                                                const struct sock *sk,
++                                                struct flowi6 *fl6,
++                                                const struct in6_addr 
*final_dst);
+       void (*udpv6_encap_enable)(void);
+       void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr 
*daddr,
+                             const struct in6_addr *solicited_addr,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index c07cf9596b6f..6258264a0bf7 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -853,7 +853,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock 
*sk)
+ 
+ int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+                  struct flowi6 *fl6);
+-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 
*fl6,
++struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, 
struct flowi6 *fl6,
+                                     const struct in6_addr *final_dst);
+ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+                                        const struct in6_addr *final_dst);
+diff --git a/include/net/netfilter/nf_conntrack.h 
b/include/net/netfilter/nf_conntrack.h
+index 636e9e11bd5f..e3f73fd1d53a 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -98,7 +98,7 @@ struct nf_conn {
+       possible_net_t ct_net;
+ 
+       /* all members below initialized via memset */
+-      u8 __nfct_init_offset[0];
++      struct { } __nfct_init_offset;
+ 
+       /* If we were expected by an expectation, this will be it */
+       struct nf_conn *master;
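
The nf_conn change turns the zero-length __nfct_init_offset array into an empty struct that serves purely as an offset marker for the memset() that reinitializes the tail of the object. A sketch of that idiom with an invented demo_conn layout:

  #include <linux/stddef.h>
  #include <linux/string.h>

  struct demo_conn {
          unsigned long refcnt;           /* preserved across re-init */

          struct { } init_offset;         /* everything below gets zeroed */

          unsigned long status;
          void *master;
  };

  static void demo_conn_reinit(struct demo_conn *c)
  {
          memset((char *)c + offsetof(struct demo_conn, init_offset), 0,
                 sizeof(*c) - offsetof(struct demo_conn, init_offset));
  }
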
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index 27b2c653d2f0..fb36e8a706fb 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -76,6 +76,7 @@ struct snd_rawmidi_runtime {
+       size_t avail_min;       /* min avail for wakeup */
+       size_t avail;           /* max used buffer for wakeup */
+       size_t xruns;           /* over/underruns counter */
++      int buffer_ref;         /* buffer reference count */
+       /* misc */
+       spinlock_t lock;
+       wait_queue_head_t sleep;
+diff --git a/init/main.c b/init/main.c
+index e88c8cdef6a7..88159063baa1 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -683,6 +683,8 @@ asmlinkage __visible void __init start_kernel(void)
+ 
+       /* Do the rest non-__init'ed, we're now alive */
+       rest_init();
++
++      prevent_tail_call_optimization();
+ }
+ 
+ /* Call all constructor functions linked into the kernel. */
+diff --git a/ipc/util.c b/ipc/util.c
+index 2724f9071ab3..7af476b6dcdd 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -756,21 +756,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct 
ipc_ids *ids, loff_t pos,
+                       total++;
+       }
+ 
+-      *new_pos = pos + 1;
++      ipc = NULL;
+       if (total >= ids->in_use)
+-              return NULL;
++              goto out;
+ 
+       for (; pos < IPCMNI; pos++) {
+               ipc = idr_find(&ids->ipcs_idr, pos);
+               if (ipc != NULL) {
+                       rcu_read_lock();
+                       ipc_lock_object(ipc);
+-                      return ipc;
++                      break;
+               }
+       }
+-
+-      /* Out of range - return NULL to terminate iteration */
+-      return NULL;
++out:
++      *new_pos = pos + 1;
++      return ipc;
+ }
+ 
+ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index e24008c098c6..45a0a26023d4 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -25,8 +25,6 @@
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+ 
+-static void delete_clock(struct kref *kref);
+-
+ /*
+  * Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
+  */
+@@ -168,7 +166,7 @@ static int posix_clock_open(struct inode *inode, struct 
file *fp)
+               err = 0;
+ 
+       if (!err) {
+-              kref_get(&clk->kref);
++              get_device(clk->dev);
+               fp->private_data = clk;
+       }
+ out:
+@@ -184,7 +182,7 @@ static int posix_clock_release(struct inode *inode, struct 
file *fp)
+       if (clk->ops.release)
+               err = clk->ops.release(clk);
+ 
+-      kref_put(&clk->kref, delete_clock);
++      put_device(clk->dev);
+ 
+       fp->private_data = NULL;
+ 
+@@ -206,38 +204,35 @@ static const struct file_operations 
posix_clock_file_operations = {
+ #endif
+ };
+ 
+-int posix_clock_register(struct posix_clock *clk, dev_t devid)
++int posix_clock_register(struct posix_clock *clk, struct device *dev)
+ {
+       int err;
+ 
+-      kref_init(&clk->kref);
+       init_rwsem(&clk->rwsem);
+ 
+       cdev_init(&clk->cdev, &posix_clock_file_operations);
++      err = cdev_device_add(&clk->cdev, dev);
++      if (err) {
++              pr_err("%s unable to add device %d:%d\n",
++                      dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
++              return err;
++      }
+       clk->cdev.owner = clk->ops.owner;
+-      err = cdev_add(&clk->cdev, devid, 1);
++      clk->dev = dev;
+ 
+-      return err;
++      return 0;
+ }
+ EXPORT_SYMBOL_GPL(posix_clock_register);
+ 
+-static void delete_clock(struct kref *kref)
+-{
+-      struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
+-
+-      if (clk->release)
+-              clk->release(clk);
+-}
+-
+ void posix_clock_unregister(struct posix_clock *clk)
+ {
+-      cdev_del(&clk->cdev);
++      cdev_device_del(&clk->cdev, clk->dev);
+ 
+       down_write(&clk->rwsem);
+       clk->zombie = true;
+       up_write(&clk->rwsem);
+ 
+-      kref_put(&clk->kref, delete_clock);
++      put_device(clk->dev);
+ }
+ EXPORT_SYMBOL_GPL(posix_clock_unregister);
+ 
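
The posix-clock.h documentation and the kernel/time/posix-clock.c conversion above spell out the new registration contract: the caller hands posix_clock_register() an initialized but not yet added struct device whose release callback frees the driver's private structure. A condensed sketch of such a caller (invented demo_clk names), mirroring the cdev_device_add() example after the fs/char_dev.c hunk:

  #include <linux/device.h>
  #include <linux/posix-clock.h>
  #include <linux/slab.h>

  struct demo_clk {
          struct posix_clock clock;
          struct device dev;
  };

  static void demo_clk_release(struct device *dev)
  {
          kfree(container_of(dev, struct demo_clk, dev));
  }

  static int demo_clk_create(dev_t devt, const struct posix_clock_operations *ops)
  {
          struct demo_clk *dc;
          int err;

          dc = kzalloc(sizeof(*dc), GFP_KERNEL);
          if (!dc)
                  return -ENOMEM;

          dc->clock.ops = *ops;

          device_initialize(&dc->dev);
          dc->dev.devt = devt;
          dc->dev.release = demo_clk_release;
          dev_set_name(&dc->dev, "democlk%u", MINOR(devt));

          err = posix_clock_register(&dc->clock, &dc->dev);
          if (err)
                  put_device(&dc->dev);   /* demo_clk_release() frees dc */
          return err;
  }
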
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 210b8e726a97..673756468019 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -319,11 +319,12 @@ static void put_probe_ref(void)
+ 
+ static void blk_trace_cleanup(struct blk_trace *bt)
+ {
++      synchronize_rcu();
+       blk_trace_free(bt);
+       put_probe_ref();
+ }
+ 
+-int blk_trace_remove(struct request_queue *q)
++static int __blk_trace_remove(struct request_queue *q)
+ {
+       struct blk_trace *bt;
+ 
+@@ -336,6 +337,17 @@ int blk_trace_remove(struct request_queue *q)
+ 
+       return 0;
+ }
++
++int blk_trace_remove(struct request_queue *q)
++{
++      int ret;
++
++      mutex_lock(&q->blk_trace_mutex);
++      ret = __blk_trace_remove(q);
++      mutex_unlock(&q->blk_trace_mutex);
++
++      return ret;
++}
+ EXPORT_SYMBOL_GPL(blk_trace_remove);
+ 
+ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+@@ -546,9 +558,8 @@ err:
+       return ret;
+ }
+ 
+-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+-                  struct block_device *bdev,
+-                  char __user *arg)
++static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
++                           struct block_device *bdev, char __user *arg)
+ {
+       struct blk_user_trace_setup buts;
+       int ret;
+@@ -562,11 +573,24 @@ int blk_trace_setup(struct request_queue *q, char *name, 
dev_t dev,
+               return ret;
+ 
+       if (copy_to_user(arg, &buts, sizeof(buts))) {
+-              blk_trace_remove(q);
++              __blk_trace_remove(q);
+               return -EFAULT;
+       }
+       return 0;
+ }
++
++int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
++                  struct block_device *bdev,
++                  char __user *arg)
++{
++      int ret;
++
++      mutex_lock(&q->blk_trace_mutex);
++      ret = __blk_trace_setup(q, name, dev, bdev, arg);
++      mutex_unlock(&q->blk_trace_mutex);
++
++      return ret;
++}
+ EXPORT_SYMBOL_GPL(blk_trace_setup);
+ 
+ #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+@@ -595,7 +619,7 @@ static int compat_blk_trace_setup(struct request_queue *q, 
char *name,
+               return ret;
+ 
+       if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
+-              blk_trace_remove(q);
++              __blk_trace_remove(q);
+               return -EFAULT;
+       }
+ 
+@@ -603,11 +627,13 @@ static int compat_blk_trace_setup(struct request_queue 
*q, char *name,
+ }
+ #endif
+ 
+-int blk_trace_startstop(struct request_queue *q, int start)
++static int __blk_trace_startstop(struct request_queue *q, int start)
+ {
+       int ret;
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+ 
++      bt = rcu_dereference_protected(q->blk_trace,
++                                     lockdep_is_held(&q->blk_trace_mutex));
+       if (bt == NULL)
+               return -EINVAL;
+ 
+@@ -642,8 +668,25 @@ int blk_trace_startstop(struct request_queue *q, int 
start)
+ 
+       return ret;
+ }
++
++int blk_trace_startstop(struct request_queue *q, int start)
++{
++      int ret;
++
++      mutex_lock(&q->blk_trace_mutex);
++      ret = __blk_trace_startstop(q, start);
++      mutex_unlock(&q->blk_trace_mutex);
++
++      return ret;
++}
+ EXPORT_SYMBOL_GPL(blk_trace_startstop);
+ 
++/*
++ * When reading or writing the blktrace sysfs files, the references to the
++ * opened sysfs or device files should prevent the underlying block device
++ * from being removed. So no further delete protection is really needed.
++ */
++
+ /**
+  * blk_trace_ioctl: - handle the ioctls associated with tracing
+  * @bdev:     the block device
+@@ -661,12 +704,12 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned 
cmd, char __user *arg)
+       if (!q)
+               return -ENXIO;
+ 
+-      mutex_lock(&bdev->bd_mutex);
++      mutex_lock(&q->blk_trace_mutex);
+ 
+       switch (cmd) {
+       case BLKTRACESETUP:
+               bdevname(bdev, b);
+-              ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
++              ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+               break;
+ #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
+       case BLKTRACESETUP32:
+@@ -677,17 +720,17 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned 
cmd, char __user *arg)
+       case BLKTRACESTART:
+               start = 1;
+       case BLKTRACESTOP:
+-              ret = blk_trace_startstop(q, start);
++              ret = __blk_trace_startstop(q, start);
+               break;
+       case BLKTRACETEARDOWN:
+-              ret = blk_trace_remove(q);
++              ret = __blk_trace_remove(q);
+               break;
+       default:
+               ret = -ENOTTY;
+               break;
+       }
+ 
+-      mutex_unlock(&bdev->bd_mutex);
++      mutex_unlock(&q->blk_trace_mutex);
+       return ret;
+ }
+ 
+@@ -698,10 +741,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned 
cmd, char __user *arg)
+  **/
+ void blk_trace_shutdown(struct request_queue *q)
+ {
+-      if (q->blk_trace) {
+-              blk_trace_startstop(q, 0);
+-              blk_trace_remove(q);
++      mutex_lock(&q->blk_trace_mutex);
++      if (rcu_dereference_protected(q->blk_trace,
++                                    lockdep_is_held(&q->blk_trace_mutex))) {
++              __blk_trace_startstop(q, 0);
++              __blk_trace_remove(q);
+       }
++
++      mutex_unlock(&q->blk_trace_mutex);
+ }
+ 
+ /*
+@@ -722,10 +769,14 @@ void blk_trace_shutdown(struct request_queue *q)
+ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+                            unsigned int nr_bytes, u32 what)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+ 
+-      if (likely(!bt))
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
++      if (likely(!bt)) {
++              rcu_read_unlock();
+               return;
++      }
+ 
+       if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+               what |= BLK_TC_ACT(BLK_TC_PC);
+@@ -736,6 +787,7 @@ static void blk_add_trace_rq(struct request_queue *q, 
struct request *rq,
+               __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
+                               rq->cmd_flags, what, rq->errors, 0, NULL);
+       }
++      rcu_read_unlock();
+ }
+ 
+ static void blk_add_trace_rq_abort(void *ignore,
+@@ -785,13 +837,18 @@ static void blk_add_trace_rq_complete(void *ignore,
+ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+                             u32 what, int error)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+ 
+-      if (likely(!bt))
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
++      if (likely(!bt)) {
++              rcu_read_unlock();
+               return;
++      }
+ 
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, what, error, 0, NULL);
++      rcu_read_unlock();
+ }
+ 
+ static void blk_add_trace_bio_bounce(void *ignore,
+@@ -836,10 +893,13 @@ static void blk_add_trace_getrq(void *ignore,
+       if (bio)
+               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
+       else {
+-              struct blk_trace *bt = q->blk_trace;
++              struct blk_trace *bt;
+ 
++              rcu_read_lock();
++              bt = rcu_dereference(q->blk_trace);
+               if (bt)
+                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
++              rcu_read_unlock();
+       }
+ }
+ 
+@@ -851,27 +911,35 @@ static void blk_add_trace_sleeprq(void *ignore,
+       if (bio)
+               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
+       else {
+-              struct blk_trace *bt = q->blk_trace;
++              struct blk_trace *bt;
+ 
++              rcu_read_lock();
++              bt = rcu_dereference(q->blk_trace);
+               if (bt)
+                       __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
+                                       0, 0, NULL);
++              rcu_read_unlock();
+       }
+ }
+ 
+ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+ 
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
+       if (bt)
+               __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
++      rcu_read_unlock();
+ }
+ 
+ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+                                   unsigned int depth, bool explicit)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+ 
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
+       if (bt) {
+               __be64 rpdu = cpu_to_be64(depth);
+               u32 what;
+@@ -883,14 +951,17 @@ static void blk_add_trace_unplug(void *ignore, struct 
request_queue *q,
+ 
+               __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+       }
++      rcu_read_unlock();
+ }
+ 
+ static void blk_add_trace_split(void *ignore,
+                               struct request_queue *q, struct bio *bio,
+                               unsigned int pdu)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+ 
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
+       if (bt) {
+               __be64 rpdu = cpu_to_be64(pdu);
+ 
+@@ -898,6 +969,7 @@ static void blk_add_trace_split(void *ignore,
+                               bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+                               bio->bi_error, sizeof(rpdu), &rpdu);
+       }
++      rcu_read_unlock();
+ }
+ 
+ /**
+@@ -917,11 +989,15 @@ static void blk_add_trace_bio_remap(void *ignore,
+                                   struct request_queue *q, struct bio *bio,
+                                   dev_t dev, sector_t from)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+       struct blk_io_trace_remap r;
+ 
+-      if (likely(!bt))
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
++      if (likely(!bt)) {
++              rcu_read_unlock();
+               return;
++      }
+ 
+       r.device_from = cpu_to_be32(dev);
+       r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
+@@ -930,6 +1006,7 @@ static void blk_add_trace_bio_remap(void *ignore,
+       __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+                       bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
+                       sizeof(r), &r);
++      rcu_read_unlock();
+ }
+ 
+ /**
+@@ -950,11 +1027,15 @@ static void blk_add_trace_rq_remap(void *ignore,
+                                  struct request *rq, dev_t dev,
+                                  sector_t from)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+       struct blk_io_trace_remap r;
+ 
+-      if (likely(!bt))
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
++      if (likely(!bt)) {
++              rcu_read_unlock();
+               return;
++      }
+ 
+       r.device_from = cpu_to_be32(dev);
+       r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
+@@ -963,6 +1044,7 @@ static void blk_add_trace_rq_remap(void *ignore,
+       __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+                       rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
+                       sizeof(r), &r);
++      rcu_read_unlock();
+ }
+ 
+ /**
+@@ -980,10 +1062,14 @@ void blk_add_driver_data(struct request_queue *q,
+                        struct request *rq,
+                        void *data, size_t len)
+ {
+-      struct blk_trace *bt = q->blk_trace;
++      struct blk_trace *bt;
+ 
+-      if (likely(!bt))
++      rcu_read_lock();
++      bt = rcu_dereference(q->blk_trace);
++      if (likely(!bt)) {
++              rcu_read_unlock();
+               return;
++      }
+ 
+       if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+               __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
+@@ -991,6 +1077,7 @@ void blk_add_driver_data(struct request_queue *q,
+       else
+               __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
+                               BLK_TA_DRV_DATA, rq->errors, len, data);
++      rcu_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(blk_add_driver_data);
+ 
+@@ -1482,6 +1569,7 @@ static int blk_trace_remove_queue(struct request_queue 
*q)
+               return -EINVAL;
+ 
+       put_probe_ref();
++      synchronize_rcu();
+       blk_trace_free(bt);
+       return 0;
+ }
+@@ -1642,6 +1730,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device 
*dev,
+       struct hd_struct *p = dev_to_part(dev);
+       struct request_queue *q;
+       struct block_device *bdev;
++      struct blk_trace *bt;
+       ssize_t ret = -ENXIO;
+ 
+       bdev = bdget(part_devt(p));
+@@ -1652,26 +1741,28 @@ static ssize_t sysfs_blk_trace_attr_show(struct device 
*dev,
+       if (q == NULL)
+               goto out_bdput;
+ 
+-      mutex_lock(&bdev->bd_mutex);
++      mutex_lock(&q->blk_trace_mutex);
+ 
++      bt = rcu_dereference_protected(q->blk_trace,
++                                     lockdep_is_held(&q->blk_trace_mutex));
+       if (attr == &dev_attr_enable) {
+-              ret = sprintf(buf, "%u\n", !!q->blk_trace);
++              ret = sprintf(buf, "%u\n", !!bt);
+               goto out_unlock_bdev;
+       }
+ 
+-      if (q->blk_trace == NULL)
++      if (bt == NULL)
+               ret = sprintf(buf, "disabled\n");
+       else if (attr == &dev_attr_act_mask)
+-              ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
++              ret = blk_trace_mask2str(buf, bt->act_mask);
+       else if (attr == &dev_attr_pid)
+-              ret = sprintf(buf, "%u\n", q->blk_trace->pid);
++              ret = sprintf(buf, "%u\n", bt->pid);
+       else if (attr == &dev_attr_start_lba)
+-              ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
++              ret = sprintf(buf, "%llu\n", bt->start_lba);
+       else if (attr == &dev_attr_end_lba)
+-              ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
++              ret = sprintf(buf, "%llu\n", bt->end_lba);
+ 
+ out_unlock_bdev:
+-      mutex_unlock(&bdev->bd_mutex);
++      mutex_unlock(&q->blk_trace_mutex);
+ out_bdput:
+       bdput(bdev);
+ out:
+@@ -1685,6 +1776,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device 
*dev,
+       struct block_device *bdev;
+       struct request_queue *q;
+       struct hd_struct *p;
++      struct blk_trace *bt;
+       u64 value;
+       ssize_t ret = -EINVAL;
+ 
+@@ -1713,10 +1805,12 @@ static ssize_t sysfs_blk_trace_attr_store(struct 
device *dev,
+       if (q == NULL)
+               goto out_bdput;
+ 
+-      mutex_lock(&bdev->bd_mutex);
++      mutex_lock(&q->blk_trace_mutex);
+ 
++      bt = rcu_dereference_protected(q->blk_trace,
++                                     lockdep_is_held(&q->blk_trace_mutex));
+       if (attr == &dev_attr_enable) {
+-              if (!!value == !!q->blk_trace) {
++              if (!!value == !!bt) {
+                       ret = 0;
+                       goto out_unlock_bdev;
+               }
+@@ -1728,22 +1822,25 @@ static ssize_t sysfs_blk_trace_attr_store(struct 
device *dev,
+       }
+ 
+       ret = 0;
+-      if (q->blk_trace == NULL)
++      if (bt == NULL) {
+               ret = blk_trace_setup_queue(q, bdev);
++              bt = rcu_dereference_protected(q->blk_trace,
++                              lockdep_is_held(&q->blk_trace_mutex));
++      }
+ 
+       if (ret == 0) {
+               if (attr == &dev_attr_act_mask)
+-                      q->blk_trace->act_mask = value;
++                      bt->act_mask = value;
+               else if (attr == &dev_attr_pid)
+-                      q->blk_trace->pid = value;
++                      bt->pid = value;
+               else if (attr == &dev_attr_start_lba)
+-                      q->blk_trace->start_lba = value;
++                      bt->start_lba = value;
+               else if (attr == &dev_attr_end_lba)
+-                      q->blk_trace->end_lba = value;
++                      bt->end_lba = value;
+       }
+ 
+ out_unlock_bdev:
+-      mutex_unlock(&bdev->bd_mutex);
++      mutex_unlock(&q->blk_trace_mutex);
+ out_bdput:
+       bdput(bdev);
+ out:
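
The kernel/trace/blktrace.c changes above are the updater half of the RCU pattern sketched after the blktrace_api.h hunk: every writer serializes on blk_trace_mutex, uses rcu_dereference_protected() while holding it, and waits for a grace period before freeing the old trace. The same pattern with the generic demo_* names, the mutex now added to the queue:

  #include <linux/mutex.h>
  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct demo_cfg {
          int verbosity;
  };

  struct demo_queue {
          struct demo_cfg __rcu *cfg;
          struct mutex cfg_mutex;
  };

  static void demo_clear_cfg(struct demo_queue *q)
  {
          struct demo_cfg *old;

          mutex_lock(&q->cfg_mutex);
          old = rcu_dereference_protected(q->cfg,
                                          lockdep_is_held(&q->cfg_mutex));
          RCU_INIT_POINTER(q->cfg, NULL);
          mutex_unlock(&q->cfg_mutex);

          if (old) {
                  synchronize_rcu();      /* wait out readers still using 'old' */
                  kfree(old);
          }
  }
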
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 804cbfe9132d..5fa8a3606f40 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1397,7 +1397,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, 
unsigned long end_pfn,
+                       while ((i < MAX_ORDER_NR_PAGES) &&
+                               !pfn_valid_within(pfn + i))
+                               i++;
+-                      if (i == MAX_ORDER_NR_PAGES)
++                      if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
+                               continue;
+                       /* Check if we got outside of the zone */
+                       if (zone && !zone_spans_pfn(zone, pfn + i))
+@@ -1414,7 +1414,7 @@ int test_pages_in_a_zone(unsigned long start_pfn, 
unsigned long end_pfn,
+ 
+       if (zone) {
+               *valid_start = start;
+-              *valid_end = end;
++              *valid_end = min(end, end_pfn);
+               return 1;
+       } else {
+               return 0;
+diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
+index 86c69208da2b..91de807a8f03 100644
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -991,15 +991,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct 
batadv_priv *bat_priv,
+  */
+ static u8 batadv_nc_random_weight_tq(u8 tq)
+ {
+-      u8 rand_val, rand_tq;
+-
+-      get_random_bytes(&rand_val, sizeof(rand_val));
+-
+       /* randomize the estimated packet loss (max TQ - estimated TQ) */
+-      rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
+-
+-      /* normalize the randomized packet loss */
+-      rand_tq /= BATADV_TQ_MAX_VALUE;
++      u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
+ 
+       /* convert to (randomized) estimated tq again */
+       return BATADV_TQ_MAX_VALUE - rand_tq;
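
In the batman-adv hunk, the old code stored the product of a random byte and (BATADV_TQ_MAX_VALUE - tq) in a u8, so the intermediate value was truncated to 8 bits before the normalizing division and the randomized loss came out wrong for most inputs. prandom_u32_max() does the scaling in 32 bits and returns a uniform value in [0, range). An equivalent sketch outside the driver:

  #include <linux/random.h>
  #include <linux/types.h>

  #define DEMO_TQ_MAX 255

  static u8 demo_random_weight_tq(u8 tq)
  {
          /* rand_tq is uniform over 0 .. (DEMO_TQ_MAX - tq), inclusive */
          u8 rand_tq = prandom_u32_max(DEMO_TQ_MAX + 1 - tq);

          return DEMO_TQ_MAX - rand_tq;
  }
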
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a1043225c0c0..38e4977eb09d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6449,11 +6449,13 @@ static void netdev_sync_lower_features(struct 
net_device *upper,
+                       netdev_dbg(upper, "Disabling feature %pNF on lower dev 
%s.\n",
+                                  &feature, lower->name);
+                       lower->wanted_features &= ~feature;
+-                      netdev_update_features(lower);
++                      __netdev_update_features(lower);
+ 
+                       if (unlikely(lower->features & feature))
+                               netdev_WARN(upper, "failed to disable %pNF on 
%s!\n",
+                                           &feature, lower->name);
++                      else
++                              netdev_features_change(lower);
+               }
+       }
+ }
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index a2270188b864..9bcc6fdade3e 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -159,6 +159,7 @@ static void sched_send_work(unsigned long _data)
+ static void trace_drop_common(struct sk_buff *skb, void *location)
+ {
+       struct net_dm_alert_msg *msg;
++      struct net_dm_drop_point *point;
+       struct nlmsghdr *nlh;
+       struct nlattr *nla;
+       int i;
+@@ -177,11 +178,13 @@ static void trace_drop_common(struct sk_buff *skb, void 
*location)
+       nlh = (struct nlmsghdr *)dskb->data;
+       nla = genlmsg_data(nlmsg_data(nlh));
+       msg = nla_data(nla);
++      point = msg->points;
+       for (i = 0; i < msg->entries; i++) {
+-              if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
+-                      msg->points[i].count++;
++              if (!memcmp(&location, &point->pc, sizeof(void *))) {
++                      point->count++;
+                       goto out;
+               }
++              point++;
+       }
+       if (msg->entries == dm_hit_limit)
+               goto out;
+@@ -190,8 +193,8 @@ static void trace_drop_common(struct sk_buff *skb, void 
*location)
+        */
+       __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
+       nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
+-      memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
+-      msg->points[msg->entries].count = 1;
++      memcpy(point->pc, &location, sizeof(void *));
++      point->count = 1;
+       msg->entries++;
+ 
+       if (!timer_pending(&data->send_timer)) {
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 10eabd1a60aa..736cc95b5201 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -209,7 +209,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
+       final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+       rcu_read_unlock();
+ 
+-      dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
+@@ -276,7 +276,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
+       security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
+ 
+       /* sk = NULL, but it is safe for now. RST socket required. */
+-      dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
++      dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
+       if (!IS_ERR(dst)) {
+               skb_dst_set(skb, dst);
+               ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+@@ -879,7 +879,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+       opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+       final_p = fl6_update_dst(&fl6, opt, &final);
+ 
+-      dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               goto failure;
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 98ed5e43ab7b..0e83c5b08e0e 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1343,7 +1343,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
+                       return ret_val;
+               }
+ 
+-              secattr->flags |= NETLBL_SECATTR_MLS_CAT;
++              if (secattr->attr.mls.cat)
++                      secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+       }
+ 
+       return 0;
+@@ -1524,7 +1525,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
+                       return ret_val;
+               }
+ 
+-              secattr->flags |= NETLBL_SECATTR_MLS_CAT;
++              if (secattr->attr.mls.cat)
++                      secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+       }
+ 
+       return 0;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index e5448570d648..900ee28bda99 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -399,7 +399,10 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
+                                 iph->saddr, iph->daddr, tpi->key);
+ 
+       if (tunnel) {
+-              skb_pop_mac_header(skb);
++              if (tunnel->dev->type != ARPHRD_NONE)
++                      skb_pop_mac_header(skb);
++              else
++                      skb_reset_mac_header(skb);
+               if (tunnel->collect_md) {
+                       __be16 flags;
+                       __be64 tun_id;
+@@ -1015,6 +1018,8 @@ static void ipgre_netlink_parms(struct net_device *dev,
+               struct ip_tunnel *t = netdev_priv(dev);
+ 
+               t->collect_md = true;
++              if (dev->type == ARPHRD_IPGRE)
++                      dev->type = ARPHRD_NONE;
+       }
+ }
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 29a87fadf01b..325083464dbd 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -898,7 +898,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+       /* Check for load limit; set rate_last to the latest sent
+        * redirect.
+        */
+-      if (peer->rate_tokens == 0 ||
++      if (peer->n_redirects == 0 ||
+           time_after(jiffies,
+                      (peer->rate_last +
+                       (ip_rt_redirect_load << peer->n_redirects)))) {
+diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
+index bfa941fc1165..129324b36fb6 100644
+--- a/net/ipv6/addrconf_core.c
++++ b/net/ipv6/addrconf_core.c
+@@ -107,15 +107,16 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
+ }
+ EXPORT_SYMBOL(inet6addr_notifier_call_chain);
+ 
+-static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+-                                      struct dst_entry **u2,
+-                                      struct flowi6 *u3)
++static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net,
++                                                         const struct sock *sk,
++                                                         struct flowi6 *fl6,
++                                                         const struct in6_addr *final_dst)
+ {
+-      return -EAFNOSUPPORT;
++      return ERR_PTR(-EAFNOSUPPORT);
+ }
+ 
+ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+-      .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
++      .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow,
+ };
+ EXPORT_SYMBOL_GPL(ipv6_stub);
+ 
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index f9a4447ca002..37a562fc13d5 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -683,7 +683,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
+                                        &final);
+               rcu_read_unlock();
+ 
+-              dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++              dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+               if (IS_ERR(dst)) {
+                       sk->sk_route_caps = 0;
+                       sk->sk_err_soft = -PTR_ERR(dst);
+@@ -841,7 +841,7 @@ static struct pernet_operations inet6_net_ops = {
+ static const struct ipv6_stub ipv6_stub_impl = {
+       .ipv6_sock_mc_join = ipv6_sock_mc_join,
+       .ipv6_sock_mc_drop = ipv6_sock_mc_drop,
+-      .ipv6_dst_lookup = ip6_dst_lookup,
++      .ipv6_dst_lookup_flow = ip6_dst_lookup_flow,
+       .udpv6_encap_enable = udpv6_encap_enable,
+       .ndisc_send_na = ndisc_send_na,
+       .nd_tbl = &nd_tbl,
+@@ -1029,11 +1029,11 @@ netfilter_fail:
+ igmp_fail:
+       ndisc_cleanup();
+ ndisc_fail:
+-      ip6_mr_cleanup();
++      icmpv6_cleanup();
+ icmp_fail:
+-      unregister_pernet_subsys(&inet6_net_ops);
++      ip6_mr_cleanup();
+ ipmr_fail:
+-      icmpv6_cleanup();
++      unregister_pernet_subsys(&inet6_net_ops);
+ register_pernet_fail:
+       sock_unregister(PF_INET6);
+       rtnl_unregister_all(PF_INET6);
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 27cdf543c539..f33154365b64 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -179,7 +179,7 @@ ipv4_connected:
+       final_p = fl6_update_dst(&fl6, opt, &final);
+       rcu_read_unlock();
+ 
+-      dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+       err = 0;
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index d21e81cd6120..fa96e05cf22b 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -445,8 +445,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+ 
+       if (__ipv6_addr_needs_scope_id(addr_type))
+               iif = skb->dev->ifindex;
+-      else
+-              iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
++      else {
++              dst = skb_dst(skb);
++              iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
++      }
+ 
+       /*
+        *      Must not send error if the source does not uniquely
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index a7ca2cde2ecb..b31ab511c767 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -88,7 +88,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
+       fl6->fl6_sport = htons(ireq->ir_num);
+       security_req_classify_flow(req, flowi6_to_flowi(fl6));
+ 
+-      dst = ip6_dst_lookup_flow(sk, fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+       if (IS_ERR(dst))
+               return NULL;
+ 
+@@ -142,7 +142,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+ 
+       dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+       if (!dst) {
+-              dst = ip6_dst_lookup_flow(sk, fl6, final_p);
++              dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+ 
+               if (!IS_ERR(dst))
+                       ip6_dst_store(sk, dst, NULL, NULL);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index e39dc94486b2..1e2b8d33d303 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1057,13 +1057,13 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
+  *    It returns a valid dst pointer on success, or a pointer encoded
+  *    error code.
+  */
+-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
++struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
+                                     const struct in6_addr *final_dst)
+ {
+       struct dst_entry *dst = NULL;
+       int err;
+ 
+-      err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
++      err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
+       if (err)
+               return ERR_PTR(err);
+       if (final_dst)
+@@ -1071,7 +1071,7 @@ struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+       if (!fl6->flowi6_oif)
+               fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
+ 
+-      return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++      return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
+ }
+ EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
+ 
+@@ -1096,7 +1096,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ 
+       dst = ip6_sk_dst_check(sk, dst, fl6);
+       if (!dst)
+-              dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
++              dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
+ 
+       return dst;
+ }
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 86c75e97cfec..67cdcd3d644f 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -889,7 +889,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+       if (hdrincl)
+               fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
+ 
+-      dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               goto out;
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index aee87282d352..fb3ba2a51119 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -231,7 +231,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+               fl6.fl6_sport = inet_sk(sk)->inet_sport;
+               security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+ 
+-              dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++              dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+               if (IS_ERR(dst))
+                       goto out_free;
+       }
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 6a36fcc5c4e1..b4ffcec732b4 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -245,7 +245,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-      dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               goto failure;
+@@ -831,7 +831,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
+        * Underlying function will use this to retrieve the network
+        * namespace
+        */
+-      dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
++      dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
+       if (!IS_ERR(dst)) {
+               skb_dst_set(buff, dst);
+               ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index af04a8a68269..2b5230ef8536 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -619,7 +619,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-      dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               goto out;
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index c2ce7dec5198..50d9138b2a1c 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -470,16 +470,15 @@ static struct net_device *inet6_fib_lookup_dev(struct net *net,
+       struct net_device *dev;
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+-      int err;
+ 
+       if (!ipv6_stub)
+               return ERR_PTR(-EAFNOSUPPORT);
+ 
+       memset(&fl6, 0, sizeof(fl6));
+       memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+-      err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+-      if (err)
+-              return ERR_PTR(err);
++      dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
++      if (IS_ERR(dst))
++              return ERR_CAST(dst);
+ 
+       dev = dst->dev;
+       dev_hold(dev);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index de0aad12b91d..e58516274e86 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -898,9 +898,9 @@ __nf_conntrack_alloc(struct net *net,
+       /* Don't set timer yet: wait for confirmation */
+       setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
+       write_pnet(&ct->ct_net, net);
+-      memset(&ct->__nfct_init_offset[0], 0,
++      memset(&ct->__nfct_init_offset, 0,
+              offsetof(struct nf_conn, proto) -
+-             offsetof(struct nf_conn, __nfct_init_offset[0]));
++             offsetof(struct nf_conn, __nfct_init_offset));
+ 
+       if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+               goto out_free;
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index bfa2b6d5b5cf..25ab12e25e05 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -605,6 +605,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
+       if ((off & (BITS_PER_LONG - 1)) != 0)
+               return -EINVAL;
+ 
++      /* a null catmap is equivalent to an empty one */
++      if (!catmap) {
++              *offset = (u32)-1;
++              return 0;
++      }
++
+       if (off < catmap->startbit) {
+               off = catmap->startbit;
+               *offset = off;
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index fd6c587b6a04..828fdced4ecd 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -143,8 +143,7 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               __be16 diff[] = { ~(hdr->h_proto), ethertype };
+ 
+-              skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+-                                      ~skb->csum);
++              skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
+       }
+ 
+       hdr->h_proto = ethertype;
+@@ -227,8 +226,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               __be32 diff[] = { ~(*stack), lse };
+ 
+-              skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+-                                        ~skb->csum);
++              skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
+       }
+ 
+       *stack = lse;
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index e8dcf94a23c8..2812de74c9a7 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -396,7 +396,8 @@ static void choke_reset(struct Qdisc *sch)
+               qdisc_drop(skb, sch);
+       }
+ 
+-      memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
++      if (q->tab)
++              memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+       q->head = q->tail = 0;
+       red_restart(&q->vars);
+ }
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index e2e4ebc0c4c3..7929c1a11e12 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -635,6 +635,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+       if (ctl->divisor &&
+           (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+               return -EINVAL;
++
++      /* slot->allot is a short, make sure quantum is not too big. */
++      if (ctl->quantum) {
++              unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
++
++              if (scaled <= 0 || scaled > SHRT_MAX)
++                      return -EINVAL;
++      }
++
+       if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
+                                       ctl_v1->Wlog))
+               return -EINVAL;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index dd097e065f39..1a6849add0e3 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -268,7 +268,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+       final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+       rcu_read_unlock();
+ 
+-      dst = ip6_dst_lookup_flow(sk, fl6, final_p);
++      dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+       if (!asoc || saddr) {
+               t->dst = dst;
+               memcpy(fl, &_fl, sizeof(_fl));
+@@ -326,7 +326,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+               fl6->saddr = laddr->a.v6.sin6_addr;
+               fl6->fl6_sport = laddr->a.v6.sin6_port;
+               final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+-              bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
++              bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
+ 
+               if (IS_ERR(bdst))
+                       continue;
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index cb39f1c4251e..ac2079439242 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -200,10 +200,13 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
+                       .saddr = src->ipv6,
+                       .flowi6_proto = IPPROTO_UDP
+               };
+-              err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+-                                               &fl6);
+-              if (err)
++              ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
++                                                     ub->ubsock->sk,
++                                                     &fl6, NULL);
++              if (IS_ERR(ndst)) {
++                      err = PTR_ERR(ndst);
+                       goto tx_error;
++              }
+               ttl = ip6_dst_hoplimit(ndst);
+               err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
+                                          ndst->dev, &src->ipv6,
+diff --git a/scripts/decodecode b/scripts/decodecode
+index d8824f37acce..aae7a035242b 100755
+--- a/scripts/decodecode
++++ b/scripts/decodecode
+@@ -98,7 +98,7 @@ faultlinenum=$(( $(wc -l $T.oo  | cut -d" " -f1) - \
+ faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
+ faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
+ 
+-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
++cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
+ echo
+ cat $T.aa
+ cleanup
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index c8b2309352d7..481c1ad1db57 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -108,6 +108,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
+               runtime->event(runtime->substream);
+ }
+ 
++/* buffer refcount management: call with runtime->lock held */
++static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
++{
++      runtime->buffer_ref++;
++}
++
++static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
++{
++      runtime->buffer_ref--;
++}
++
+ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
+ {
+       struct snd_rawmidi_runtime *runtime;
+@@ -125,7 +136,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
+               runtime->avail = 0;
+       else
+               runtime->avail = runtime->buffer_size;
+-      if ((runtime->buffer = kmalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) {
++      if ((runtime->buffer = kzalloc(runtime->buffer_size, GFP_KERNEL)) == NULL) {
+               kfree(runtime);
+               return -ENOMEM;
+       }
+@@ -650,10 +661,15 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
+               return -EINVAL;
+       }
+       if (params->buffer_size != runtime->buffer_size) {
+-              newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
++              newbuf = kzalloc(params->buffer_size, GFP_KERNEL);
+               if (!newbuf)
+                       return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
++              if (runtime->buffer_ref) {
++                      spin_unlock_irq(&runtime->lock);
++                      kfree(newbuf);
++                      return -EBUSY;
++              }
+               oldbuf = runtime->buffer;
+               runtime->buffer = newbuf;
+               runtime->buffer_size = params->buffer_size;
+@@ -962,8 +978,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
+       long result = 0, count1;
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       unsigned long appl_ptr;
++      int err = 0;
+ 
+       spin_lock_irqsave(&runtime->lock, flags);
++      snd_rawmidi_buffer_ref(runtime);
+       while (count > 0 && runtime->avail) {
+               count1 = runtime->buffer_size - runtime->appl_ptr;
+               if (count1 > count)
+@@ -982,16 +1000,19 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
+               if (userbuf) {
+                       spin_unlock_irqrestore(&runtime->lock, flags);
+                       if (copy_to_user(userbuf + result,
+-                                       runtime->buffer + appl_ptr, count1)) {
+-                              return result > 0 ? result : -EFAULT;
+-                      }
++                                       runtime->buffer + appl_ptr, count1))
++                              err = -EFAULT;
+                       spin_lock_irqsave(&runtime->lock, flags);
++                      if (err)
++                              goto out;
+               }
+               result += count1;
+               count -= count1;
+       }
++ out:
++      snd_rawmidi_buffer_unref(runtime);
+       spin_unlock_irqrestore(&runtime->lock, flags);
+-      return result;
++      return result > 0 ? result : err;
+ }
+ 
+ long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
+@@ -1262,6 +1283,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+                       return -EAGAIN;
+               }
+       }
++      snd_rawmidi_buffer_ref(runtime);
+       while (count > 0 && runtime->avail > 0) {
+               count1 = runtime->buffer_size - runtime->appl_ptr;
+               if (count1 > count)
+@@ -1293,6 +1315,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+       }
+       __end:
+       count1 = runtime->avail < runtime->buffer_size;
++      snd_rawmidi_buffer_unref(runtime);
+       spin_unlock_irqrestore(&runtime->lock, flags);
+       if (count1)
+               snd_rawmidi_output_trigger(substream, 1);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b236e94b5808..7c5bbc6b91b9 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4840,6 +4840,7 @@ enum {
+       ALC269_FIXUP_HP_LINE1_MIC1_LED,
+       ALC269_FIXUP_INV_DMIC,
+       ALC269_FIXUP_LENOVO_DOCK,
++      ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST,
+       ALC269_FIXUP_NO_SHUTUP,
+       ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
+       ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+@@ -5106,6 +5107,12 @@ static const struct hda_fixup alc269_fixups[] = {
+               .chained = true,
+               .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
+       },
++      [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = {
++              .type = HDA_FIXUP_FUNC,
++              .v.func = alc269_fixup_limit_int_mic_boost,
++              .chained = true,
++              .chain_id = ALC269_FIXUP_LENOVO_DOCK,
++      },
+       [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+@@ -5760,7 +5767,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+       SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+       SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+-      SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++      SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
+       SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+@@ -5870,6 +5877,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+       {.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"},
+       {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
+       {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
++      {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"},
+       {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+       {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
+       {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+@@ -6333,8 +6341,6 @@ static int patch_alc269(struct hda_codec *codec)
+               alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+               break;
+       case 0x10ec0225:
+-              codec->power_save_node = 1;
+-              /* fall through */
+       case 0x10ec0295:
+       case 0x10ec0299:
+               spec->codec_variant = ALC269_TYPE_ALC225;
