commit:     8f9c5a44d98694dba36e19234ed5f67ee891d232
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 24 11:28:29 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 24 11:28:29 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8f9c5a44

Linux patch 4.4.129

 0000_README              |    4 +
 1128_linux-4.4.129.patch | 3714 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3718 insertions(+)

diff --git a/0000_README b/0000_README
index 618f750..c2625c8 100644
--- a/0000_README
+++ b/0000_README
@@ -555,6 +555,10 @@ Patch:  1127_linux-4.4.128.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.128
 
+Patch:  1128_linux-4.4.129.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.129
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1128_linux-4.4.129.patch b/1128_linux-4.4.129.patch
new file mode 100644
index 0000000..8b1de93
--- /dev/null
+++ b/1128_linux-4.4.129.patch
@@ -0,0 +1,3714 @@
+diff --git a/Makefile b/Makefile
+index 575459bb47eb..096d7e867b6c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 128
++SUBLEVEL = 129
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
+index a7da0dd0c98f..0898213f3bb2 100644
+--- a/arch/arm/boot/dts/at91sam9g25.dtsi
++++ b/arch/arm/boot/dts/at91sam9g25.dtsi
+@@ -21,7 +21,7 @@
+                               atmel,mux-mask = <
+                                     /*    A         B          C     */
+                                      0xffffffff 0xffe0399f 0xc000001c  /* pioA */
+-                                     0x0007ffff 0x8000fe3f 0x00000000  /* pioB */
++                                     0x0007ffff 0x00047e3f 0x00000000  /* pioB */
+                                      0x80000000 0x07c0ffff 0xb83fffff  /* pioC */
+                                      0x003fffff 0x003f8000 0x00000000  /* pioD */
+                                     >;
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index 3daf8d5d7878..fb0d1b252dc8 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -1354,7 +1354,7 @@
+                       pinctrl@fc06a000 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+-                              compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
++                              compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
+                               ranges = <0xfc068000 0xfc068000 0x100
+                                         0xfc06a000 0xfc06a000 0x4000>;
+                               /* WARNING: revisit as pin spec has changed */
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index c74c32ccc647..4f281768937f 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -1238,6 +1238,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
+ {
+       __kernel_size_t res;
+ 
++#ifdef CONFIG_CPU_MICROMIPS
++/* micromips memset / bzero also clobbers t7 & t8 */
++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
++#else
++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
++#endif /* CONFIG_CPU_MICROMIPS */
++
+       if (eva_kernel_access()) {
+               __asm__ __volatile__(
+                       "move\t$4, %1\n\t"
+@@ -1247,7 +1254,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
+                       "move\t%0, $6"
+                       : "=r" (res)
+                       : "r" (addr), "r" (size)
+-                      : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
++                      : bzero_clobbers);
+       } else {
+               might_fault();
+               __asm__ __volatile__(
+@@ -1258,7 +1265,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
+                       "move\t%0, $6"
+                       : "=r" (res)
+                       : "r" (addr), "r" (size)
+-                      : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
++                      : bzero_clobbers);
+       }
+ 
+       return res;
+diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
+index 8f0019a2e5c8..2d33cf2185d9 100644
+--- a/arch/mips/lib/memset.S
++++ b/arch/mips/lib/memset.S
+@@ -218,7 +218,7 @@
+ 1:    PTR_ADDIU       a0, 1                   /* fill bytewise */
+       R10KCBARRIER(0(ra))
+       bne             t1, a0, 1b
+-      sb              a1, -1(a0)
++       EX(sb, a1, -1(a0), .Lsmall_fixup\@)
+ 
+ 2:    jr              ra                      /* done */
+       move            a2, zero
+@@ -249,13 +249,18 @@
+       PTR_L           t0, TI_TASK($28)
+       andi            a2, STORMASK
+       LONG_L          t0, THREAD_BUADDR(t0)
+-      LONG_ADDU       a2, t1
++      LONG_ADDU       a2, a0
+       jr              ra
+       LONG_SUBU       a2, t0
+ 
+ .Llast_fixup\@:
+       jr              ra
+-      andi            v1, a2, STORMASK
++       nop
++
++.Lsmall_fixup\@:
++      PTR_SUBU        a2, t1, a0
++      jr              ra
++       PTR_ADDIU      a2, 1
+ 
+       .endm
+ 
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index dba508fe1683..4f7060ec6875 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -648,6 +648,10 @@ static int match_pci_device(struct device *dev, int index,
+                                       (modpath->mod == PCI_FUNC(devfn)));
+       }
+ 
++      /* index might be out of bounds for bc[] */
++      if (index >= 6)
++              return 0;
++
+       id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
+       return (modpath->bc[index] == id);
+ }
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 0eca6efc0631..b9e16855a037 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -36,7 +36,8 @@
+ 
+ #define smp_store_mb(var, value)      do { WRITE_ONCE(var, value); mb(); } while (0)
+ 
+-#ifdef __SUBARCH_HAS_LWSYNC
++/* The sub-arch has lwsync */
++#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+ #    define SMPWMB      LWSYNC
+ #else
+ #    define SMPWMB      eieio
+diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
+index 07a99e638449..bab3461115bb 100644
+--- a/arch/powerpc/include/asm/opal.h
++++ b/arch/powerpc/include/asm/opal.h
+@@ -21,6 +21,9 @@
+ /* We calculate number of sg entries based on PAGE_SIZE */
+ #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+ 
++/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
++#define OPAL_BUSY_DELAY_MS    10
++
+ /* /sys/firmware/opal */
+ extern struct kobject *opal_kobj;
+ 
+diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
+index c50868681f9e..e8d6a842f4bb 100644
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -5,10 +5,6 @@
+ #include <linux/stringify.h>
+ #include <asm/feature-fixups.h>
+ 
+-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+-#define __SUBARCH_HAS_LWSYNC
+-#endif
+-
+ #ifndef __ASSEMBLY__
+ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+ extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 98f81800e00c..304f07cfa262 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -788,7 +788,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
+       eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
+ 
+       /* PCI Command: 0x4 */
+-      eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
++      eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
++                            PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ 
+       /* Check the PCIe link is ready */
+       eeh_bridge_check_link(edev);
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index a18d648d31a6..3af014684872 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -53,7 +53,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
+               unsigned int *target = (unsigned int *)branch_target(src);
+ 
+               /* Branch within the section doesn't need translating */
+-              if (target < alt_start || target >= alt_end) {
++              if (target < alt_start || target > alt_end) {
+                       instr = translate_branch(dest, src);
+                       if (!instr)
+                               return 1;
+diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
+index 9db4398ded5d..1bceb95f422d 100644
+--- a/arch/powerpc/platforms/powernv/opal-nvram.c
++++ b/arch/powerpc/platforms/powernv/opal-nvram.c
+@@ -11,6 +11,7 @@
+ 
+ #define DEBUG
+ 
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
+ 
+       while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+               rc = opal_write_nvram(__pa(buf), count, off);
+-              if (rc == OPAL_BUSY_EVENT)
++              if (rc == OPAL_BUSY_EVENT) {
++                      msleep(OPAL_BUSY_DELAY_MS);
+                       opal_poll_events(NULL);
++              } else if (rc == OPAL_BUSY) {
++                      msleep(OPAL_BUSY_DELAY_MS);
++              }
+       }
++
++      if (rc)
++              return -EIO;
++
+       *index += count;
+       return count;
+ }
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index b2e5902bd8f4..c670279b33f0 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -318,7 +318,7 @@ static void hypfs_kill_super(struct super_block *sb)
+ 
+       if (sb->s_root)
+               hypfs_delete_tree(sb->s_root);
+-      if (sb_info->update_file)
++      if (sb_info && sb_info->update_file)
+               hypfs_remove(sb_info->update_file);
+       kfree(sb->s_fs_info);
+       sb->s_fs_info = NULL;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 42570d8fb265..e73979236659 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -798,6 +798,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
+       /* copy and convert to ebcdic */
+       memcpy(ipb->hdr.loadparm, buf, lp_len);
+       ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
++      ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
+       return len;
+ }
+ 
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index c211153ca69a..56648f4f8b41 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -140,7 +140,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
+ 
+ static void hard_handler(int sig, siginfo_t *si, void *p)
+ {
+-      struct ucontext *uc = p;
++      ucontext_t *uc = p;
+       mcontext_t *mc = &uc->uc_mcontext;
+       unsigned long pending = 1UL << sig;
+ 
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index bb6aab2fa7f5..eab1ef25eecd 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -280,11 +280,6 @@ config X86_32_LAZY_GS
+       def_bool y
+       depends on X86_32 && !CC_STACKPROTECTOR
+ 
+-config ARCH_HWEIGHT_CFLAGS
+-      string
+-      default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
+-      default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
+-
+ config ARCH_SUPPORTS_UPROBES
+       def_bool y
+ 
+diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
+index 259a7c1ef709..44f825c80ed5 100644
+--- a/arch/x86/include/asm/arch_hweight.h
++++ b/arch/x86/include/asm/arch_hweight.h
+@@ -2,8 +2,8 @@
+ #define _ASM_X86_HWEIGHT_H
+ 
+ #ifdef CONFIG_64BIT
+-/* popcnt %edi, %eax -- redundant REX prefix for alignment */
+-#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
++/* popcnt %edi, %eax */
++#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
+ /* popcnt %rdi, %rax */
+ #define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+ #define REG_IN "D"
+@@ -15,19 +15,15 @@
+ #define REG_OUT "a"
+ #endif
+ 
+-/*
+- * __sw_hweightXX are called from within the alternatives below
+- * and callee-clobbered registers need to be taken care of. See
+- * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+- * compiler switches.
+- */
++#define __HAVE_ARCH_SW_HWEIGHT
++
+ static __always_inline unsigned int __arch_hweight32(unsigned int w)
+ {
+-      unsigned int res = 0;
++      unsigned int res;
+ 
+       asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+-                   : "="REG_OUT (res)
+-                   : REG_IN (w));
++                       : "="REG_OUT (res)
++                       : REG_IN (w));
+ 
+       return res;
+ }
+@@ -51,11 +47,11 @@ static inline unsigned long __arch_hweight64(__u64 w)
+ #else
+ static __always_inline unsigned long __arch_hweight64(__u64 w)
+ {
+-      unsigned long res = 0;
++      unsigned long res;
+ 
+       asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+-                   : "="REG_OUT (res)
+-                   : REG_IN (w));
++                       : "="REG_OUT (res)
++                       : REG_IN (w));
+ 
+       return res;
+ }
+diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
+index 64341aa485ae..d40ee8a38fed 100644
+--- a/arch/x86/kernel/i386_ksyms_32.c
++++ b/arch/x86/kernel/i386_ksyms_32.c
+@@ -42,3 +42,5 @@ EXPORT_SYMBOL(empty_zero_page);
+ EXPORT_SYMBOL(___preempt_schedule);
+ EXPORT_SYMBOL(___preempt_schedule_notrace);
+ #endif
++
++EXPORT_SYMBOL(__sw_hweight32);
+diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
+index a0695be19864..c7efd394c42b 100644
+--- a/arch/x86/kernel/x8664_ksyms_64.c
++++ b/arch/x86/kernel/x8664_ksyms_64.c
+@@ -42,6 +42,9 @@ EXPORT_SYMBOL(clear_page);
+ 
+ EXPORT_SYMBOL(csum_partial);
+ 
++EXPORT_SYMBOL(__sw_hweight32);
++EXPORT_SYMBOL(__sw_hweight64);
++
+ /*
+  * Export string functions. We normally rely on gcc builtin for most of these,
+  * but gcc sometimes decides not to inline them.
+diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
+index 12a34d15b648..c0c8b0a49bb8 100644
+--- a/arch/x86/lib/Makefile
++++ b/arch/x86/lib/Makefile
+@@ -23,7 +23,7 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+ lib-$(CONFIG_RETPOLINE) += retpoline.o
+ 
+-obj-y += msr.o msr-reg.o msr-reg-export.o
++obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
+ 
+ ifeq ($(CONFIG_X86_32),y)
+         obj-y += atomic64_32.o
+diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
+new file mode 100644
+index 000000000000..8a602a1e404a
+--- /dev/null
++++ b/arch/x86/lib/hweight.S
+@@ -0,0 +1,79 @@
++#include <linux/linkage.h>
++
++#include <asm/asm.h>
++
++/*
++ * unsigned int __sw_hweight32(unsigned int w)
++ * %rdi: w
++ */
++ENTRY(__sw_hweight32)
++
++#ifdef CONFIG_X86_64
++      movl %edi, %eax                         # w
++#endif
++      __ASM_SIZE(push,) %__ASM_REG(dx)
++      movl %eax, %edx                         # w -> t
++      shrl %edx                               # t >>= 1
++      andl $0x55555555, %edx                  # t &= 0x55555555
++      subl %edx, %eax                         # w -= t
++
++      movl %eax, %edx                         # w -> t
++      shrl $2, %eax                           # w_tmp >>= 2
++      andl $0x33333333, %edx                  # t     &= 0x33333333
++      andl $0x33333333, %eax                  # w_tmp &= 0x33333333
++      addl %edx, %eax                         # w = w_tmp + t
++
++      movl %eax, %edx                         # w -> t
++      shrl $4, %edx                           # t >>= 4
++      addl %edx, %eax                         # w_tmp += t
++      andl  $0x0f0f0f0f, %eax                 # w_tmp &= 0x0f0f0f0f
++      imull $0x01010101, %eax, %eax           # w_tmp *= 0x01010101
++      shrl $24, %eax                          # w = w_tmp >> 24
++      __ASM_SIZE(pop,) %__ASM_REG(dx)
++      ret
++ENDPROC(__sw_hweight32)
++
++ENTRY(__sw_hweight64)
++#ifdef CONFIG_X86_64
++      pushq   %rdi
++      pushq   %rdx
++
++      movq    %rdi, %rdx                      # w -> t
++      movabsq $0x5555555555555555, %rax
++      shrq    %rdx                            # t >>= 1
++      andq    %rdx, %rax                      # t &= 0x5555555555555555
++      movabsq $0x3333333333333333, %rdx
++      subq    %rax, %rdi                      # w -= t
++
++      movq    %rdi, %rax                      # w -> t
++      shrq    $2, %rdi                        # w_tmp >>= 2
++      andq    %rdx, %rax                      # t     &= 0x3333333333333333
++      andq    %rdi, %rdx                      # w_tmp &= 0x3333333333333333
++      addq    %rdx, %rax                      # w = w_tmp + t
++
++      movq    %rax, %rdx                      # w -> t
++      shrq    $4, %rdx                        # t >>= 4
++      addq    %rdx, %rax                      # w_tmp += t
++      movabsq $0x0f0f0f0f0f0f0f0f, %rdx
++      andq    %rdx, %rax                      # w_tmp &= 0x0f0f0f0f0f0f0f0f
++      movabsq $0x0101010101010101, %rdx
++      imulq   %rdx, %rax                      # w_tmp *= 0x0101010101010101
++      shrq    $56, %rax                       # w = w_tmp >> 56
++
++      popq    %rdx
++      popq    %rdi
++      ret
++#else /* CONFIG_X86_32 */
++      /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
++      pushl   %ecx
++
++      call    __sw_hweight32
++      movl    %eax, %ecx                      # stash away result
++      movl    %edx, %eax                      # second part of input
++      call    __sw_hweight32
++      addl    %ecx, %eax                      # result
++
++      popl    %ecx
++      ret
++#endif
++ENDPROC(__sw_hweight64)
+diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
+index 1518d2805ae8..fd6825537b97 100644
+--- a/arch/x86/um/stub_segv.c
++++ b/arch/x86/um/stub_segv.c
+@@ -10,7 +10,7 @@
+ void __attribute__ ((__section__ (".__syscall_stub")))
+ stub_segv_handler(int sig, siginfo_t *info, void *p)
+ {
+-      struct ucontext *uc = p;
++      ucontext_t *uc = p;
+ 
+       GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
+                             &uc->uc_mcontext);
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index b48ecbfc4498..8c5503c0bad7 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -205,6 +205,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+                         "3570R/370R/470R/450R/510R/4450RV"),
+               },
+       },
++      {
++       /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
++       .callback = video_detect_force_video,
++       .ident = "SAMSUNG 670Z5E",
++       .matches = {
++              DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++              DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
++              },
++      },
+       {
+        /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
+        .callback = video_detect_force_video,
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 4ac63c0e50c7..fd377b956199 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1582,7 +1582,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
+               return -EINVAL;
+       if (val_len % map->format.val_bytes)
+               return -EINVAL;
+-      if (map->max_raw_write && map->max_raw_write > val_len)
++      if (map->max_raw_write && map->max_raw_write < val_len)
+               return -E2BIG;
+ 
+       map->lock(map->lock_arg);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1dd16f26e77d..e8165ec55e6f 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1121,11 +1121,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+       if (info->lo_encrypt_type) {
+               unsigned int type = info->lo_encrypt_type;
+ 
+-              if (type >= MAX_LO_CRYPT)
+-                      return -EINVAL;
++              if (type >= MAX_LO_CRYPT) {
++                      err = -EINVAL;
++                      goto exit;
++              }
+               xfer = xfer_funcs[type];
+-              if (xfer == NULL)
+-                      return -EINVAL;
++              if (xfer == NULL) {
++                      err = -EINVAL;
++                      goto exit;
++              }
+       } else
+               xfer = NULL;
+ 
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index bd9fc2baa6aa..dffd06a3bb76 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -724,7 +724,7 @@ retry:
+ 
+ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+ {
+-      const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
++      const int nbits_max = r->poolinfo->poolwords * 32;
+ 
+       if (nbits < 0)
+               return -EINVAL;
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 7c4b1ffe874f..d56ba46e6b78 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -891,9 +891,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
+       const struct bcm2835_pll_data *data = pll->data;
+ 
+       spin_lock(&cprman->regs_lock);
+-      cprman_write(cprman, data->cm_ctrl_reg,
+-                   cprman_read(cprman, data->cm_ctrl_reg) |
+-                   CM_PLL_ANARST);
++      cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
+       cprman_write(cprman, data->a2w_ctrl_reg,
+                    cprman_read(cprman, data->a2w_ctrl_reg) |
+                    A2W_PLL_CTRL_PWRDN);
+@@ -929,6 +927,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
+               cpu_relax();
+       }
+ 
++      cprman_write(cprman, data->a2w_ctrl_reg,
++                   cprman_read(cprman, data->a2w_ctrl_reg) |
++                   A2W_PLL_CTRL_PRST_DISABLE);
++
+       return 0;
+ }
+ 
+diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
+index 8bccf4ecdab6..9ff4ea63932d 100644
+--- a/drivers/clk/mvebu/armada-38x.c
++++ b/drivers/clk/mvebu/armada-38x.c
+@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
+ }
+ 
+ static const u32 armada_38x_cpu_frequencies[] __initconst = {
+-      0, 0, 0, 0,
+-      1066 * 1000 * 1000, 0, 0, 0,
++      666 * 1000 * 1000,  0, 800 * 1000 * 1000, 0,
++      1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
+       1332 * 1000 * 1000, 0, 0, 0,
+-      1600 * 1000 * 1000,
++      1600 * 1000 * 1000, 0, 0, 0,
++      1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
+ };
+ 
+ static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
+@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
+ };
+ 
+ static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
+-      {0, 1}, {0, 1}, {0, 1}, {0, 1},
+-      {1, 2}, {0, 1}, {0, 1}, {0, 1},
++      {1, 2}, {0, 1}, {1, 2}, {0, 1},
++      {1, 2}, {0, 1}, {1, 2}, {0, 1},
+       {1, 2}, {0, 1}, {0, 1}, {0, 1},
+       {1, 2}, {0, 1}, {0, 1}, {0, 1},
+-      {0, 1}, {0, 1}, {0, 1}, {0, 1},
++      {1, 2}, {0, 1}, {0, 1}, {1, 2},
+       {0, 1}, {0, 1}, {0, 1}, {0, 1},
+       {0, 1}, {0, 1}, {0, 1}, {0, 1},
+       {0, 1}, {0, 1}, {0, 1}, {0, 1},
+@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
+       {1, 2}, {0, 1}, {0, 1}, {0, 1},
+       {1, 2}, {0, 1}, {0, 1}, {0, 1},
+       {1, 2}, {0, 1}, {0, 1}, {0, 1},
+-      {0, 1}, {0, 1}, {0, 1}, {0, 1},
++      {1, 2}, {0, 1}, {0, 1}, {7, 15},
+       {0, 1}, {0, 1}, {0, 1}, {0, 1},
+       {0, 1}, {0, 1}, {0, 1}, {0, 1},
+       {0, 1}, {0, 1}, {0, 1}, {0, 1},
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 66c073fc8afc..82a7c89caae2 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1473,10 +1473,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+               check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+               rmb();
+-              initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+-              rmb();
+               cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+               rmb();
++              initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
++              rmb();
+               cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+               rmb();
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index fb6ad143873f..83aee9e814ba 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -238,9 +238,10 @@ int radeon_bo_create(struct radeon_device *rdev,
+        * may be slow
+        * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+        */
+-
++#ifndef CONFIG_COMPILE_TEST
+ #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+        thanks to write-combining
++#endif
+ 
+       if (bo->flags & RADEON_GEM_GTT_WC)
+              DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index d9007cc37be1..892d0a71d766 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -5964,9 +5964,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
+ {
+       u32 lane_width;
+       u32 new_lane_width =
+-              (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++              ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+       u32 current_lane_width =
+-              (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++              ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ 
+       if (new_lane_width != current_lane_width) {
+               radeon_set_pcie_lanes(rdev, new_lane_width);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index d07fb967f92b..e4541c6bf3d3 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1331,7 +1331,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+        * of implement() working on 8 byte chunks
+        */
+ 
+-      int len = hid_report_len(report) + 7;
++      u32 len = hid_report_len(report) + 7;
+ 
+       return kmalloc(len, flags);
+ }
+@@ -1396,7 +1396,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
+ {
+       char *buf;
+       int ret;
+-      int len;
++      u32 len;
+ 
+       buf = hid_alloc_report_buf(report, GFP_KERNEL);
+       if (!buf)
+@@ -1422,14 +1422,14 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(__hid_request);
+ 
+-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+               int interrupt)
+ {
+       struct hid_report_enum *report_enum = hid->report_enum + type;
+       struct hid_report *report;
+       struct hid_driver *hdrv;
+       unsigned int a;
+-      int rsize, csize = size;
++      u32 rsize, csize = size;
+       u8 *cdata = data;
+       int ret = 0;
+ 
+@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
+  *
+  * This is data entry for lower layers.
+  */
+-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
++int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
+ {
+       struct hid_report_enum *report_enum;
+       struct hid_driver *hdrv;
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 53e54855c366..8d74e691ac90 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -1258,7 +1258,8 @@ static void hidinput_led_worker(struct work_struct *work)
+                                             led_work);
+       struct hid_field *field;
+       struct hid_report *report;
+-      int len, ret;
++      int ret;
++      u32 len;
+       __u8 *buf;
+ 
+       field = hidinput_get_led_field(hid);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index f62a9d6601cc..9de379c1b3fd 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -314,7 +314,8 @@ static struct attribute_group mt_attribute_group = {
+ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
+ {
+       struct mt_device *td = hid_get_drvdata(hdev);
+-      int ret, size = hid_report_len(report);
++      int ret;
++      u32 size = hid_report_len(report);
+       u8 *buf;
+ 
+       /*
+@@ -919,7 +920,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
+       struct hid_report_enum *re;
+       struct mt_class *cls = &td->mtclass;
+       char *buf;
+-      int report_len;
++      u32 report_len;
+ 
+       if (td->inputmode < 0)
+               return;
+diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
+index 67cd059a8f46..41a4a2af9db1 100644
+--- a/drivers/hid/hid-rmi.c
++++ b/drivers/hid/hid-rmi.c
+@@ -110,8 +110,8 @@ struct rmi_data {
+       u8 *writeReport;
+       u8 *readReport;
+ 
+-      int input_report_size;
+-      int output_report_size;
++      u32 input_report_size;
++      u32 output_report_size;
+ 
+       unsigned long flags;
+ 
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 9c2d7c23f296..c0c4df198725 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -197,6 +197,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
+       int ret = 0, len;
+       unsigned char report_number;
+ 
++      if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
++              ret = -ENODEV;
++              goto out;
++      }
++
+       dev = hidraw_table[minor]->hid;
+ 
+       if (!dev->ll_driver->raw_request) {
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 312aa1e33fb2..4c3ed078c6b9 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -137,10 +137,10 @@ struct i2c_hid {
+                                                  * register of the HID
+                                                  * descriptor. */
+       unsigned int            bufsize;        /* i2c buffer size */
+-      char                    *inbuf;         /* Input buffer */
+-      char                    *rawbuf;        /* Raw Input buffer */
+-      char                    *cmdbuf;        /* Command buffer */
+-      char                    *argsbuf;       /* Command arguments buffer */
++      u8                      *inbuf;         /* Input buffer */
++      u8                      *rawbuf;        /* Raw Input buffer */
++      u8                      *cmdbuf;        /* Command buffer */
++      u8                      *argsbuf;       /* Command arguments buffer */
+ 
+       unsigned long           flags;          /* device flags */
+ 
+@@ -387,7 +387,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ 
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+-      int ret, ret_size;
++      int ret;
++      u32 ret_size;
+       int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+ 
+       if (size > ihid->bufsize)
+@@ -412,7 +413,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
+               return;
+       }
+ 
+-      if (ret_size > size) {
++      if ((ret_size > size) || (ret_size <= 2)) {
+               dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+                       __func__, size, ret_size);
+               return;
+diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
+index a629f7c130f0..ac63e562071f 100644
+--- a/drivers/hwmon/ina2xx.c
++++ b/drivers/hwmon/ina2xx.c
+@@ -447,6 +447,7 @@ static int ina2xx_probe(struct i2c_client *client,
+ 
+       /* set the device type */
+       data->config = &ina2xx_config[id->driver_data];
++      mutex_init(&data->config_lock);
+ 
+       if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
+               struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
+@@ -473,8 +474,6 @@ static int ina2xx_probe(struct i2c_client *client,
+               return -ENODEV;
+       }
+ 
+-      mutex_init(&data->config_lock);
+-
+       data->groups[group++] = &ina2xx_group;
+       if (id->driver_data == ina226)
+               data->groups[group++] = &ina226_group;
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 960fcb613198..ea3bc9bb1b7a 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1230,6 +1230,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
+       if (!optlen)
+               return -EINVAL;
+ 
++      if (!ctx->cm_id->device)
++              return -EINVAL;
++
+       memset(&sa_path, 0, sizeof(sa_path));
+ 
+       ib_sa_unpack_path(path_data->path_rec, &sa_path);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 9a99cee2665a..4fd2892613dd 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2581,9 +2581,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+               ret = FAST_IO_FAIL;
+       else
+               ret = FAILED;
+-      srp_free_req(ch, req, scmnd, 0);
+-      scmnd->result = DID_ABORT << 16;
+-      scmnd->scsi_done(scmnd);
++      if (ret == SUCCESS) {
++              srp_free_req(ch, req, scmnd, 0);
++              scmnd->result = DID_ABORT << 16;
++              scmnd->scsi_done(scmnd);
++      }
+ 
+       return ret;
+ }
+@@ -3309,12 +3311,10 @@ static ssize_t srp_create_target(struct device *dev,
+                                     num_online_nodes());
+               const int ch_end = ((node_idx + 1) * target->ch_count /
+                                   num_online_nodes());
+-              const int cv_start = (node_idx * ibdev->num_comp_vectors /
+-                                    num_online_nodes() + target->comp_vector)
+-                                   % ibdev->num_comp_vectors;
+-              const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
+-                                  num_online_nodes() + target->comp_vector)
+-                                 % ibdev->num_comp_vectors;
++              const int cv_start = node_idx * ibdev->num_comp_vectors /
++                                   num_online_nodes();
++              const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
++                                 num_online_nodes();
+               int cpu_idx = 0;
+ 
+               for_each_online_cpu(cpu) {
+diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
+index a7d516f973dd..10068a481e22 100644
+--- a/drivers/iommu/intel-svm.c
++++ b/drivers/iommu/intel-svm.c
+@@ -389,6 +389,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
+                               pasid_max - 1, GFP_KERNEL);
+               if (ret < 0) {
+                       kfree(svm);
++                      kfree(sdev);
+                       goto out;
+               }
+               svm->pasid = ret;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 943f90e392a7..e0ae2f34623a 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
+ static int put_v4l2_window32(struct v4l2_window __user *kp,
+                            struct v4l2_window32 __user *up)
+ {
+-      struct v4l2_clip __user *kclips = kp->clips;
++      struct v4l2_clip __user *kclips;
+       struct v4l2_clip32 __user *uclips;
+       compat_caddr_t p;
+       u32 clipcount;
+@@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
+       if (!clipcount)
+               return 0;
+ 
++      if (get_user(kclips, &kp->clips))
++              return -EFAULT;
+       if (get_user(p, &up->clips))
+               return -EFAULT;
+       uclips = compat_ptr(p);
+diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
+index 76e8bce6f46e..ad572a0f2124 100644
+--- a/drivers/mmc/host/jz4740_mmc.c
++++ b/drivers/mmc/host/jz4740_mmc.c
+@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
+               host->irq_mask &= ~irq;
+       else
+               host->irq_mask |= irq;
+-      spin_unlock_irqrestore(&host->lock, flags);
+ 
+       writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
++      spin_unlock_irqrestore(&host->lock, flags);
+ }
+ 
+ static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
+diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
+index b2fb0528c092..07ad86759d92 100644
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+        * in any case.
+        */
+       if (mode & FMODE_WRITE) {
+-              ret = -EPERM;
++              ret = -EROFS;
+               goto out_unlock;
+       }
+ 
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 27de0463226e..a2e6c7848b0a 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -889,6 +889,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+               return -EINVAL;
+       }
+ 
++      /*
++       * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
++       * MLC NAND is different and needs special care, otherwise UBI or UBIFS
++       * will die soon and you will lose all your data.
++       */
++      if (mtd->type == MTD_MLCNANDFLASH) {
++              pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
++                      mtd->index);
++              return -EINVAL;
++      }
++
+       if (ubi_num == UBI_DEV_NUM_AUTO) {
+               /* Search for an empty slot in the @ubi_devices array */
+               for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 30d3999dddba..ed62f1efe6eb 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -360,7 +360,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
+ {
+       int i;
+ 
+-      flush_work(&ubi->fm_work);
+       return_unused_pool_pebs(ubi, &ubi->fm_pool);
+       return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ 
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 27ed25252aac..cfd81eb1b532 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+               if(x < 0 || x > comp->rslot_limit)
+                       goto bad;
+ 
++              /* Check if the cstate is initialized */
++              if (!comp->rstate[x].initialized)
++                      goto bad;
++
+               comp->flags &=~ SLF_TOSS;
+               comp->recv_current = x;
+       } else {
+@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
+       if (cs->cs_tcp.doff > 5)
+         memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
+       cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
++      cs->initialized = true;
+       /* Put headers back on packet
+        * Neither header checksum is recalculated
+        */
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index f9343bee1de3..6578127db847 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -704,6 +704,12 @@ static const struct usb_device_id products[] = {
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
++}, {
++      /* Cinterion AHS3 modem by GEMALTO */
++      USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
++                                    USB_CDC_SUBCLASS_ETHERNET,
++                                    USB_CDC_PROTO_NONE),
++      .driver_info = (unsigned long)&wwan_info,
+ }, {
+       /* Telit modules */
+       USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index ebdee8f01f65..a6d429950cb0 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -618,7 +618,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+                       offset += 0x100;
+               else
+                       ret = -EINVAL;
+-              ret = lan78xx_read_raw_otp(dev, offset, length, data);
++              if (!ret)
++                      ret = lan78xx_read_raw_otp(dev, offset, length, data);
+       }
+ 
+       return ret;
+diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+index b7f72f9c7988..b3691712df61 100644
+--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
++++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+@@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
+               goto err_free_dev;
+       }
+       mutex_init(&priv->io_mutex);
++      mutex_init(&priv->conf_mutex);
+ 
+       SET_IEEE80211_DEV(dev, &intf->dev);
+       usb_set_intfdata(intf, dev);
+@@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
+               printk(KERN_ERR "rtl8187: Cannot register device\n");
+               goto err_free_dmabuf;
+       }
+-      mutex_init(&priv->conf_mutex);
+       skb_queue_head_init(&priv->b_tx_status.queue);
+ 
+       wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 0b8d2655985f..fee4c01fbdfd 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -2024,7 +2024,10 @@ static void netback_changed(struct xenbus_device *dev,
+       case XenbusStateInitialised:
+       case XenbusStateReconfiguring:
+       case XenbusStateReconfigured:
++              break;
++
+       case XenbusStateUnknown:
++              wake_up_all(&module_unload_q);
+               break;
+ 
+       case XenbusStateInitWait:
+@@ -2155,7 +2158,9 @@ static int xennet_remove(struct xenbus_device *dev)
+               xenbus_switch_state(dev, XenbusStateClosing);
+               wait_event(module_unload_q,
+                          xenbus_read_driver_state(dev->otherend) ==
+-                         XenbusStateClosing);
++                         XenbusStateClosing ||
++                         xenbus_read_driver_state(dev->otherend) ==
++                         XenbusStateUnknown);
+ 
+               xenbus_switch_state(dev, XenbusStateClosed);
+               wait_event(module_unload_q,
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 0b3e0bfa7be5..572ca192cb1f 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
+ {
+       unsigned long long sta = 0;
+       struct acpiphp_func *func;
++      u32 dvid;
+ 
+       list_for_each_entry(func, &slot->funcs, sibling) {
+               if (func->flags & FUNC_HAS_STA) {
+@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
+                       if (ACPI_SUCCESS(status) && sta)
+                               break;
+               } else {
+-                      u32 dvid;
+-
+-                      pci_bus_read_config_dword(slot->bus,
+-                                                PCI_DEVFN(slot->device,
+-                                                          func->function),
+-                                                PCI_VENDOR_ID, &dvid);
+-                      if (dvid != 0xffffffff) {
++                      if (pci_bus_read_dev_vendor_id(slot->bus,
++                                      PCI_DEVFN(slot->device, func->function),
++                                      &dvid, 0)) {
+                               sta = ACPI_STA_ALL;
+                               break;
+                       }
+               }
+       }
+ 
++      if (!sta) {
++              /*
++               * Check for the slot itself since it may be that the
++               * ACPI slot is a device below PCIe upstream port so in
++               * that case it may not even be reachable yet.
++               */
++              if (pci_bus_read_dev_vendor_id(slot->bus,
++                              PCI_DEVFN(slot->device, 0), &dvid, 0)) {
++                      sta = ACPI_STA_ALL;
++              }
++      }
++
+       return (unsigned int)sta;
+ }
+ 
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 4bb5262f7aee..742ca57ece8c 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+                       int start, int count, int auto_ack)
+ {
+-      int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
++      int rc, tmp_count = count, tmp_start = start, nr = q->nr;
+       unsigned int ccq = 0;
+ 
+       qperf_inc(q, eqbs);
+@@ -149,14 +149,7 @@ again:
+               qperf_inc(q, eqbs_partial);
+               DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+                       tmp_count);
+-              /*
+-               * Retry once, if that fails bail out and process the
+-               * extracted buffers before trying again.
+-               */
+-              if (!retried++)
+-                      goto again;
+-              else
+-                      return count - tmp_count;
++              return count - tmp_count;
+       }
+ 
+       DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+@@ -212,7 +205,10 @@ again:
+       return 0;
+ }
+ 
+-/* returns number of examined buffers and their common state in *state */
++/*
++ * Returns number of examined buffers and their common state in *state.
++ * Requested number of buffers-to-examine must be > 0.
++ */
+ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+                                unsigned char *state, unsigned int count,
+                                int auto_ack, int merge_pending)
+@@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+       if (is_qebsm(q))
+               return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
+ 
+-      for (i = 0; i < count; i++) {
+-              if (!__state) {
+-                      __state = q->slsb.val[bufnr];
+-                      if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+-                              __state = SLSB_P_OUTPUT_EMPTY;
+-              } else if (merge_pending) {
+-                      if ((q->slsb.val[bufnr] & __state) != __state)
+-                              break;
+-              } else if (q->slsb.val[bufnr] != __state)
+-                      break;
++      /* get initial state: */
++      __state = q->slsb.val[bufnr];
++      if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
++              __state = SLSB_P_OUTPUT_EMPTY;
++
++      for (i = 1; i < count; i++) {
+               bufnr = next_buf(bufnr);
++
++              /* merge PENDING into EMPTY: */
++              if (merge_pending &&
++                  q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
++                  __state == SLSB_P_OUTPUT_EMPTY)
++                      continue;
++
++              /* stop if next state differs from initial state: */
++              if (q->slsb.val[bufnr] != __state)
++                      break;
+       }
+       *state = __state;
+       return i;
+diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
+index c5547bd711db..6a8300108148 100644
+--- a/drivers/thermal/imx_thermal.c
++++ b/drivers/thermal/imx_thermal.c
+@@ -589,6 +589,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
+       regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+       regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ 
++      data->irq_enabled = true;
++      data->mode = THERMAL_DEVICE_ENABLED;
++
+       ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+                       imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
+                       0, "imx_thermal", data);
+@@ -600,9 +603,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
+               return ret;
+       }
+ 
+-      data->irq_enabled = true;
+-      data->mode = THERMAL_DEVICE_ENABLED;
+-
+       return 0;
+ }
+ 
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 20a41f7de76f..6713fd1958e7 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -627,6 +627,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
+                                           * we just disable hotplug, the
+                                           * pci-tunnels stay alive.
+                                           */
++      .thaw_noirq = nhi_resume_noirq,
+       .restore_noirq = nhi_resume_noirq,
+ };
+ 
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 41dda25da049..190e5dc15738 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -2238,6 +2238,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+                               }
+                               if (tty_hung_up_p(file))
+                                       break;
++                              /*
++                               * Abort readers for ttys which never actually
++                               * get hung up.  See __tty_hangup().
++                               */
++                              if (test_bit(TTY_HUPPING, &tty->flags))
++                                      break;
+                               if (!timeout)
+                                       break;
+                               if (file->f_flags & O_NONBLOCK) {
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index a638c1738547..89fd20382ce4 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -702,6 +702,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
+               return;
+       }
+ 
++      /*
++       * Some console devices aren't actually hung up for technical and
++       * historical reasons, which can lead to indefinite interruptible
++       * sleep in n_tty_read().  The following explicitly tells
++       * n_tty_read() to abort readers.
++       */
++      set_bit(TTY_HUPPING, &tty->flags);
++
+       /* inuse_filps is protected by the single tty lock,
+          this really needs to change if we want to flush the
+          workqueue with the lock held */
+@@ -757,6 +765,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
+        * can't yet guarantee all that.
+        */
+       set_bit(TTY_HUPPED, &tty->flags);
++      clear_bit(TTY_HUPPING, &tty->flags);
+       tty_unlock(tty);
+ 
+       if (f)
+diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
+index 358ca8dd784f..a5240b4d7ab9 100644
+--- a/drivers/usb/core/generic.c
++++ b/drivers/usb/core/generic.c
+@@ -208,8 +208,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
+       if (!udev->parent)
+               rc = hcd_bus_suspend(udev, msg);
+ 
+-      /* Non-root devices don't need to do anything for FREEZE or PRETHAW */
+-      else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
++      /*
++       * Non-root USB2 devices don't need to do anything for FREEZE
++       * or PRETHAW. USB3 devices don't support global suspend and
++       * needs to be selectively suspended.
++       */
++      else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
++               && (udev->speed < USB_SPEED_SUPER))
+               rc = 0;
+       else
+               rc = usb_port_suspend(udev, msg);
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index d2c0c1a8d979..68230adf2449 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -167,7 +167,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
+       ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
+       if (ret) {
+               dev_err(dev, "couldn't add resources to dwc3 device\n");
+-              return ret;
++              goto err;
+       }
+ 
+       pci_set_drvdata(pci, dwc3);
+diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
+index 10d30afe4a3c..a0d1417362cd 100644
+--- a/drivers/usb/musb/musb_gadget_ep0.c
++++ b/drivers/usb/musb/musb_gadget_ep0.c
+@@ -114,15 +114,19 @@ static int service_tx_status_request(
+               }
+ 
+               is_in = epnum & USB_DIR_IN;
+-              if (is_in) {
+-                      epnum &= 0x0f;
++              epnum &= 0x0f;
++              if (epnum >= MUSB_C_NUM_EPS) {
++                      handled = -EINVAL;
++                      break;
++              }
++
++              if (is_in)
+                       ep = &musb->endpoints[epnum].ep_in;
+-              } else {
++              else
+                       ep = &musb->endpoints[epnum].ep_out;
+-              }
+               regs = musb->endpoints[epnum].regs;
+ 
+-              if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
++              if (!ep->desc) {
+                       handled = -EINVAL;
+                       break;
+               }
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index fe2b470d7ec6..c55c632a3b24 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -752,6 +752,62 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
+       return 0;
+ }
+ 
++static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
++                               int count, struct perm_bits *perm,
++                               int offset, __le32 val)
++{
++      __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
++                                offset + PCI_EXP_DEVCTL);
++      int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
++
++      count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
++      if (count < 0)
++              return count;
++
++      /*
++       * The FLR bit is virtualized, if set and the device supports PCIe
++       * FLR, issue a reset_function.  Regardless, clear the bit, the spec
++       * requires it to be always read as zero.  NB, reset_function might
++       * not use a PCIe FLR, we don't have that level of granularity.
++       */
++      if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
++              u32 cap;
++              int ret;
++
++              *ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
++
++              ret = pci_user_read_config_dword(vdev->pdev,
++                                               pos - offset + PCI_EXP_DEVCAP,
++                                               &cap);
++
++              if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
++                      pci_try_reset_function(vdev->pdev);
++      }
++
++      /*
++       * MPS is virtualized to the user, writes do not change the physical
++       * register since determining a proper MPS value requires a system wide
++       * device view.  The MRRS is largely independent of MPS, but since the
++       * user does not have that system-wide view, they might set a safe, but
++       * inefficiently low value.  Here we allow writes through to hardware,
++       * but we set the floor to the physical device MPS setting, so that
++       * we can at least use full TLPs, as defined by the MPS value.
++       *
++       * NB, if any devices actually depend on an artificially low MRRS
++       * setting, this will need to be revisited, perhaps with a quirk
++       * through pcie_set_readrq().
++       */
++      if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
++              readrq = 128 <<
++                      ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
++              readrq = max(readrq, pcie_get_mps(vdev->pdev));
++
++              pcie_set_readrq(vdev->pdev, readrq);
++      }
++
++      return count;
++}
++
+ /* Permissions for PCI Express capability */
+ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
+ {
+@@ -759,26 +815,67 @@ static int __init init_pci_cap_exp_perm(struct perm_bits 
*perm)
+       if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
+               return -ENOMEM;
+ 
++      perm->writefn = vfio_exp_config_write;
++
+       p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+ 
+       /*
+-       * Allow writes to device control fields (includes FLR!)
+-       * but not to devctl_phantom which could confuse IOMMU
+-       * or to the ARI bit in devctl2 which is set at probe time
++       * Allow writes to device control fields, except devctl_phantom,
++       * which could confuse IOMMU, MPS, which can break communication
++       * with other physical devices, and the ARI bit in devctl2, which
++       * is set at probe time.  FLR and MRRS get virtualized via our
++       * writefn.
+        */
+-      p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
++      p_setw(perm, PCI_EXP_DEVCTL,
++             PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
++             PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
+       p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
+       return 0;
+ }
+ 
++static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
++                              int count, struct perm_bits *perm,
++                              int offset, __le32 val)
++{
++      u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
++
++      count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
++      if (count < 0)
++              return count;
++
++      /*
++       * The FLR bit is virtualized: if it is set and the device supports AF
++       * FLR, issue a reset_function.  Regardless, clear the bit, since the spec
++       * requires it to always read as zero.  NB, reset_function might not use
++       * an AF FLR; we don't have that level of granularity.
++       */
++      if (*ctrl & PCI_AF_CTRL_FLR) {
++              u8 cap;
++              int ret;
++
++              *ctrl &= ~PCI_AF_CTRL_FLR;
++
++              ret = pci_user_read_config_byte(vdev->pdev,
++                                              pos - offset + PCI_AF_CAP,
++                                              &cap);
++
++              if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
++                      pci_try_reset_function(vdev->pdev);
++      }
++
++      return count;
++}
++
+ /* Permissions for Advanced Function capability */
+ static int __init init_pci_cap_af_perm(struct perm_bits *perm)
+ {
+       if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
+               return -ENOMEM;
+ 
++      perm->writefn = vfio_af_config_write;
++
+       p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+-      p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
++      p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
+       return 0;
+ }
+ 
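The vfio-pci hunks above route Device Control writes through a virtualizing write handler: the FLR bit is emulated and always reads back zero, MPS writes no longer reach the hardware, and a user-requested MRRS is floored at the device's MPS so full-sized TLPs stay usable. A small sketch of the MRRS decode-and-clamp step, assuming the standard PCIe encoding where a field value v means 128 << v bytes; PCI_EXP_DEVCTL_READRQ mirrors the usual header value, the rest is illustrative.

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ 0x7000   /* Max_Read_Request_Size field, bits 14:12 */

/* Decode the MRRS field from Device Control and floor it at the device MPS. */
static int effective_readrq(unsigned short devctl, int device_mps)
{
        int readrq = 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);

        return readrq > device_mps ? readrq : device_mps;
}

int main(void)
{
        /* The user asks for 128-byte reads while the device runs with a
         * 256-byte MPS: the floor keeps full-sized TLPs possible. */
        printf("%d\n", effective_readrq(0x0000, 256));   /* 128 requested -> 256 */
        printf("%d\n", effective_readrq(0x2000, 256));   /* 512 requested -> 512 */
        return 0;
}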
+diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
+index 016bd9355190..aa93df5833dc 100644
+--- a/drivers/watchdog/f71808e_wdt.c
++++ b/drivers/watchdog/f71808e_wdt.c
+@@ -450,7 +450,7 @@ static bool watchdog_is_running(void)
+ 
+       is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
+               && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
+-                      & F71808FG_FLAG_WD_EN);
++                      & BIT(F71808FG_FLAG_WD_EN));
+ 
+       superio_exit(watchdog.sioaddr);
+ 
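The f71808e_wdt fix above is the classic bit-index-versus-bit-mask mistake: F71808FG_FLAG_WD_EN is a bit position, so testing the register with the bare constant checks the wrong bits and has to be wrapped in BIT(). A tiny sketch of the difference, with an assumed flag position of 5; the real register layout lives in the driver and is not reproduced here.

#include <stdio.h>

#define BIT(n)        (1u << (n))
#define FLAG_WD_EN    5            /* assumed bit *position*, as in the driver */

int main(void)
{
        unsigned int conf = 0x20;  /* only bit 5 set: watchdog enabled */

        printf("wrong: %u\n", conf & FLAG_WD_EN);        /* 0x20 & 0x05 == 0 */
        printf("right: %u\n", conf & BIT(FLAG_WD_EN));   /* 0x20 & 0x20 != 0 */
        return 0;
}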
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index 7a54c6a867c8..500098cdb960 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct 
dentry *dentry, umode_t m
+ 
+       autofs4_del_active(dentry);
+ 
+-      inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
++      inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
+       if (!inode)
+               return -ENOMEM;
+       d_add(dentry, inode);
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index f97110461c19..78c51ce913db 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -242,8 +242,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
+        */
+       ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
+                            sb->s_blocksize * 8, bh->b_data);
+-      ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
+-      ext4_group_desc_csum_set(sb, block_group, gdp);
+       return 0;
+ }
+ 
+@@ -447,6 +445,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, 
ext4_group_t block_group)
+               err = ext4_init_block_bitmap(sb, bh, block_group, desc);
+               set_bitmap_uptodate(bh);
+               set_buffer_uptodate(bh);
++              set_buffer_verified(bh);
+               ext4_unlock_group(sb, block_group);
+               unlock_buffer(bh);
+               if (err) {
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 5388207d2832..e10c12f59c58 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char 
*bitmap)
+               memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
+ }
+ 
+-/* Initializes an uninitialized inode bitmap */
+-static int ext4_init_inode_bitmap(struct super_block *sb,
+-                                     struct buffer_head *bh,
+-                                     ext4_group_t block_group,
+-                                     struct ext4_group_desc *gdp)
+-{
+-      struct ext4_group_info *grp;
+-      struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      J_ASSERT_BH(bh, buffer_locked(bh));
+-
+-      /* If checksum is bad mark all blocks and inodes use to prevent
+-       * allocation, essentially implementing a per-group read-only flag. */
+-      if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
+-              grp = ext4_get_group_info(sb, block_group);
+-              if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+-                      percpu_counter_sub(&sbi->s_freeclusters_counter,
+-                                         grp->bb_free);
+-              set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+-              if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+-                      int count;
+-                      count = ext4_free_inodes_count(sb, gdp);
+-                      percpu_counter_sub(&sbi->s_freeinodes_counter,
+-                                         count);
+-              }
+-              set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
+-              return -EFSBADCRC;
+-      }
+-
+-      memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+-      ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+-                      bh->b_data);
+-      ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+-                                 EXT4_INODES_PER_GROUP(sb) / 8);
+-      ext4_group_desc_csum_set(sb, block_group, gdp);
+-
+-      return 0;
+-}
+-
+ void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
+ {
+       if (uptodate) {
+@@ -184,17 +146,14 @@ ext4_read_inode_bitmap(struct super_block *sb, 
ext4_group_t block_group)
+ 
+       ext4_lock_group(sb, block_group);
+       if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+-              err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
++              memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
++              ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
++                                   sb->s_blocksize * 8, bh->b_data);
+               set_bitmap_uptodate(bh);
+               set_buffer_uptodate(bh);
+               set_buffer_verified(bh);
+               ext4_unlock_group(sb, block_group);
+               unlock_buffer(bh);
+-              if (err) {
+-                      ext4_error(sb, "Failed to init inode bitmap for group "
+-                                 "%u: %d", block_group, err);
+-                      goto out;
+-              }
+               return bh;
+       }
+       ext4_unlock_group(sb, block_group);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index dad8e7bdf0a6..3006b81c107f 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -376,7 +376,7 @@ out:
+ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+                                   unsigned int len)
+ {
+-      int ret, size;
++      int ret, size, no_expand;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+ 
+       if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+@@ -386,15 +386,14 @@ static int ext4_prepare_inline_data(handle_t *handle, 
struct inode *inode,
+       if (size < len)
+               return -ENOSPC;
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_lock_xattr(inode, &no_expand);
+ 
+       if (ei->i_inline_off)
+               ret = ext4_update_inline_data(handle, inode, len);
+       else
+               ret = ext4_create_inline_data(handle, inode, len);
+ 
+-      up_write(&EXT4_I(inode)->xattr_sem);
+-
++      ext4_write_unlock_xattr(inode, &no_expand);
+       return ret;
+ }
+ 
+@@ -523,7 +522,7 @@ static int ext4_convert_inline_data_to_extent(struct 
address_space *mapping,
+                                             struct inode *inode,
+                                             unsigned flags)
+ {
+-      int ret, needed_blocks;
++      int ret, needed_blocks, no_expand;
+       handle_t *handle = NULL;
+       int retries = 0, sem_held = 0;
+       struct page *page = NULL;
+@@ -563,7 +562,7 @@ retry:
+               goto out;
+       }
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_lock_xattr(inode, &no_expand);
+       sem_held = 1;
+       /* If some one has already done this for us, just exit. */
+       if (!ext4_has_inline_data(inode)) {
+@@ -599,7 +598,7 @@ retry:
+               page_cache_release(page);
+               page = NULL;
+               ext4_orphan_add(handle, inode);
+-              up_write(&EXT4_I(inode)->xattr_sem);
++              ext4_write_unlock_xattr(inode, &no_expand);
+               sem_held = 0;
+               ext4_journal_stop(handle);
+               handle = NULL;
+@@ -625,7 +624,7 @@ out:
+               page_cache_release(page);
+       }
+       if (sem_held)
+-              up_write(&EXT4_I(inode)->xattr_sem);
++              ext4_write_unlock_xattr(inode, &no_expand);
+       if (handle)
+               ext4_journal_stop(handle);
+       brelse(iloc.bh);
+@@ -718,7 +717,7 @@ convert:
+ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+                              unsigned copied, struct page *page)
+ {
+-      int ret;
++      int ret, no_expand;
+       void *kaddr;
+       struct ext4_iloc iloc;
+ 
+@@ -736,7 +735,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t 
pos, unsigned len,
+               goto out;
+       }
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_lock_xattr(inode, &no_expand);
+       BUG_ON(!ext4_has_inline_data(inode));
+ 
+       kaddr = kmap_atomic(page);
+@@ -746,7 +745,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t 
pos, unsigned len,
+       /* clear page dirty so that writepages wouldn't work for us. */
+       ClearPageDirty(page);
+ 
+-      up_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_unlock_xattr(inode, &no_expand);
+       brelse(iloc.bh);
+ out:
+       return copied;
+@@ -757,7 +756,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
+                                 unsigned len,
+                                 struct page *page)
+ {
+-      int ret;
++      int ret, no_expand;
+       void *kaddr;
+       struct ext4_iloc iloc;
+ 
+@@ -767,11 +766,11 @@ ext4_journalled_write_inline_data(struct inode *inode,
+               return NULL;
+       }
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_lock_xattr(inode, &no_expand);
+       kaddr = kmap_atomic(page);
+       ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
+       kunmap_atomic(kaddr);
+-      up_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_unlock_xattr(inode, &no_expand);
+ 
+       return iloc.bh;
+ }
+@@ -1255,7 +1254,7 @@ out:
+ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
+                             struct dentry *dentry, struct inode *inode)
+ {
+-      int ret, inline_size;
++      int ret, inline_size, no_expand;
+       void *inline_start;
+       struct ext4_iloc iloc;
+       struct inode *dir = d_inode(dentry->d_parent);
+@@ -1264,7 +1263,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct 
ext4_filename *fname,
+       if (ret)
+               return ret;
+ 
+-      down_write(&EXT4_I(dir)->xattr_sem);
++      ext4_write_lock_xattr(dir, &no_expand);
+       if (!ext4_has_inline_data(dir))
+               goto out;
+ 
+@@ -1310,7 +1309,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct 
ext4_filename *fname,
+ 
+ out:
+       ext4_mark_inode_dirty(handle, dir);
+-      up_write(&EXT4_I(dir)->xattr_sem);
++      ext4_write_unlock_xattr(dir, &no_expand);
+       brelse(iloc.bh);
+       return ret;
+ }
+@@ -1670,7 +1669,7 @@ int ext4_delete_inline_entry(handle_t *handle,
+                            struct buffer_head *bh,
+                            int *has_inline_data)
+ {
+-      int err, inline_size;
++      int err, inline_size, no_expand;
+       struct ext4_iloc iloc;
+       void *inline_start;
+ 
+@@ -1678,7 +1677,7 @@ int ext4_delete_inline_entry(handle_t *handle,
+       if (err)
+               return err;
+ 
+-      down_write(&EXT4_I(dir)->xattr_sem);
++      ext4_write_lock_xattr(dir, &no_expand);
+       if (!ext4_has_inline_data(dir)) {
+               *has_inline_data = 0;
+               goto out;
+@@ -1713,7 +1712,7 @@ int ext4_delete_inline_entry(handle_t *handle,
+ 
+       ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
+ out:
+-      up_write(&EXT4_I(dir)->xattr_sem);
++      ext4_write_unlock_xattr(dir, &no_expand);
+       brelse(iloc.bh);
+       if (err != -ENOENT)
+               ext4_std_error(dir->i_sb, err);
+@@ -1812,11 +1811,11 @@ out:
+ 
+ int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
+ {
+-      int ret;
++      int ret, no_expand;
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_lock_xattr(inode, &no_expand);
+       ret = ext4_destroy_inline_data_nolock(handle, inode);
+-      up_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_unlock_xattr(inode, &no_expand);
+ 
+       return ret;
+ }
+@@ -1901,7 +1900,7 @@ out:
+ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ {
+       handle_t *handle;
+-      int inline_size, value_len, needed_blocks;
++      int inline_size, value_len, needed_blocks, no_expand;
+       size_t i_size;
+       void *value = NULL;
+       struct ext4_xattr_ibody_find is = {
+@@ -1918,7 +1917,7 @@ void ext4_inline_data_truncate(struct inode *inode, int 
*has_inline)
+       if (IS_ERR(handle))
+               return;
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_lock_xattr(inode, &no_expand);
+       if (!ext4_has_inline_data(inode)) {
+               *has_inline = 0;
+               ext4_journal_stop(handle);
+@@ -1976,7 +1975,7 @@ out_error:
+       up_write(&EXT4_I(inode)->i_data_sem);
+ out:
+       brelse(is.iloc.bh);
+-      up_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_unlock_xattr(inode, &no_expand);
+       kfree(value);
+       if (inode->i_nlink)
+               ext4_orphan_del(handle, inode);
+@@ -1992,7 +1991,7 @@ out:
+ 
+ int ext4_convert_inline_data(struct inode *inode)
+ {
+-      int error, needed_blocks;
++      int error, needed_blocks, no_expand;
+       handle_t *handle;
+       struct ext4_iloc iloc;
+ 
+@@ -2014,15 +2013,10 @@ int ext4_convert_inline_data(struct inode *inode)
+               goto out_free;
+       }
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
+-      if (!ext4_has_inline_data(inode)) {
+-              up_write(&EXT4_I(inode)->xattr_sem);
+-              goto out;
+-      }
+-
+-      error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+-      up_write(&EXT4_I(inode)->xattr_sem);
+-out:
++      ext4_write_lock_xattr(inode, &no_expand);
++      if (ext4_has_inline_data(inode))
++              error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
++      ext4_write_unlock_xattr(inode, &no_expand);
+       ext4_journal_stop(handle);
+ out_free:
+       brelse(iloc.bh);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index f0cabc8c96cb..56ce7fd0f0d0 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1515,6 +1515,8 @@ static void mpage_release_unused_pages(struct 
mpage_da_data *mpd,
+                       BUG_ON(!PageLocked(page));
+                       BUG_ON(PageWriteback(page));
+                       if (invalidate) {
++                              if (page_mapped(page))
++                                      clear_page_dirty_for_io(page);
+                               block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+                               ClearPageUptodate(page);
+                       }
+@@ -3256,29 +3258,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, 
struct iov_iter *iter,
+        * case, we allocate an io_end structure to hook to the iocb.
+        */
+       iocb->private = NULL;
+-      ext4_inode_aio_set(inode, NULL);
+-      if (!is_sync_kiocb(iocb)) {
+-              io_end = ext4_init_io_end(inode, GFP_NOFS);
+-              if (!io_end) {
+-                      ret = -ENOMEM;
+-                      goto retake_lock;
+-              }
+-              /*
+-               * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
+-               */
+-              iocb->private = ext4_get_io_end(io_end);
+-              /*
+-               * we save the io structure for current async direct
+-               * IO, so that later ext4_map_blocks() could flag the
+-               * io structure whether there is a unwritten extents
+-               * needs to be converted when IO is completed.
+-               */
+-              ext4_inode_aio_set(inode, io_end);
+-      }
+-
+       if (overwrite) {
+               get_block_func = ext4_get_block_write_nolock;
+       } else {
++              ext4_inode_aio_set(inode, NULL);
++              if (!is_sync_kiocb(iocb)) {
++                      io_end = ext4_init_io_end(inode, GFP_NOFS);
++                      if (!io_end) {
++                              ret = -ENOMEM;
++                              goto retake_lock;
++                      }
++                      /*
++                       * Grab reference for DIO. Will be dropped in
++                       * ext4_end_io_dio()
++                       */
++                      iocb->private = ext4_get_io_end(io_end);
++                      /*
++                       * we save the io structure for current async direct
++                       * IO, so that later ext4_map_blocks() could flag the
++                       * io structure whether there is a unwritten extents
++                       * needs to be converted when IO is completed.
++                       */
++                      ext4_inode_aio_set(inode, io_end);
++              }
+               get_block_func = ext4_get_block_write;
+               dio_flags = DIO_LOCKING;
+       }
+@@ -4231,6 +4233,12 @@ struct inode *ext4_iget(struct super_block *sb, 
unsigned long ino)
+               goto bad_inode;
+       raw_inode = ext4_raw_inode(&iloc);
+ 
++      if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
++              EXT4_ERROR_INODE(inode, "root inode unallocated");
++              ret = -EFSCORRUPTED;
++              goto bad_inode;
++      }
++
+       if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+               ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+               if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 742455292dfe..0e0438b5ddbe 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2131,6 +2131,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Block bitmap for group %u overlaps "
+                                "superblock", i);
++                      if (!(sb->s_flags & MS_RDONLY))
++                              return 0;
+               }
+               if (block_bitmap < first_block || block_bitmap > last_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -2143,6 +2145,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode bitmap for group %u overlaps "
+                                "superblock", i);
++                      if (!(sb->s_flags & MS_RDONLY))
++                              return 0;
+               }
+               if (inode_bitmap < first_block || inode_bitmap > last_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -2155,6 +2159,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode table for group %u overlaps "
+                                "superblock", i);
++                      if (!(sb->s_flags & MS_RDONLY))
++                              return 0;
+               }
+               if (inode_table < first_block ||
+                   inode_table + sbi->s_itb_per_group - 1 > last_block) {
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 8d661b3c47b6..c7cad05aed27 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1117,16 +1117,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode 
*inode, int name_index,
+       struct ext4_xattr_block_find bs = {
+               .s = { .not_found = -ENODATA, },
+       };
+-      unsigned long no_expand;
++      int no_expand;
+       int error;
+ 
+       if (!name)
+               return -EINVAL;
+       if (strlen(name) > 255)
+               return -ERANGE;
+-      down_write(&EXT4_I(inode)->xattr_sem);
+-      no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+-      ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
++      ext4_write_lock_xattr(inode, &no_expand);
+ 
+       error = ext4_reserve_inode_write(handle, inode, &is.iloc);
+       if (error)
+@@ -1187,7 +1185,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode 
*inode, int name_index,
+               ext4_xattr_update_super_block(handle, inode->i_sb);
+               inode->i_ctime = ext4_current_time(inode);
+               if (!value)
+-                      ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
++                      no_expand = 0;
+               error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+               /*
+                * The bh is consumed by ext4_mark_iloc_dirty, even with
+@@ -1201,9 +1199,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode 
*inode, int name_index,
+ cleanup:
+       brelse(is.iloc.bh);
+       brelse(bs.bh);
+-      if (no_expand == 0)
+-              ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+-      up_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_unlock_xattr(inode, &no_expand);
+       return error;
+ }
+ 
+@@ -1287,12 +1283,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, 
int new_extra_isize,
+       int error = 0, tried_min_extra_isize = 0;
+       int s_min_extra_isize = 
le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
+       int isize_diff; /* How much do we need to grow i_extra_isize */
++      int no_expand;
++
++      if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
++              return 0;
+ 
+-      down_write(&EXT4_I(inode)->xattr_sem);
+-      /*
+-       * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
+-       */
+-      ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ retry:
+       isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
+       if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+@@ -1486,8 +1481,7 @@ retry:
+       }
+       brelse(bh);
+ out:
+-      ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+-      up_write(&EXT4_I(inode)->xattr_sem);
++      ext4_write_unlock_xattr(inode, &no_expand);
+       return 0;
+ 
+ cleanup:
+@@ -1499,10 +1493,10 @@ cleanup:
+       kfree(bs);
+       brelse(bh);
+       /*
+-       * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
+-       * size expansion failed.
++       * Inode size expansion failed; don't try again
+        */
+-      up_write(&EXT4_I(inode)->xattr_sem);
++      no_expand = 1;
++      ext4_write_unlock_xattr(inode, &no_expand);
+       return error;
+ }
+ 
+diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
+index ddc0957760ba..c000ed398555 100644
+--- a/fs/ext4/xattr.h
++++ b/fs/ext4/xattr.h
+@@ -101,6 +101,38 @@ extern const struct xattr_handler 
ext4_xattr_security_handler;
+ 
+ #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
+ 
++/*
++ * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes.
++ * The first is to signal that the inline xattrs and data are
++ * taking up so much space that we might as well not keep trying to
++ * expand it.  The second is that xattr_sem is taken for writing, so
++ * we shouldn't try to recurse into the inode expansion.  For this
++ * second case, we need to make sure that we save and restore the
++ * NO_EXPAND state flag appropriately.
++ */
++static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
++{
++      down_write(&EXT4_I(inode)->xattr_sem);
++      *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
++      ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
++}
++
++static inline int ext4_write_trylock_xattr(struct inode *inode, int *save)
++{
++      if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0)
++              return 0;
++      *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
++      ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
++      return 1;
++}
++
++static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
++{
++      if (*save == 0)
++              ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
++      up_write(&EXT4_I(inode)->xattr_sem);
++}
++
+ extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
+ 
+ extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
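The new ext4_write_lock_xattr()/ext4_write_unlock_xattr() helpers above bundle taking xattr_sem with saving the previous NO_EXPAND state, so callers restore the flag on unlock instead of clearing it unconditionally. A minimal userspace sketch of the save-and-restore-a-flag-with-the-lock pattern; a pthread rwlock stands in for the rw_semaphore and a plain int for the inode state bit, neither of which is the ext4 implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t xattr_sem = PTHREAD_RWLOCK_INITIALIZER;
static int no_expand_flag;                 /* stands in for EXT4_STATE_NO_EXPAND */

static void write_lock_xattr(int *save)
{
        pthread_rwlock_wrlock(&xattr_sem);
        *save = no_expand_flag;            /* remember what the flag was */
        no_expand_flag = 1;                /* block recursive expansion attempts */
}

static void write_unlock_xattr(const int *save)
{
        if (*save == 0)                    /* only clear it if we set it ourselves */
                no_expand_flag = 0;
        pthread_rwlock_unlock(&xattr_sem);
}

int main(void)
{
        int saved;

        write_lock_xattr(&saved);
        /* ... modify in-inode xattrs here ... */
        write_unlock_xattr(&saved);
        printf("flag after unlock: %d\n", no_expand_flag);   /* restored to 0 */
        return 0;
}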
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 22b30249fbcb..0fe667875852 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -747,11 +747,12 @@ int inode_congested(struct inode *inode, int cong_bits)
+        */
+       if (inode && inode_to_wb_is_valid(inode)) {
+               struct bdi_writeback *wb;
+-              bool locked, congested;
++              struct wb_lock_cookie lock_cookie = {};
++              bool congested;
+ 
+-              wb = unlocked_inode_to_wb_begin(inode, &locked);
++              wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
+               congested = wb_congested(wb, cong_bits);
+-              unlocked_inode_to_wb_end(inode, locked);
++              unlocked_inode_to_wb_end(inode, &lock_cookie);
+               return congested;
+       }
+ 
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 624a57a9c4aa..4759df4eb8ce 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -914,7 +914,7 @@ out:
+ }
+ 
+ /*
+- * This is a variaon of __jbd2_update_log_tail which checks for validity of
++ * This is a variation of __jbd2_update_log_tail which checks for validity of
+  * provided log tail and locks j_checkpoint_mutex. So it is safe against races
+  * with other threads updating log tail.
+  */
+@@ -1384,6 +1384,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, 
tid_t tail_tid,
+       journal_superblock_t *sb = journal->j_superblock;
+       int ret;
+ 
++      if (is_journal_aborted(journal))
++              return -EIO;
++
+       BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+       jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
+                 tail_block, tail_tid);
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index d86c5e3176a1..600da1a4df29 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -345,7 +345,7 @@ static void jffs2_put_super (struct super_block *sb)
+ static void jffs2_kill_sb(struct super_block *sb)
+ {
+       struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+-      if (!(sb->s_flags & MS_RDONLY))
++      if (c && !(sb->s_flags & MS_RDONLY))
+               jffs2_stop_garbage_collect_thread(c);
+       kill_mtd_super(sb);
+       kfree(c);
+diff --git a/fs/namei.c b/fs/namei.c
+index 0fcad42e4d3e..de57dd59d95f 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -219,9 +219,10 @@ getname_kernel(const char * filename)
+       if (len <= EMBEDDED_NAME_MAX) {
+               result->name = (char *)result->iname;
+       } else if (len <= PATH_MAX) {
++              const size_t size = offsetof(struct filename, iname[1]);
+               struct filename *tmp;
+ 
+-              tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
++              tmp = kmalloc(size, GFP_KERNEL);
+               if (unlikely(!tmp)) {
+                       __putname(result);
+                       return ERR_PTR(-ENOMEM);
+diff --git a/fs/namespace.c b/fs/namespace.c
+index ec4078d16eb7..a879560ea144 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1018,7 +1018,8 @@ static struct mount *clone_mnt(struct mount *old, struct 
dentry *root,
+                       goto out_free;
+       }
+ 
+-      mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
++      mnt->mnt.mnt_flags = old->mnt.mnt_flags;
++      mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
+       /* Don't allow unprivileged users to change mount flags */
+       if (flag & CL_UNPRIVILEGED) {
+               mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index e0e5f7c3c99f..8a459b179183 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark 
*inode_mark,
+                                      u32 event_mask,
+                                      void *data, int data_type)
+ {
+-      __u32 marks_mask, marks_ignored_mask;
++      __u32 marks_mask = 0, marks_ignored_mask = 0;
+       struct path *path = data;
+ 
+       pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
+@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct 
fsnotify_mark *inode_mark,
+           !d_can_lookup(path->dentry))
+               return false;
+ 
+-      if (inode_mark && vfsmnt_mark) {
+-              marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
+-              marks_ignored_mask = (vfsmnt_mark->ignored_mask | 
inode_mark->ignored_mask);
+-      } else if (inode_mark) {
+-              /*
+-               * if the event is for a child and this inode doesn't care about
+-               * events on the child, don't send it!
+-               */
+-              if ((event_mask & FS_EVENT_ON_CHILD) &&
+-                  !(inode_mark->mask & FS_EVENT_ON_CHILD))
+-                      return false;
+-              marks_mask = inode_mark->mask;
+-              marks_ignored_mask = inode_mark->ignored_mask;
+-      } else if (vfsmnt_mark) {
+-              marks_mask = vfsmnt_mark->mask;
+-              marks_ignored_mask = vfsmnt_mark->ignored_mask;
+-      } else {
+-              BUG();
++      /*
++       * if the event is for a child and this inode doesn't care about
++       * events on the child, don't send it!
++       */
++      if (inode_mark &&
++          (!(event_mask & FS_EVENT_ON_CHILD) ||
++           (inode_mark->mask & FS_EVENT_ON_CHILD))) {
++              marks_mask |= inode_mark->mask;
++              marks_ignored_mask |= inode_mark->ignored_mask;
++      }
++
++      if (vfsmnt_mark) {
++              marks_mask |= vfsmnt_mark->mask;
++              marks_ignored_mask |= vfsmnt_mark->ignored_mask;
+       }
+ 
+       if (d_is_dir(path->dentry) &&
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index a72097b625ef..00985f9db9f7 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
+       if (IS_ERR(journal->j_dev_bd)) {
+               result = PTR_ERR(journal->j_dev_bd);
+               journal->j_dev_bd = NULL;
+-              reiserfs_warning(super,
++              reiserfs_warning(super, "sh-457",
+                                "journal_init_dev: Cannot open '%s': %i",
+                                jdev_name, result);
+               return result;
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 1fd90c079537..0bb6de356451 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1728,8 +1728,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
+ 
+       dbg_save_space_info(c);
+ 
+-      for (i = 0; i < c->jhead_cnt; i++)
+-              ubifs_wbuf_sync(&c->jheads[i].wbuf);
++      for (i = 0; i < c->jhead_cnt; i++) {
++              err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
++              if (err)
++                      ubifs_ro_mode(c, err);
++      }
+ 
+       c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
+       c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+@@ -1795,8 +1798,11 @@ static void ubifs_put_super(struct super_block *sb)
+                       int err;
+ 
+                       /* Synchronize write-buffers */
+-                      for (i = 0; i < c->jhead_cnt; i++)
+-                              ubifs_wbuf_sync(&c->jheads[i].wbuf);
++                      for (i = 0; i < c->jhead_cnt; i++) {
++                              err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
++                              if (err)
++                                      ubifs_ro_mode(c, err);
++                      }
+ 
+                       /*
+                        * We are being cleanly unmounted which means the
+diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
+index 140c29635069..a307c37c2e6c 100644
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -191,6 +191,11 @@ static inline void set_bdi_congested(struct 
backing_dev_info *bdi, int sync)
+       set_wb_congested(bdi->wb.congested, sync);
+ }
+ 
++struct wb_lock_cookie {
++      bool locked;
++      unsigned long flags;
++};
++
+ #ifdef CONFIG_CGROUP_WRITEBACK
+ 
+ /**
+diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
+index 89d3de3e096b..361274ce5815 100644
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -366,7 +366,7 @@ static inline struct bdi_writeback *inode_to_wb(struct 
inode *inode)
+ /**
+  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
+  * @inode: target inode
+- * @lockedp: temp bool output param, to be passed to the end function
++ * @cookie: output param, to be passed to the end function
+  *
+  * The caller wants to access the wb associated with @inode but isn't
+  * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
+@@ -374,12 +374,12 @@ static inline struct bdi_writeback *inode_to_wb(struct 
inode *inode)
+  * association doesn't change until the transaction is finished with
+  * unlocked_inode_to_wb_end().
+  *
+- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
+- * afterwards and can't sleep during transaction.  IRQ may or may not be
+- * disabled on return.
++ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards 
and
++ * can't sleep during the transaction.  IRQs may or may not be disabled on
++ * return.
+  */
+ static inline struct bdi_writeback *
+-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
+ {
+       rcu_read_lock();
+ 
+@@ -387,10 +387,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool 
*lockedp)
+        * Paired with store_release in inode_switch_wb_work_fn() and
+        * ensures that we see the new wb if we see cleared I_WB_SWITCH.
+        */
+-      *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
++      cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ 
+-      if (unlikely(*lockedp))
+-              spin_lock_irq(&inode->i_mapping->tree_lock);
++      if (unlikely(cookie->locked))
++              spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
+ 
+       /*
+        * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
+@@ -402,12 +402,14 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool 
*lockedp)
+ /**
+  * unlocked_inode_to_wb_end - end inode wb access transaction
+  * @inode: target inode
+- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
++ * @cookie: @cookie from unlocked_inode_to_wb_begin()
+  */
+-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
++static inline void unlocked_inode_to_wb_end(struct inode *inode,
++                                          struct wb_lock_cookie *cookie)
+ {
+-      if (unlikely(locked))
+-              spin_unlock_irq(&inode->i_mapping->tree_lock);
++      if (unlikely(cookie->locked))
++              spin_unlock_irqrestore(&inode->i_mapping->tree_lock,
++                                     cookie->flags);
+ 
+       rcu_read_unlock();
+ }
+@@ -454,12 +456,13 @@ static inline struct bdi_writeback *inode_to_wb(struct 
inode *inode)
+ }
+ 
+ static inline struct bdi_writeback *
+-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
+ {
+       return inode_to_wb(inode);
+ }
+ 
+-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
++static inline void unlocked_inode_to_wb_end(struct inode *inode,
++                                          struct wb_lock_cookie *cookie)
+ {
+ }
+ 
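The backing-dev change above replaces the bool handed between unlocked_inode_to_wb_begin() and unlocked_inode_to_wb_end() with a wb_lock_cookie, so the begin side can record both whether it took tree_lock and the saved IRQ flags that spin_unlock_irqrestore() needs. A stripped-down sketch of the cookie idea; a pthread mutex and a bool stand in for the irqsave spinlock and flags, so only the shape of the pattern matches the kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct wb_lock_cookie {
        bool locked;                       /* did begin() actually take the lock? */
        /* the kernel version also stashes the saved IRQ flags here */
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static bool wb_switch_in_progress;         /* stands in for I_WB_SWITCH */

static void wb_begin(struct wb_lock_cookie *cookie)
{
        cookie->locked = wb_switch_in_progress;   /* decide once, remember it */
        if (cookie->locked)
                pthread_mutex_lock(&tree_lock);
}

static void wb_end(const struct wb_lock_cookie *cookie)
{
        if (cookie->locked)                       /* trust the cookie, not a re-check */
                pthread_mutex_unlock(&tree_lock);
}

int main(void)
{
        struct wb_lock_cookie cookie = { 0 };

        wb_begin(&cookie);
        printf("took lock: %d\n", cookie.locked);
        wb_end(&cookie);
        return 0;
}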
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 251a1d382e23..fd86687f8119 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -793,7 +793,7 @@ extern int hidinput_connect(struct hid_device *hid, 
unsigned int force);
+ extern void hidinput_disconnect(struct hid_device *);
+ 
+ int hid_set_field(struct hid_field *, unsigned, __s32);
+-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
++int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
+ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned 
int code, struct hid_field **field);
+ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
+ unsigned int hidinput_count_leds(struct hid_device *hid);
+@@ -1098,13 +1098,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
+  *
+  * @report: the report we want to know the length
+  */
+-static inline int hid_report_len(struct hid_report *report)
++static inline u32 hid_report_len(struct hid_report *report)
+ {
+       /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+       return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ }
+ 
+-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+               int interrupt);
+ 
+ /* HID quirks API */
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 55f950afb60d..a100946607a5 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -225,10 +225,14 @@ extern pgprot_t protection_map[16];
+  * ->fault function. The vma's ->fault is responsible for returning a bitmask
+  * of VM_FAULT_xxx flags that give details about how the fault was handled.
+  *
++ * MM layer fills up gfp_mask for page allocations but fault handler might
++ * alter it if its implementation requires a different allocation context.
++ *
+  * pgoff should be used in favour of virtual_address, if possible.
+  */
+ struct vm_fault {
+       unsigned int flags;             /* FAULT_FLAG_xxx flags */
++      gfp_t gfp_mask;                 /* gfp mask to be used for allocations 
*/
+       pgoff_t pgoff;                  /* Logical page offset based on vma */
+       void __user *virtual_address;   /* Faulting virtual address */
+ 
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index a1042afff99a..d67ceb3f5958 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -342,6 +342,7 @@ struct tty_file_private {
+ #define TTY_PTY_LOCK          16      /* pty private */
+ #define TTY_NO_WRITE_SPLIT    17      /* Preserve write boundaries to driver 
*/
+ #define TTY_HUPPED            18      /* Post driver->hangup() */
++#define TTY_HUPPING           19      /* Hangup in progress */
+ #define TTY_LDISC_HALTED      22      /* Line discipline is halted */
+ 
+ #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
+diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
+index 8716d5942b65..8fcf8908a694 100644
+--- a/include/net/slhc_vj.h
++++ b/include/net/slhc_vj.h
+@@ -127,6 +127,7 @@ typedef __u32 int32;
+  */
+ struct cstate {
+       byte_t  cs_this;        /* connection id number (xmit) */
++      bool    initialized;    /* true if initialized */
+       struct cstate *next;    /* next in ring (xmit) */
+       struct iphdr cs_ip;     /* ip/tcp hdr from most recent packet */
+       struct tcphdr cs_tcp;
+diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
+index 760c969d885d..12bbf8c81112 100644
+--- a/include/sound/pcm_oss.h
++++ b/include/sound/pcm_oss.h
+@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
+       char *buffer;                           /* vmallocated period */
+       size_t buffer_used;                     /* used length from period 
buffer */
+       struct mutex params_lock;
++      atomic_t rw_ref;                /* concurrent read/write accesses */
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+       struct snd_pcm_plugin *plugin_first;
+       struct snd_pcm_plugin *plugin_last;
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 4982a4e7f009..a492dd81cf56 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma)
+       if (IS_ERR(shp))
+               return PTR_ERR(shp);
+ 
++      if (shp->shm_file != sfd->file) {
++              /* ID was reused */
++              shm_unlock(shp);
++              return -EINVAL;
++      }
++
+       shp->shm_atim = get_seconds();
+       shp->shm_lprid = task_tgid_vnr(current);
+       shp->shm_nattch++;
+@@ -414,8 +420,9 @@ static int shm_mmap(struct file *file, struct 
vm_area_struct *vma)
+       int ret;
+ 
+       /*
+-       * In case of remap_file_pages() emulation, the file can represent
+-       * removed IPC ID: propogate shm_lock() error to caller.
++       * In case of remap_file_pages() emulation, the file can represent an
++       * IPC ID that was removed, and possibly even reused by another shm
++       * segment already.  Propagate this case as an error to caller.
+        */
+       ret =__shm_open(vma);
+       if (ret)
+@@ -439,6 +446,7 @@ static int shm_release(struct inode *ino, struct file 
*file)
+       struct shm_file_data *sfd = shm_file_data(file);
+ 
+       put_ipc_ns(sfd->ns);
++      fput(sfd->file);
+       shm_file_data(file) = NULL;
+       kfree(sfd);
+       return 0;
+@@ -1198,7 +1206,16 @@ long do_shmat(int shmid, char __user *shmaddr, int 
shmflg,
+       file->f_mapping = shp->shm_file->f_mapping;
+       sfd->id = shp->shm_perm.id;
+       sfd->ns = get_ipc_ns(ns);
+-      sfd->file = shp->shm_file;
++      /*
++       * We need to take a reference to the real shm file to prevent the
++       * pointer from becoming stale in cases where the lifetime of the outer
++       * file extends beyond that of the shm segment.  It's not usually
++       * possible, but it can happen during remap_file_pages() emulation as
++       * that unmaps the memory, then does ->mmap() via file reference only.
++       * We'll deny the ->mmap() if the shm segment was since removed, but to
++       * detect shm ID reuse we need to compare the file pointers.
++       */
++      sfd->file = get_file(shp->shm_file);
+       sfd->vm_ops = NULL;
+ 
+       err = security_mmap_file(file, prot, flags);
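The ipc/shm.c hunks above make the shm_file_data hold its own reference to the segment's file (get_file()) and compare that remembered pointer in __shm_open(), so a mapping whose segment was deleted and whose ID was recycled is refused instead of silently attaching to a new segment. A toy sketch of detecting ID reuse by pinning and comparing the object rather than trusting the ID; the refcounted struct segment and reattach() are illustrative, not the kernel data structures.

#include <stdio.h>
#include <stdlib.h>

struct segment { int refs; };

static struct segment *segment_get(struct segment *s) { s->refs++; return s; }

static void segment_put(struct segment *s)
{
        if (--s->refs == 0)
                free(s);
}

/* The table slot for an ID may later point at a brand-new segment. */
static struct segment *table[1];

struct attachment {
        int id;
        struct segment *remembered;        /* pinned with segment_get() at setup time */
};

static int reattach(const struct attachment *a)
{
        /* The same ID is not enough: it must still be the same object. */
        return table[a->id] == a->remembered ? 0 : -1;   /* -EINVAL in the patch */
}

int main(void)
{
        struct segment *old = calloc(1, sizeof(*old));
        old->refs = 1;
        table[0] = old;

        struct attachment a = { .id = 0, .remembered = segment_get(old) };

        /* Delete the segment and recycle ID 0 for a new one; the held
         * reference keeps the remembered pointer valid for the comparison. */
        table[0] = NULL;
        segment_put(old);
        struct segment *fresh = calloc(1, sizeof(*fresh));
        fresh->refs = 1;
        table[0] = fresh;

        printf("reattach: %d\n", reattach(&a));   /* -1: the ID was reused */
        segment_put(a.remembered);
        segment_put(fresh);
        return 0;
}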
+diff --git a/kernel/resource.c b/kernel/resource.c
+index a4a94e700fb9..41718cd8cab5 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -611,7 +611,8 @@ static int __find_resource(struct resource *root, struct 
resource *old,
+                       alloc.start = 
constraint->alignf(constraint->alignf_data, &avail,
+                                       size, constraint->align);
+                       alloc.end = alloc.start + size - 1;
+-                      if (resource_contains(&avail, &alloc)) {
++                      if (alloc.start <= alloc.end &&
++                          resource_contains(&avail, &alloc)) {
+                               new->start = alloc.start;
+                               new->end = alloc.end;
+                               return 0;
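The kernel/resource.c fix above refuses candidate ranges whose end wrapped below their start, for example when an absurdly large size makes alloc.start + size - 1 overflow, so resource_contains() is never asked about an inverted range it could wrongly accept. A small sketch of why the extra check matters, using the same inclusive start/end convention; struct range and the sample values are illustrative.

#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

struct range { unsigned long start, end; };   /* end is inclusive, as in struct resource */

static bool range_contains(const struct range *outer, const struct range *inner)
{
        return outer->start <= inner->start && outer->end >= inner->end;
}

int main(void)
{
        struct range avail = { .start = 0x1000, .end = 0x1fff };
        unsigned long size = ULONG_MAX;                /* absurdly large request */
        struct range alloc = { .start = avail.start, .end = avail.start + size - 1 };

        /* alloc.end wrapped around, so the naive containment test says "fits". */
        printf("naive:   %s\n", range_contains(&avail, &alloc) ? "fits" : "rejected");

        /* The patched check rejects inverted ranges before asking about containment. */
        printf("patched: %s\n",
               (alloc.start <= alloc.end && range_contains(&avail, &alloc))
               ? "fits" : "rejected");
        return 0;
}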
+diff --git a/lib/Makefile b/lib/Makefile
+index 7f1de26613d2..cb4f6aa95013 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -58,8 +58,6 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
+ obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
+ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+ 
+-GCOV_PROFILE_hweight.o := n
+-CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+ 
+ obj-$(CONFIG_BTREE) += btree.o
+diff --git a/lib/hweight.c b/lib/hweight.c
+index 9a5c1f221558..43273a7d83cf 100644
+--- a/lib/hweight.c
++++ b/lib/hweight.c
+@@ -9,6 +9,7 @@
+  * The Hamming Weight of a number is the total number of bits set in it.
+  */
+ 
++#ifndef __HAVE_ARCH_SW_HWEIGHT
+ unsigned int __sw_hweight32(unsigned int w)
+ {
+ #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+@@ -25,6 +26,7 @@ unsigned int __sw_hweight32(unsigned int w)
+ #endif
+ }
+ EXPORT_SYMBOL(__sw_hweight32);
++#endif
+ 
+ unsigned int __sw_hweight16(unsigned int w)
+ {
+@@ -43,6 +45,7 @@ unsigned int __sw_hweight8(unsigned int w)
+ }
+ EXPORT_SYMBOL(__sw_hweight8);
+ 
++#ifndef __HAVE_ARCH_SW_HWEIGHT
+ unsigned long __sw_hweight64(__u64 w)
+ {
+ #if BITS_PER_LONG == 32
+@@ -65,3 +68,4 @@ unsigned long __sw_hweight64(__u64 w)
+ #endif
+ }
+ EXPORT_SYMBOL(__sw_hweight64);
++#endif
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 69f75c77c098..b15f1d8bba43 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -571,7 +571,7 @@ int replace_page_cache_page(struct page *old, struct page 
*new, gfp_t gfp_mask)
+       VM_BUG_ON_PAGE(!PageLocked(new), new);
+       VM_BUG_ON_PAGE(new->mapping, new);
+ 
+-      error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
++      error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
+       if (!error) {
+               struct address_space *mapping = old->mapping;
+               void (*freepage)(struct page *);
+@@ -630,7 +630,7 @@ static int __add_to_page_cache_locked(struct page *page,
+                       return error;
+       }
+ 
+-      error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
++      error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
+       if (error) {
+               if (!huge)
+                       mem_cgroup_cancel_charge(page, memcg);
+@@ -1192,8 +1192,7 @@ no_page:
+               if (fgp_flags & FGP_ACCESSED)
+                       __SetPageReferenced(page);
+ 
+-              err = add_to_page_cache_lru(page, mapping, offset,
+-                              gfp_mask & GFP_RECLAIM_MASK);
++              err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
+               if (unlikely(err)) {
+                       page_cache_release(page);
+                       page = NULL;
+@@ -1827,19 +1826,18 @@ EXPORT_SYMBOL(generic_file_read_iter);
+  * This adds the requested page to the page cache if it isn't already there,
+  * and schedules an I/O to read in its contents from disk.
+  */
+-static int page_cache_read(struct file *file, pgoff_t offset)
++static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
+ {
+       struct address_space *mapping = file->f_mapping;
+       struct page *page;
+       int ret;
+ 
+       do {
+-              page = page_cache_alloc_cold(mapping);
++              page = __page_cache_alloc(gfp_mask|__GFP_COLD);
+               if (!page)
+                       return -ENOMEM;
+ 
+-              ret = add_to_page_cache_lru(page, mapping, offset,
+-                              mapping_gfp_constraint(mapping, GFP_KERNEL));
++              ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
+               if (ret == 0)
+                       ret = mapping->a_ops->readpage(file, page);
+               else if (ret == -EEXIST)
+@@ -2020,7 +2018,7 @@ no_cached_page:
+        * We're only likely to ever get here if MADV_RANDOM is in
+        * effect.
+        */
+-      error = page_cache_read(file, offset);
++      error = page_cache_read(file, offset, vmf->gfp_mask);
+ 
+       /*
+        * The page we want has now been added to the page cache.
+diff --git a/mm/memory.c b/mm/memory.c
+index 31ca97f7ebbc..177cb7d111a9 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1990,6 +1990,20 @@ static inline void cow_user_page(struct page *dst, 
struct page *src, unsigned lo
+               copy_user_highpage(dst, src, va, vma);
+ }
+ 
++static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
++{
++      struct file *vm_file = vma->vm_file;
++
++      if (vm_file)
++              return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | 
__GFP_IO;
++
++      /*
++       * Special mappings (e.g. VDSO) do not have any file so fake
++       * a default GFP_KERNEL for them.
++       */
++      return GFP_KERNEL;
++}
++
+ /*
+  * Notify the address space that the page is about to become writable so that
+  * it can prohibit this or wait for the page to get into an appropriate state.
+@@ -2005,6 +2019,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, 
struct page *page,
+       vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+       vmf.pgoff = page->index;
+       vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
++      vmf.gfp_mask = __get_fault_gfp_mask(vma);
+       vmf.page = page;
+       vmf.cow_page = NULL;
+ 
+@@ -2770,6 +2785,7 @@ static int __do_fault(struct vm_area_struct *vma, 
unsigned long address,
+       vmf.pgoff = pgoff;
+       vmf.flags = flags;
+       vmf.page = NULL;
++      vmf.gfp_mask = __get_fault_gfp_mask(vma);
+       vmf.cow_page = cow_page;
+ 
+       ret = vma->vm_ops->fault(vma, &vmf);
+@@ -2936,6 +2952,7 @@ static void do_fault_around(struct vm_area_struct *vma, 
unsigned long address,
+       vmf.pgoff = pgoff;
+       vmf.max_pgoff = max_pgoff;
+       vmf.flags = flags;
++      vmf.gfp_mask = __get_fault_gfp_mask(vma);
+       vma->vm_ops->map_pages(vma, &vmf);
+ }
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 6d0dbde4503b..3309dbda7ffa 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2510,13 +2510,13 @@ void account_page_redirty(struct page *page)
+       if (mapping && mapping_cap_account_dirty(mapping)) {
+               struct inode *inode = mapping->host;
+               struct bdi_writeback *wb;
+-              bool locked;
++              struct wb_lock_cookie cookie = {};
+ 
+-              wb = unlocked_inode_to_wb_begin(inode, &locked);
++              wb = unlocked_inode_to_wb_begin(inode, &cookie);
+               current->nr_dirtied--;
+               dec_zone_page_state(page, NR_DIRTIED);
+               dec_wb_stat(wb, WB_DIRTIED);
+-              unlocked_inode_to_wb_end(inode, locked);
++              unlocked_inode_to_wb_end(inode, &cookie);
+       }
+ }
+ EXPORT_SYMBOL(account_page_redirty);
+@@ -2622,15 +2622,15 @@ void cancel_dirty_page(struct page *page)
+               struct inode *inode = mapping->host;
+               struct bdi_writeback *wb;
+               struct mem_cgroup *memcg;
+-              bool locked;
++              struct wb_lock_cookie cookie = {};
+ 
+               memcg = mem_cgroup_begin_page_stat(page);
+-              wb = unlocked_inode_to_wb_begin(inode, &locked);
++              wb = unlocked_inode_to_wb_begin(inode, &cookie);
+ 
+               if (TestClearPageDirty(page))
+                       account_page_cleaned(page, mapping, memcg, wb);
+ 
+-              unlocked_inode_to_wb_end(inode, locked);
++              unlocked_inode_to_wb_end(inode, &cookie);
+               mem_cgroup_end_page_stat(memcg);
+       } else {
+               ClearPageDirty(page);
+@@ -2663,7 +2663,7 @@ int clear_page_dirty_for_io(struct page *page)
+               struct inode *inode = mapping->host;
+               struct bdi_writeback *wb;
+               struct mem_cgroup *memcg;
+-              bool locked;
++              struct wb_lock_cookie cookie = {};
+ 
+               /*
+                * Yes, Virginia, this is indeed insane.
+@@ -2701,14 +2701,14 @@ int clear_page_dirty_for_io(struct page *page)
+                * exclusion.
+                */
+               memcg = mem_cgroup_begin_page_stat(page);
+-              wb = unlocked_inode_to_wb_begin(inode, &locked);
++              wb = unlocked_inode_to_wb_begin(inode, &cookie);
+               if (TestClearPageDirty(page)) {
+                       mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
+                       dec_zone_page_state(page, NR_FILE_DIRTY);
+                       dec_wb_stat(wb, WB_RECLAIMABLE);
+                       ret = 1;
+               }
+-              unlocked_inode_to_wb_end(inode, locked);
++              unlocked_inode_to_wb_end(inode, &cookie);
+               mem_cgroup_end_page_stat(memcg);
+               return ret;
+       }
+diff --git a/mm/slab.c b/mm/slab.c
+index 4765c97ce690..fa49c01225a7 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3915,7 +3915,8 @@ next:
+       next_reap_node();
+ out:
+       /* Set up the next iteration */
+-      schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
++      schedule_delayed_work_on(smp_processor_id(), work,
++                              round_jiffies_relative(REAPTIMEOUT_AC));
+ }
+ 
+ #ifdef CONFIG_SLABINFO
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index d81186d34558..9103dd15511c 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
+       struct dentry *clnt_dir = pipe_dentry->d_parent;
+       struct dentry *gssd_dir = clnt_dir->d_parent;
+ 
++      dget(pipe_dentry);
+       __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
+       __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
+       __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 6cd8aec146f2..07feb35f1935 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -833,8 +833,25 @@ static int choose_rate(struct snd_pcm_substream 
*substream,
+       return snd_pcm_hw_param_near(substream, params, 
SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
+ }
+ 
+-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+-                                   bool trylock)
++/* parameter locking: returns immediately if tried during streaming */
++static int lock_params(struct snd_pcm_runtime *runtime)
++{
++      if (mutex_lock_interruptible(&runtime->oss.params_lock))
++              return -ERESTARTSYS;
++      if (atomic_read(&runtime->oss.rw_ref)) {
++              mutex_unlock(&runtime->oss.params_lock);
++              return -EBUSY;
++      }
++      return 0;
++}
++
++static void unlock_params(struct snd_pcm_runtime *runtime)
++{
++      mutex_unlock(&runtime->oss.params_lock);
++}
++
++/* call with params_lock held */
++static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
+ {
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_pcm_hw_params *params, *sparams;
+@@ -848,12 +865,9 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+       struct snd_mask sformat_mask;
+       struct snd_mask mask;
+ 
+-      if (trylock) {
+-              if (!(mutex_trylock(&runtime->oss.params_lock)))
+-                      return -EAGAIN;
+-      } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+-              return -EINTR;
+-      sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
++      if (!runtime->oss.params)
++              return 0;
++      sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
+       params = kmalloc(sizeof(*params), GFP_KERNEL);
+       sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
+       if (!sw_params || !params || !sparams) {
+@@ -991,7 +1005,6 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+               goto failure;
+       }
+ 
+-      memset(sw_params, 0, sizeof(*sw_params));
+       if (runtime->oss.trigger) {
+               sw_params->start_threshold = 1;
+       } else {
+@@ -1079,6 +1092,23 @@ failure:
+       kfree(sw_params);
+       kfree(params);
+       kfree(sparams);
++      return err;
++}
++
++/* this one takes the lock by itself */
++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
++                                   bool trylock)
++{
++      struct snd_pcm_runtime *runtime = substream->runtime;
++      int err;
++
++      if (trylock) {
++              if (!(mutex_trylock(&runtime->oss.params_lock)))
++                      return -EAGAIN;
++      } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
++              return -ERESTARTSYS;
++
++      err = snd_pcm_oss_change_params_locked(substream);
+       mutex_unlock(&runtime->oss.params_lock);
+       return err;
+ }
+@@ -1107,6 +1137,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
+       return 0;
+ }
+ 
++/* call with params_lock held */
++/* NOTE: this always call PREPARE unconditionally no matter whether
++ * runtime->oss.prepare is set or not
++ */
+ static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
+ {
+       int err;
+@@ -1131,14 +1165,35 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
+       struct snd_pcm_runtime *runtime;
+       int err;
+ 
+-      if (substream == NULL)
+-              return 0;
+       runtime = substream->runtime;
+       if (runtime->oss.params) {
+               err = snd_pcm_oss_change_params(substream, false);
+               if (err < 0)
+                       return err;
+       }
++      if (runtime->oss.prepare) {
++              if (mutex_lock_interruptible(&runtime->oss.params_lock))
++                      return -ERESTARTSYS;
++              err = snd_pcm_oss_prepare(substream);
++              mutex_unlock(&runtime->oss.params_lock);
++              if (err < 0)
++                      return err;
++      }
++      return 0;
++}
++
++/* call with params_lock held */
++static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
++{
++      struct snd_pcm_runtime *runtime;
++      int err;
++
++      runtime = substream->runtime;
++      if (runtime->oss.params) {
++              err = snd_pcm_oss_change_params_locked(substream);
++              if (err < 0)
++                      return err;
++      }
+       if (runtime->oss.prepare) {
+               err = snd_pcm_oss_prepare(substream);
+               if (err < 0)
+@@ -1367,13 +1422,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+       if (atomic_read(&substream->mmap_count))
+               return -ENXIO;
+ 
+-      if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+-              return tmp;
++      atomic_inc(&runtime->oss.rw_ref);
+       while (bytes > 0) {
+               if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
++              tmp = snd_pcm_oss_make_ready_locked(substream);
++              if (tmp < 0)
++                      goto err;
+               if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+                       tmp = bytes;
+                       if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
+@@ -1429,6 +1486,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+               }
+               tmp = 0;
+       }
++      atomic_dec(&runtime->oss.rw_ref);
+       return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+@@ -1474,13 +1532,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+       if (atomic_read(&substream->mmap_count))
+               return -ENXIO;
+ 
+-      if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+-              return tmp;
++      atomic_inc(&runtime->oss.rw_ref);
+       while (bytes > 0) {
+               if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+                       tmp = -ERESTARTSYS;
+                       break;
+               }
++              tmp = snd_pcm_oss_make_ready_locked(substream);
++              if (tmp < 0)
++                      goto err;
+               if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+                       if (runtime->oss.buffer_used == 0) {
+                               tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
+@@ -1521,6 +1581,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+               }
+               tmp = 0;
+       }
++      atomic_dec(&runtime->oss.rw_ref);
+       return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+@@ -1536,10 +1597,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
+                       continue;
+               runtime = substream->runtime;
+               snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
++              mutex_lock(&runtime->oss.params_lock);
+               runtime->oss.prepare = 1;
+               runtime->oss.buffer_used = 0;
+               runtime->oss.prev_hw_ptr_period = 0;
+               runtime->oss.period_ptr = 0;
++              mutex_unlock(&runtime->oss.params_lock);
+       }
+       return 0;
+ }
+@@ -1625,9 +1688,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+                       goto __direct;
+               if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+                       return err;
++              atomic_inc(&runtime->oss.rw_ref);
++              if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
++                      atomic_dec(&runtime->oss.rw_ref);
++                      return -ERESTARTSYS;
++              }
+               format = snd_pcm_oss_format_from(runtime->oss.format);
+               width = snd_pcm_format_physical_width(format);
+-              mutex_lock(&runtime->oss.params_lock);
+               if (runtime->oss.buffer_used > 0) {
+ #ifdef OSS_DEBUG
+                       pcm_dbg(substream->pcm, "sync: buffer_used\n");
+@@ -1637,10 +1704,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+                                                  runtime->oss.buffer + runtime->oss.buffer_used,
+                                                  size);
+                       err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
+-                      if (err < 0) {
+-                              mutex_unlock(&runtime->oss.params_lock);
+-                              return err;
+-                      }
++                      if (err < 0)
++                              goto unlock;
+               } else if (runtime->oss.period_ptr > 0) {
+ #ifdef OSS_DEBUG
+                       pcm_dbg(substream->pcm, "sync: period_ptr\n");
+@@ -1650,10 +1715,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+                                                  runtime->oss.buffer,
+                                                  size * 8 / width);
+                       err = snd_pcm_oss_sync1(substream, size);
+-                      if (err < 0) {
+-                              mutex_unlock(&runtime->oss.params_lock);
+-                              return err;
+-                      }
++                      if (err < 0)
++                              goto unlock;
+               }
+               /*
+                * The ALSA's period might be a bit large than OSS one.
+@@ -1684,7 +1747,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+                               snd_pcm_lib_writev(substream, buffers, size);
+                       }
+               }
++unlock:
+               mutex_unlock(&runtime->oss.params_lock);
++              atomic_dec(&runtime->oss.rw_ref);
++              if (err < 0)
++                      return err;
+               /*
+                * finish sync: drain the buffer
+                */
+@@ -1695,7 +1762,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+               substream->f_flags = saved_f_flags;
+               if (err < 0)
+                       return err;
++              mutex_lock(&runtime->oss.params_lock);
+               runtime->oss.prepare = 1;
++              mutex_unlock(&runtime->oss.params_lock);
+       }
+ 
+       substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
+@@ -1706,8 +1775,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+               err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+               if (err < 0)
+                       return err;
++              mutex_lock(&runtime->oss.params_lock);
+               runtime->oss.buffer_used = 0;
+               runtime->oss.prepare = 1;
++              mutex_unlock(&runtime->oss.params_lock);
+       }
+       return 0;
+ }
+@@ -1719,6 +1790,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
+       for (idx = 1; idx >= 0; --idx) {
+               struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+               struct snd_pcm_runtime *runtime;
++              int err;
++
+               if (substream == NULL)
+                       continue;
+               runtime = substream->runtime;
+@@ -1726,10 +1799,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
+                       rate = 1000;
+               else if (rate > 192000)
+                       rate = 192000;
++              err = lock_params(runtime);
++              if (err < 0)
++                      return err;
+               if (runtime->oss.rate != rate) {
+                       runtime->oss.params = 1;
+                       runtime->oss.rate = rate;
+               }
++              unlock_params(runtime);
+       }
+       return snd_pcm_oss_get_rate(pcm_oss_file);
+ }
+@@ -1754,13 +1831,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
+       for (idx = 1; idx >= 0; --idx) {
+               struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+               struct snd_pcm_runtime *runtime;
++              int err;
++
+               if (substream == NULL)
+                       continue;
+               runtime = substream->runtime;
++              err = lock_params(runtime);
++              if (err < 0)
++                      return err;
+               if (runtime->oss.channels != channels) {
+                       runtime->oss.params = 1;
+                       runtime->oss.channels = channels;
+               }
++              unlock_params(runtime);
+       }
+       return snd_pcm_oss_get_channels(pcm_oss_file);
+ }
+@@ -1833,6 +1916,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
+ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
+ {
+       int formats, idx;
++      int err;
+       
+       if (format != AFMT_QUERY) {
+               formats = snd_pcm_oss_get_formats(pcm_oss_file);
+@@ -1846,10 +1930,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
+                       if (substream == NULL)
+                               continue;
+                       runtime = substream->runtime;
++                      err = lock_params(runtime);
++                      if (err < 0)
++                              return err;
+                       if (runtime->oss.format != format) {
+                               runtime->oss.params = 1;
+                               runtime->oss.format = format;
+                       }
++                      unlock_params(runtime);
+               }
+       }
+       return snd_pcm_oss_get_format(pcm_oss_file);
+@@ -1869,8 +1957,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
+ {
+       struct snd_pcm_runtime *runtime;
+ 
+-      if (substream == NULL)
+-              return 0;
+       runtime = substream->runtime;
+       if (subdivide == 0) {
+               subdivide = runtime->oss.subdivision;
+@@ -1894,9 +1980,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
+ 
+       for (idx = 1; idx >= 0; --idx) {
+               struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
++              struct snd_pcm_runtime *runtime;
++
+               if (substream == NULL)
+                       continue;
+-              if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
++              runtime = substream->runtime;
++              err = lock_params(runtime);
++              if (err < 0)
++                      return err;
++              err = snd_pcm_oss_set_subdivide1(substream, subdivide);
++              unlock_params(runtime);
++              if (err < 0)
+                       return err;
+       }
+       return err;
+@@ -1906,8 +2000,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
+ {
+       struct snd_pcm_runtime *runtime;
+ 
+-      if (substream == NULL)
+-              return 0;
+       runtime = substream->runtime;
+       if (runtime->oss.subdivision || runtime->oss.fragshift)
+               return -EINVAL;
+@@ -1927,9 +2019,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
+ 
+       for (idx = 1; idx >= 0; --idx) {
+               struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
++              struct snd_pcm_runtime *runtime;
++
+               if (substream == NULL)
+                       continue;
+-              if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
++              runtime = substream->runtime;
++              err = lock_params(runtime);
++              if (err < 0)
++                      return err;
++              err = snd_pcm_oss_set_fragment1(substream, val);
++              unlock_params(runtime);
++              if (err < 0)
+                       return err;
+       }
+       return err;
+@@ -2013,6 +2113,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+       }
+               if (psubstream) {
+                       runtime = psubstream->runtime;
++              cmd = 0;
++              if (mutex_lock_interruptible(&runtime->oss.params_lock))
++                      return -ERESTARTSYS;
+               if (trigger & PCM_ENABLE_OUTPUT) {
+                       if (runtime->oss.trigger)
+                               goto _skip1;
+@@ -2030,13 +2133,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+                       cmd = SNDRV_PCM_IOCTL_DROP;
+                       runtime->oss.prepare = 1;
+               }
+-              err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
+-              if (err < 0)
+-                      return err;
+-      }
+  _skip1:
++              mutex_unlock(&runtime->oss.params_lock);
++              if (cmd) {
++                      err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
++                      if (err < 0)
++                              return err;
++              }
++      }
+       if (csubstream) {
+                       runtime = csubstream->runtime;
++              cmd = 0;
++              if (mutex_lock_interruptible(&runtime->oss.params_lock))
++                      return -ERESTARTSYS;
+               if (trigger & PCM_ENABLE_INPUT) {
+                       if (runtime->oss.trigger)
+                               goto _skip2;
+@@ -2051,11 +2160,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+                       cmd = SNDRV_PCM_IOCTL_DROP;
+                       runtime->oss.prepare = 1;
+               }
+-              err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
+-              if (err < 0)
+-                      return err;
+-      }
+  _skip2:
++              mutex_unlock(&runtime->oss.params_lock);
++              if (cmd) {
++                      err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
++                      if (err < 0)
++                              return err;
++              }
++      }
+       return 0;
+ }
+ 
+@@ -2307,6 +2419,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
+       runtime->oss.maxfrags = 0;
+       runtime->oss.subdivision = 0;
+       substream->pcm_release = snd_pcm_oss_release_substream;
++      atomic_set(&runtime->oss.rw_ref, 0);
+ }
+ 
+ static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 074363b63cc4..6bda8f6c5f84 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -28,6 +28,7 @@
+ #include <sound/core.h>
+ #include <sound/minors.h>
+ #include <sound/pcm.h>
++#include <sound/timer.h>
+ #include <sound/control.h>
+ #include <sound/info.h>
+ 
+@@ -1025,8 +1026,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
+       snd_free_pages((void*)runtime->control,
+                      PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
+       kfree(runtime->hw_constraints.rules);
+-      kfree(runtime);
++      /* Avoid concurrent access to runtime via PCM timer interface */
++      if (substream->timer)
++              spin_lock_irq(&substream->timer->lock);
+       substream->runtime = NULL;
++      if (substream->timer)
++              spin_unlock_irq(&substream->timer->lock);
++      kfree(runtime);
+       put_pid(substream->pid);
+       substream->pid = NULL;
+       substream->pstr->substream_opened--;
+diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
+index 09a89094dcf7..4e304a24924a 100644
+--- a/sound/core/rawmidi_compat.c
++++ b/sound/core/rawmidi_compat.c
+@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
+       struct snd_rawmidi_params params;
+       unsigned int val;
+ 
+-      if (rfile->output == NULL)
+-              return -EINVAL;
+       if (get_user(params.stream, &src->stream) ||
+           get_user(params.buffer_size, &src->buffer_size) ||
+           get_user(params.avail_min, &src->avail_min) ||
+@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
+       params.no_active_sensing = val;
+       switch (params.stream) {
+       case SNDRV_RAWMIDI_STREAM_OUTPUT:
++              if (!rfile->output)
++                      return -EINVAL;
+               return snd_rawmidi_output_params(rfile->output, &params);
+       case SNDRV_RAWMIDI_STREAM_INPUT:
++              if (!rfile->input)
++                      return -EINVAL;
+               return snd_rawmidi_input_params(rfile->input, &params);
+       }
+       return -EINVAL;
+@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
+       int err;
+       struct snd_rawmidi_status status;
+ 
+-      if (rfile->output == NULL)
+-              return -EINVAL;
+       if (get_user(status.stream, &src->stream))
+               return -EFAULT;
+ 
+       switch (status.stream) {
+       case SNDRV_RAWMIDI_STREAM_OUTPUT:
++              if (!rfile->output)
++                      return -EINVAL;
+               err = snd_rawmidi_output_status(rfile->output, &status);
+               break;
+       case SNDRV_RAWMIDI_STREAM_INPUT:
++              if (!rfile->input)
++                      return -EINVAL;
+               err = snd_rawmidi_input_status(rfile->input, &status);
+               break;
+       default:
+@@ -113,16 +117,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+       int err;
+       struct snd_rawmidi_status status;
+ 
+-      if (rfile->output == NULL)
+-              return -EINVAL;
+       if (get_user(status.stream, &src->stream))
+               return -EFAULT;
+ 
+       switch (status.stream) {
+       case SNDRV_RAWMIDI_STREAM_OUTPUT:
++              if (!rfile->output)
++                      return -EINVAL;
+               err = snd_rawmidi_output_status(rfile->output, &status);
+               break;
+       case SNDRV_RAWMIDI_STREAM_INPUT:
++              if (!rfile->input)
++                      return -EINVAL;
+               err = snd_rawmidi_input_status(rfile->input, &status);
+               break;
+       default:
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index fbd00821e326..3be91696ac35 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1549,7 +1549,8 @@ static void azx_check_snoop_available(struct azx *chip)
+                */
+               u8 val;
+               pci_read_config_byte(chip->pci, 0x42, &val);
+-              if (!(val & 0x80) && chip->pci->revision == 0x30)
++              if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
++                                    chip->pci->revision == 0x20))
+                       snoop = false;
+       }
+ 
+diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
+index 4452fea0b118..bd4998f577a0 100644
+--- a/sound/soc/codecs/ssm2602.c
++++ b/sound/soc/codecs/ssm2602.c
+@@ -54,10 +54,17 @@ struct ssm2602_priv {
+  * using 2 wire for device control, so we cache them instead.
+  * There is no point in caching the reset register
+  */
+-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
+-      0x0097, 0x0097, 0x0079, 0x0079,
+-      0x000a, 0x0008, 0x009f, 0x000a,
+-      0x0000, 0x0000
++static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
++      { .reg = 0x00, .def = 0x0097 },
++      { .reg = 0x01, .def = 0x0097 },
++      { .reg = 0x02, .def = 0x0079 },
++      { .reg = 0x03, .def = 0x0079 },
++      { .reg = 0x04, .def = 0x000a },
++      { .reg = 0x05, .def = 0x0008 },
++      { .reg = 0x06, .def = 0x009f },
++      { .reg = 0x07, .def = 0x000a },
++      { .reg = 0x08, .def = 0x0000 },
++      { .reg = 0x09, .def = 0x0000 }
+ };
+ 
+ 
+@@ -618,8 +625,8 @@ const struct regmap_config ssm2602_regmap_config = {
+       .volatile_reg = ssm2602_register_volatile,
+ 
+       .cache_type = REGCACHE_RBTREE,
+-      .reg_defaults_raw = ssm2602_reg,
+-      .num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
++      .reg_defaults = ssm2602_reg,
++      .num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
+ };
+ EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
+ 
+diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
+index cebea9b7f769..6a9be1df7851 100644
+--- a/sound/usb/line6/midi.c
++++ b/sound/usb/line6/midi.c
+@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
+       }
+ 
+       usb_fill_int_urb(urb, line6->usbdev,
+-                       usb_sndbulkpipe(line6->usbdev,
++                       usb_sndintpipe(line6->usbdev,
+                                        line6->properties->ep_ctrl_w),
+                        transfer_buffer, length, midi_sent, line6,
+                        line6->interval);
+diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
+index 6ea4fcfaab36..a767a6400c5c 100644
+--- a/tools/perf/tests/code-reading.c
++++ b/tools/perf/tests/code-reading.c
+@@ -182,8 +182,6 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
+       unsigned char buf2[BUFSZ];
+       size_t ret_len;
+       u64 objdump_addr;
+-      const char *objdump_name;
+-      char decomp_name[KMOD_DECOMP_LEN];
+       int ret;
+ 
+       pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
+@@ -244,25 +242,9 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
+               state->done[state->done_cnt++] = al.map->start;
+       }
+ 
+-      objdump_name = al.map->dso->long_name;
+-      if (dso__needs_decompress(al.map->dso)) {
+-              if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+-                                               decomp_name,
+-                                               sizeof(decomp_name)) < 0) {
+-                      pr_debug("decompression failed\n");
+-                      return -1;
+-              }
+-
+-              objdump_name = decomp_name;
+-      }
+-
+       /* Read the object code using objdump */
+       objdump_addr = map__rip_2objdump(al.map, al.addr);
+-      ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+-
+-      if (dso__needs_decompress(al.map->dso))
+-              unlink(objdump_name);
+-
++      ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+       if (ret > 0) {
+               /*
+                * The kernel maps are inaccurate - assume objdump is right in
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index eeeae0629ad3..0b540b84f8b7 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1270,6 +1270,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
+       intel_pt_clear_tx_flags(decoder);
+       decoder->have_tma = false;
+       decoder->cbr = 0;
++      decoder->timestamp_insn_cnt = 0;
+       decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+       decoder->overflow = true;
+       return -EOVERFLOW;
+@@ -1492,6 +1493,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
+               case INTEL_PT_PSBEND:
+                       intel_pt_log("ERROR: Missing TIP after FUP\n");
+                       decoder->pkt_state = INTEL_PT_STATE_ERR3;
++                      decoder->pkt_step = 0;
+                       return -ENOENT;
+ 
+               case INTEL_PT_OVF:
+@@ -2152,14 +2154,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+       return &decoder->state;
+ }
+ 
+-static bool intel_pt_at_psb(unsigned char *buf, size_t len)
+-{
+-      if (len < INTEL_PT_PSB_LEN)
+-              return false;
+-      return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
+-                    INTEL_PT_PSB_LEN);
+-}
+-
+ /**
+  * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
+  * @buf: pointer to buffer pointer
+@@ -2248,6 +2242,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
+  * @buf: buffer
+  * @len: size of buffer
+  * @tsc: TSC value returned
++ * @rem: returns remaining size when TSC is found
+  *
+  * Find a TSC packet in @buf and return the TSC value.  This function assumes
+  * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
+@@ -2255,7 +2250,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
+  *
+  * Return: %true if TSC is found, false otherwise.
+  */
+-static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
++static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
++                            size_t *rem)
+ {
+       struct intel_pt_pkt packet;
+       int ret;
+@@ -2266,6 +2262,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+                       return false;
+               if (packet.type == INTEL_PT_TSC) {
+                       *tsc = packet.payload;
++                      *rem = len;
+                       return true;
+               }
+               if (packet.type == INTEL_PT_PSBEND)
+@@ -2316,6 +2313,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
+  * @len_a: size of first buffer
+  * @buf_b: second buffer
+  * @len_b: size of second buffer
++ * @consecutive: returns true if there is data in buf_b that is consecutive
++ *               to buf_a
+  *
+  * If the trace contains TSC we can look at the last TSC of @buf_a and the
+  * first TSC of @buf_b in order to determine if the buffers overlap, and then
+@@ -2328,33 +2327,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
+ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
+                                               size_t len_a,
+                                               unsigned char *buf_b,
+-                                              size_t len_b)
++                                              size_t len_b, bool *consecutive)
+ {
+       uint64_t tsc_a, tsc_b;
+       unsigned char *p;
+-      size_t len;
++      size_t len, rem_a, rem_b;
+ 
+       p = intel_pt_last_psb(buf_a, len_a);
+       if (!p)
+               return buf_b; /* No PSB in buf_a => no overlap */
+ 
+       len = len_a - (p - buf_a);
+-      if (!intel_pt_next_tsc(p, len, &tsc_a)) {
++      if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
+               /* The last PSB+ in buf_a is incomplete, so go back one more */
+               len_a -= len;
+               p = intel_pt_last_psb(buf_a, len_a);
+               if (!p)
+                       return buf_b; /* No full PSB+ => assume no overlap */
+               len = len_a - (p - buf_a);
+-              if (!intel_pt_next_tsc(p, len, &tsc_a))
++              if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
+                       return buf_b; /* No TSC in buf_a => assume no overlap */
+       }
+ 
+       while (1) {
+               /* Ignore PSB+ with no TSC */
+-              if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
+-                  intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
+-                      return buf_b; /* tsc_a < tsc_b => no overlap */
++              if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
++                      int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
++
++                      /* Same TSC, so buffers are consecutive */
++                      if (!cmp && rem_b >= rem_a) {
++                              *consecutive = true;
++                              return buf_b + len_b - (rem_b - rem_a);
++                      }
++                      if (cmp < 0)
++                              return buf_b; /* tsc_a < tsc_b => no overlap */
++              }
+ 
+               if (!intel_pt_step_psb(&buf_b, &len_b))
+                       return buf_b + len_b; /* No PSB in buf_b => no data */
+@@ -2368,6 +2375,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
+  * @buf_b: second buffer
+  * @len_b: size of second buffer
+  * @have_tsc: can use TSC packets to detect overlap
++ * @consecutive: returns true if there is data in buf_b that is consecutive
++ *               to buf_a
+  *
+  * When trace samples or snapshots are recorded there is the possibility that
+  * the data overlaps.  Note that, for the purposes of decoding, data is only
+@@ -2378,7 +2387,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
+  */
+ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+                                    unsigned char *buf_b, size_t len_b,
+-                                   bool have_tsc)
++                                   bool have_tsc, bool *consecutive)
+ {
+       unsigned char *found;
+ 
+@@ -2390,7 +2399,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+               return buf_b; /* No overlap */
+ 
+       if (have_tsc) {
+-              found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
++              found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
++                                                consecutive);
+               if (found)
+                       return found;
+       }
+@@ -2405,28 +2415,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+       }
+ 
+       /* Now len_b >= len_a */
+-      if (len_b > len_a) {
+-              /* The leftover buffer 'b' must start at a PSB */
+-              while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
+-                      if (!intel_pt_step_psb(&buf_a, &len_a))
+-                              return buf_b; /* No overlap */
+-              }
+-      }
+-
+       while (1) {
+               /* Potential overlap so check the bytes */
+               found = memmem(buf_a, len_a, buf_b, len_a);
+-              if (found)
++              if (found) {
++                      *consecutive = true;
+                       return buf_b + len_a;
++              }
+ 
+               /* Try again at next PSB in buffer 'a' */
+               if (!intel_pt_step_psb(&buf_a, &len_a))
+                       return buf_b; /* No overlap */
+-
+-              /* The leftover buffer 'b' must start at a PSB */
+-              while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
+-                      if (!intel_pt_step_psb(&buf_a, &len_a))
+-                              return buf_b; /* No overlap */
+-              }
+       }
+ }
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+index 02c38fec1c37..89a3eda6a318 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+@@ -102,7 +102,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
+ 
+ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+                                    unsigned char *buf_b, size_t len_b,
+-                                   bool have_tsc);
++                                   bool have_tsc, bool *consecutive);
+ 
+ int intel_pt__strerror(int code, char *buf, size_t buflen);
+ 
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 89927b5beebf..3693cb26ec66 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -125,6 +125,7 @@ struct intel_pt_queue {
+       bool stop;
+       bool step_through_buffers;
+       bool use_buffer_pid_tid;
++      bool sync_switch;
+       pid_t pid, tid;
+       int cpu;
+       int switch_state;
+@@ -188,14 +189,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
+ static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
+                                  struct auxtrace_buffer *b)
+ {
++      bool consecutive = false;
+       void *start;
+ 
+       start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
+-                                    pt->have_tsc);
++                                    pt->have_tsc, &consecutive);
+       if (!start)
+               return -EINVAL;
+       b->use_size = b->data + b->size - start;
+       b->use_data = start;
++      if (b->use_size && consecutive)
++              b->consecutive = true;
+       return 0;
+ }
+ 
+@@ -849,10 +853,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
+                       if (pt->timeless_decoding || !pt->have_sched_switch)
+                               ptq->use_buffer_pid_tid = true;
+               }
++
++              ptq->sync_switch = pt->sync_switch;
+       }
+ 
+       if (!ptq->on_heap &&
+-          (!pt->sync_switch ||
++          (!ptq->sync_switch ||
+            ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
+               const struct intel_pt_state *state;
+               int ret;
+@@ -1235,7 +1241,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
+       if (pt->synth_opts.last_branch)
+               intel_pt_update_last_branch_rb(ptq);
+ 
+-      if (!pt->sync_switch)
++      if (!ptq->sync_switch)
+               return 0;
+ 
+       if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
+@@ -1316,6 +1322,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
+       return switch_ip;
+ }
+ 
++static void intel_pt_enable_sync_switch(struct intel_pt *pt)
++{
++      unsigned int i;
++
++      pt->sync_switch = true;
++
++      for (i = 0; i < pt->queues.nr_queues; i++) {
++              struct auxtrace_queue *queue = &pt->queues.queue_array[i];
++              struct intel_pt_queue *ptq = queue->priv;
++
++              if (ptq)
++                      ptq->sync_switch = true;
++      }
++}
++
+ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+ {
+       const struct intel_pt_state *state = ptq->state;
+@@ -1332,7 +1353,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+                       if (pt->switch_ip) {
+                               intel_pt_log("switch_ip: %"PRIx64" ptss_ip: 
%"PRIx64"\n",
+                                            pt->switch_ip, pt->ptss_ip);
+-                              pt->sync_switch = true;
++                              intel_pt_enable_sync_switch(pt);
+                       }
+               }
+       }
+@@ -1348,9 +1369,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+               if (state->err) {
+                       if (state->err == INTEL_PT_ERR_NODATA)
+                               return 1;
+-                      if (pt->sync_switch &&
++                      if (ptq->sync_switch &&
+                           state->from_ip >= pt->kernel_start) {
+-                              pt->sync_switch = false;
++                              ptq->sync_switch = false;
+                               intel_pt_next_tid(pt, ptq);
+                       }
+                       if (pt->synth_opts.errors) {
+@@ -1376,7 +1397,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+                                    state->timestamp, state->est_timestamp);
+                       ptq->timestamp = state->est_timestamp;
+               /* Use estimated TSC in unknown switch state */
+-              } else if (pt->sync_switch &&
++              } else if (ptq->sync_switch &&
+                          ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
+                          intel_pt_is_switch_ip(ptq, state->to_ip) &&
+                          ptq->next_tid == -1) {
+@@ -1523,7 +1544,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
+               return 1;
+ 
+       ptq = intel_pt_cpu_to_ptq(pt, cpu);
+-      if (!ptq)
++      if (!ptq || !ptq->sync_switch)
+               return 1;
+ 
+       switch (ptq->switch_state) {
