commit:     b683818e028e60256838523df40ef06ddb1d0244
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Dec  9 23:03:51 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Dec  9 23:03:51 2014 +0000
URL:        
http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=b683818e

Linux patch 3.14.26

---
 0000_README              |    4 +
 1025_linux-3.14.26.patch | 2603 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2607 insertions(+)

diff --git a/0000_README b/0000_README
index d1f2872..e1c5538 100644
--- a/0000_README
+++ b/0000_README
@@ -142,6 +142,10 @@ Patch:  1024_linux-3.14.25.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.14.25
 
+Patch:  1025_linux-3.14.26.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.14.26
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1025_linux-3.14.26.patch b/1025_linux-3.14.26.patch
new file mode 100644
index 0000000..5d79376
--- /dev/null
+++ b/1025_linux-3.14.26.patch
@@ -0,0 +1,2603 @@
+diff --git 
a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt 
b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+index ce6a1a072028..8a3c40829899 100644
+--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
++++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+@@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt 
parents.
+   Example:
+       interrupts-extended = <&intc1 5 1>, <&intc2 1 0>;
+ 
+-A device node may contain either "interrupts" or "interrupts-extended", but 
not
+-both. If both properties are present, then the operating system should log an
+-error and use only the data in "interrupts".
+-
+ 2) Interrupt controller nodes
+ -----------------------------
+ 
+diff --git a/Makefile b/Makefile
+index eb96e40238f7..63a5ee858cc3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 14
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Remembering Coco
+ 
+diff --git a/arch/arm/include/asm/thread_info.h 
b/arch/arm/include/asm/thread_info.h
+index 71a06b293489..3e635eed9c6c 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -43,16 +43,6 @@ struct cpu_context_save {
+       __u32   extra[2];               /* Xscale 'acc' register, etc */
+ };
+ 
+-struct arm_restart_block {
+-      union {
+-              /* For user cache flushing */
+-              struct {
+-                      unsigned long start;
+-                      unsigned long end;
+-              } cache;
+-      };
+-};
+-
+ /*
+  * low level task data that entry.S needs immediate access to.
+  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
+@@ -78,7 +68,6 @@ struct thread_info {
+       unsigned long           thumbee_state;  /* ThumbEE Handler Base 
register */
+ #endif
+       struct restart_block    restart_block;
+-      struct arm_restart_block        arm_restart_block;
+ };
+ 
+ #define INIT_THREAD_INFO(tsk)                                         \
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 9265b8bb529a..3f314433d653 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -510,8 +510,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
+       return regs->ARM_r0;
+ }
+ 
+-static long do_cache_op_restart(struct restart_block *);
+-
+ static inline int
+ __do_cache_op(unsigned long start, unsigned long end)
+ {
+@@ -520,24 +518,8 @@ __do_cache_op(unsigned long start, unsigned long end)
+       do {
+               unsigned long chunk = min(PAGE_SIZE, end - start);
+ 
+-              if (signal_pending(current)) {
+-                      struct thread_info *ti = current_thread_info();
+-
+-                      ti->restart_block = (struct restart_block) {
+-                              .fn     = do_cache_op_restart,
+-                      };
+-
+-                      ti->arm_restart_block = (struct arm_restart_block) {
+-                              {
+-                                      .cache = {
+-                                              .start  = start,
+-                                              .end    = end,
+-                                      },
+-                              },
+-                      };
+-
+-                      return -ERESTART_RESTARTBLOCK;
+-              }
++              if (fatal_signal_pending(current))
++                      return 0;
+ 
+               ret = flush_cache_user_range(start, start + chunk);
+               if (ret)
+@@ -550,15 +532,6 @@ __do_cache_op(unsigned long start, unsigned long end)
+       return 0;
+ }
+ 
+-static long do_cache_op_restart(struct restart_block *unused)
+-{
+-      struct arm_restart_block *restart_block;
+-
+-      restart_block = &current_thread_info()->arm_restart_block;
+-      return __do_cache_op(restart_block->cache.start,
+-                           restart_block->cache.end);
+-}
+-
+ static inline int
+ do_cache_op(unsigned long start, unsigned long end, int flags)
+ {
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 74f6033e76dd..fdedc31e0f40 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -211,7 +211,6 @@ __v7_pj4b_setup:
+ /* Auxiliary Debug Modes Control 1 Register */
+ #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
+ #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
+-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
+ #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
+ 
+ /* Auxiliary Debug Modes Control 2 Register */
+@@ -234,7 +233,6 @@ __v7_pj4b_setup:
+       /* Auxiliary Debug Modes Control 1 Register */
+       mrc     p15, 1, r0, c15, c1, 1
+       orr     r0, r0, #PJ4B_CLEAN_LINE
+-      orr     r0, r0, #PJ4B_BCK_OFF_STREX
+       orr     r0, r0, #PJ4B_INTER_PARITY
+       bic     r0, r0, #PJ4B_STATIC_BP
+       mcr     p15, 1, r0, c15, c1, 1
+diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
+index d19b1cfcad91..b34b95f45cb3 100644
+--- a/arch/arm/mm/proc-xscale.S
++++ b/arch/arm/mm/proc-xscale.S
+@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
+       mrc     p15, 0, r5, c15, c1, 0  @ CP access reg
+       mrc     p15, 0, r6, c13, c0, 0  @ PID
+       mrc     p15, 0, r7, c3, c0, 0   @ domain ID
+-      mrc     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
++      mrc     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
+       mrc     p15, 0, r9, c1, c0, 0   @ control reg
+       bic     r4, r4, #2              @ clear frequency change bit
+       stmia   r0, {r4 - r9}           @ store cp regs
+@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
+       mcr     p15, 0, r6, c13, c0, 0  @ PID
+       mcr     p15, 0, r7, c3, c0, 0   @ domain ID
+       mcr     p15, 0, r1, c2, c0, 0   @ translation table base addr
+-      mcr     p15, 0, r8, c1, c1, 0   @ auxiliary control reg
++      mcr     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
+       mov     r0, r9                  @ control register
+       b       cpu_resume_mmu
+ ENDPROC(cpu_xscale_do_resume)
+diff --git a/arch/mips/loongson/common/Makefile 
b/arch/mips/loongson/common/Makefile
+index 9e4484ccbb03..9005a8d60969 100644
+--- a/arch/mips/loongson/common/Makefile
++++ b/arch/mips/loongson/common/Makefile
+@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
+ # Serial port support
+ #
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+-obj-$(CONFIG_SERIAL_8250) += serial.o
++loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
++obj-y += $(loongson-serial-m) $(loongson-serial-y)
+ obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
+ obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
+ 
+diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
+index 6854ed5097d2..83a1dfd8f0e3 100644
+--- a/arch/mips/oprofile/backtrace.c
++++ b/arch/mips/oprofile/backtrace.c
+@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe 
*old_frame,
+                               /* This marks the end of the previous function,
+                                  which means we overran. */
+                               break;
+-                      stack_size = (unsigned) stack_adjustment;
++                      stack_size = (unsigned long) stack_adjustment;
+               } else if (is_ra_save_ins(&ip)) {
+                       int ra_slot = ip.i_format.simmediate;
+                       if (ra_slot < 0)
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c 
b/arch/powerpc/platforms/powernv/pci-ioda.c
+index beedaf0c5e75..d558b8595e6f 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -902,7 +902,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, 
struct pci_dev *dev,
+                                 unsigned int is_64, struct msi_msg *msg)
+ {
+       struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
+-      struct pci_dn *pdn = pci_get_pdn(dev);
+       struct irq_data *idata;
+       struct irq_chip *ichip;
+       unsigned int xive_num = hwirq - phb->msi_base;
+@@ -918,7 +917,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, 
struct pci_dev *dev,
+               return -ENXIO;
+ 
+       /* Force 32-bit MSI on some broken devices */
+-      if (pdn && pdn->force_32bit_msi)
++      if (dev->no_64bit_msi)
+               is_64 = 0;
+ 
+       /* Assign XIVE to PE */
+diff --git a/arch/powerpc/platforms/powernv/pci.c 
b/arch/powerpc/platforms/powernv/pci.c
+index 8518817dcdfd..52c1162bcee3 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -1,3 +1,4 @@
++
+ /*
+  * Support PCI/PCIe on PowerNV platforms
+  *
+@@ -50,9 +51,8 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int 
nvec, int type)
+ {
+       struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+       struct pnv_phb *phb = hose->private_data;
+-      struct pci_dn *pdn = pci_get_pdn(pdev);
+ 
+-      if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
++      if (pdev->no_64bit_msi && !phb->msi32_support)
+               return -ENODEV;
+ 
+       return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
+diff --git a/arch/powerpc/platforms/pseries/msi.c 
b/arch/powerpc/platforms/pseries/msi.c
+index 0c882e83c4ce..6849d85ea0d5 100644
+--- a/arch/powerpc/platforms/pseries/msi.c
++++ b/arch/powerpc/platforms/pseries/msi.c
+@@ -428,7 +428,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int 
nvec_in, int type)
+        */
+ again:
+       if (type == PCI_CAP_ID_MSI) {
+-              if (pdn->force_32bit_msi) {
++              if (pdev->no_64bit_msi) {
+                       rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
+                       if (rc < 0) {
+                               /*
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index b07909850f77..bc5fbc201bcb 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -288,10 +288,10 @@ static inline void disable_surveillance(void)
+       args.token = rtas_token("set-indicator");
+       if (args.token == RTAS_UNKNOWN_SERVICE)
+               return;
+-      args.nargs = 3;
+-      args.nret = 1;
++      args.nargs = cpu_to_be32(3);
++      args.nret = cpu_to_be32(1);
+       args.rets = &args.args[3];
+-      args.args[0] = SURVEILLANCE_TOKEN;
++      args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
+       args.args[1] = 0;
+       args.args[2] = 0;
+       enter_rtas(__pa(&args));
+diff --git a/arch/sparc/include/uapi/asm/swab.h 
b/arch/sparc/include/uapi/asm/swab.h
+index a34ad079487e..4c7c12d69bea 100644
+--- a/arch/sparc/include/uapi/asm/swab.h
++++ b/arch/sparc/include/uapi/asm/swab.h
+@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr)
+ {
+       __u16 ret;
+ 
+-      __asm__ __volatile__ ("lduha [%1] %2, %0"
++      __asm__ __volatile__ ("lduha [%2] %3, %0"
+                             : "=r" (ret)
+-                            : "r" (addr), "i" (ASI_PL));
++                            : "m" (*addr), "r" (addr), "i" (ASI_PL));
+       return ret;
+ }
+ #define __arch_swab16p __arch_swab16p
+@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr)
+ {
+       __u32 ret;
+ 
+-      __asm__ __volatile__ ("lduwa [%1] %2, %0"
++      __asm__ __volatile__ ("lduwa [%2] %3, %0"
+                             : "=r" (ret)
+-                            : "r" (addr), "i" (ASI_PL));
++                            : "m" (*addr), "r" (addr), "i" (ASI_PL));
+       return ret;
+ }
+ #define __arch_swab32p __arch_swab32p
+@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr)
+ {
+       __u64 ret;
+ 
+-      __asm__ __volatile__ ("ldxa [%1] %2, %0"
++      __asm__ __volatile__ ("ldxa [%2] %3, %0"
+                             : "=r" (ret)
+-                            : "r" (addr), "i" (ASI_PL));
++                            : "m" (*addr), "r" (addr), "i" (ASI_PL));
+       return ret;
+ }
+ #define __arch_swab64p __arch_swab64p
+diff --git a/arch/x86/include/asm/cpufeature.h 
b/arch/x86/include/asm/cpufeature.h
+index 5f1296872aed..1717156f4dd1 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -203,6 +203,7 @@
+ #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+ #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
+ #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
++#define X86_FEATURE_VMMCALL   (8*32+15) /* Prefer vmmcall to vmcall */
+ 
+ 
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index c7678e43465b..e62cf897f781 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -2,6 +2,7 @@
+ #define _ASM_X86_KVM_PARA_H
+ 
+ #include <asm/processor.h>
++#include <asm/alternative.h>
+ #include <uapi/asm/kvm_para.h>
+ 
+ extern void kvmclock_init(void);
+@@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void)
+ }
+ #endif /* CONFIG_KVM_GUEST */
+ 
+-/* This instruction is vmcall.  On non-VT architectures, it will generate a
+- * trap that we will then rewrite to the appropriate instruction.
++#ifdef CONFIG_DEBUG_RODATA
++#define KVM_HYPERCALL \
++        ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", 
X86_FEATURE_VMMCALL)
++#else
++/* On AMD processors, vmcall will generate a trap that we will
++ * then rewrite to the appropriate instruction.
+  */
+ #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
++#endif
+ 
+ /* For KVM hypercalls, a three-byte sequence of either the vmcall or the 
vmmcall
+  * instruction.  The hypervisor may replace it with something else but only 
the
+diff --git a/arch/x86/include/asm/page_32_types.h 
b/arch/x86/include/asm/page_32_types.h
+index f48b17df4224..3a52ee0e726d 100644
+--- a/arch/x86/include/asm/page_32_types.h
++++ b/arch/x86/include/asm/page_32_types.h
+@@ -20,7 +20,6 @@
+ #define THREAD_SIZE_ORDER     1
+ #define THREAD_SIZE           (PAGE_SIZE << THREAD_SIZE_ORDER)
+ 
+-#define STACKFAULT_STACK 0
+ #define DOUBLEFAULT_STACK 1
+ #define NMI_STACK 0
+ #define DEBUG_STACK 0
+diff --git a/arch/x86/include/asm/page_64_types.h 
b/arch/x86/include/asm/page_64_types.h
+index 8de6d9cf3b95..d54d1eebeffe 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -14,12 +14,11 @@
+ #define IRQ_STACK_ORDER 2
+ #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+ 
+-#define STACKFAULT_STACK 1
+-#define DOUBLEFAULT_STACK 2
+-#define NMI_STACK 3
+-#define DEBUG_STACK 4
+-#define MCE_STACK 5
+-#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
++#define DOUBLEFAULT_STACK 1
++#define NMI_STACK 2
++#define DEBUG_STACK 3
++#define MCE_STACK 4
++#define N_EXCEPTION_STACKS 4  /* hw limit: 7 */
+ 
+ #define PUD_PAGE_SIZE         (_AC(1, UL) << PUD_SHIFT)
+ #define PUD_PAGE_MASK         (~(PUD_PAGE_SIZE-1))
+diff --git a/arch/x86/include/asm/thread_info.h 
b/arch/x86/include/asm/thread_info.h
+index e1940c06ed02..e870ea9232c3 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -144,7 +144,7 @@ struct thread_info {
+ /* Only used for 64 bit */
+ #define _TIF_DO_NOTIFY_MASK                                           \
+       (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME |       \
+-       _TIF_USER_RETURN_NOTIFY)
++       _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
+ 
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW                                                       
\
+diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
+index 58d66fe06b61..b409b17efb48 100644
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void);
+ 
+ #ifdef CONFIG_TRACING
+ asmlinkage void trace_page_fault(void);
++#define trace_stack_segment stack_segment
+ #define trace_divide_error divide_error
+ #define trace_bounds bounds
+ #define trace_invalid_op invalid_op
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index c67ffa686064..c005fdd52529 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -508,6 +508,13 @@ static void early_init_amd(struct cpuinfo_x86 *c)
+       }
+ #endif
+ 
++      /*
++       * This is only needed to tell the kernel whether to use VMCALL
++       * and VMMCALL.  VMMCALL is never executed except under virt, so
++       * we can set it unconditionally.
++       */
++      set_cpu_cap(c, X86_FEATURE_VMMCALL);
++
+       /* F16h erratum 793, CVE-2013-6885 */
+       if (c->x86 == 0x16 && c->x86_model <= 0xf) {
+               u64 val;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 3f27f5fd0847..e6bddd5b9da3 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -144,6 +144,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+ 
+ static int __init x86_xsave_setup(char *s)
+ {
++      if (strlen(s))
++              return 0;
+       setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+       setup_clear_cpu_cap(X86_FEATURE_AVX);
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index addb207dab92..66e274a3d968 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
+               [ DEBUG_STACK-1                 ]       = "#DB",
+               [ NMI_STACK-1                   ]       = "NMI",
+               [ DOUBLEFAULT_STACK-1           ]       = "#DF",
+-              [ STACKFAULT_STACK-1            ]       = "#SS",
+               [ MCE_STACK-1                   ]       = "#MC",
+ #if DEBUG_STKSZ > EXCEPTION_STKSZ
+               [ N_EXCEPTION_STACKS ...
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 03cd2a8f6009..02553d6d183d 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1053,9 +1053,15 @@ ENTRY(native_iret)
+       jnz native_irq_return_ldt
+ #endif
+ 
++.global native_irq_return_iret
+ native_irq_return_iret:
++      /*
++       * This may fault.  Non-paranoid faults on return to userspace are
++       * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
++       * Double-faults due to espfix64 are handled in do_double_fault.
++       * Other faults here are fatal.
++       */
+       iretq
+-      _ASM_EXTABLE(native_irq_return_iret, bad_iret)
+ 
+ #ifdef CONFIG_X86_ESPFIX64
+ native_irq_return_ldt:
+@@ -1083,25 +1089,6 @@ native_irq_return_ldt:
+       jmp native_irq_return_iret
+ #endif
+ 
+-      .section .fixup,"ax"
+-bad_iret:
+-      /*
+-       * The iret traps when the %cs or %ss being restored is bogus.
+-       * We've lost the original trap vector and error code.
+-       * #GPF is the most likely one to get for an invalid selector.
+-       * So pretend we completed the iret and took the #GPF in user mode.
+-       *
+-       * We are now running with the kernel GS after exception recovery.
+-       * But error_entry expects us to have user GS to match the user %cs,
+-       * so swap back.
+-       */
+-      pushq $0
+-
+-      SWAPGS
+-      jmp general_protection
+-
+-      .previous
+-
+       /* edi: workmask, edx: work */
+ retint_careful:
+       CFI_RESTORE_STATE
+@@ -1147,37 +1134,6 @@ ENTRY(retint_kernel)
+       CFI_ENDPROC
+ END(common_interrupt)
+ 
+-      /*
+-       * If IRET takes a fault on the espfix stack, then we
+-       * end up promoting it to a doublefault.  In that case,
+-       * modify the stack to make it look like we just entered
+-       * the #GP handler from user space, similar to bad_iret.
+-       */
+-#ifdef CONFIG_X86_ESPFIX64
+-      ALIGN
+-__do_double_fault:
+-      XCPT_FRAME 1 RDI+8
+-      movq RSP(%rdi),%rax             /* Trap on the espfix stack? */
+-      sarq $PGDIR_SHIFT,%rax
+-      cmpl $ESPFIX_PGD_ENTRY,%eax
+-      jne do_double_fault             /* No, just deliver the fault */
+-      cmpl $__KERNEL_CS,CS(%rdi)
+-      jne do_double_fault
+-      movq RIP(%rdi),%rax
+-      cmpq $native_irq_return_iret,%rax
+-      jne do_double_fault             /* This shouldn't happen... */
+-      movq PER_CPU_VAR(kernel_stack),%rax
+-      subq $(6*8-KERNEL_STACK_OFFSET),%rax    /* Reset to original stack */
+-      movq %rax,RSP(%rdi)
+-      movq $0,(%rax)                  /* Missing (lost) #GP error code */
+-      movq $general_protection,RIP(%rdi)
+-      retq
+-      CFI_ENDPROC
+-END(__do_double_fault)
+-#else
+-# define __do_double_fault do_double_fault
+-#endif
+-
+ /*
+  * End of kprobes section
+  */
+@@ -1379,7 +1335,7 @@ zeroentry overflow do_overflow
+ zeroentry bounds do_bounds
+ zeroentry invalid_op do_invalid_op
+ zeroentry device_not_available do_device_not_available
+-paranoiderrorentry double_fault __do_double_fault
++paranoiderrorentry double_fault do_double_fault
+ zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
+ errorentry invalid_TSS do_invalid_TSS
+ errorentry segment_not_present do_segment_not_present
+@@ -1549,7 +1505,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+ 
+ paranoidzeroentry_ist debug do_debug DEBUG_STACK
+ paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
+-paranoiderrorentry stack_segment do_stack_segment
++errorentry stack_segment do_stack_segment
+ #ifdef CONFIG_XEN
+ zeroentry xen_debug do_debug
+ zeroentry xen_int3 do_int3
+@@ -1659,16 +1615,15 @@ error_sti:
+ 
+ /*
+  * There are two places in the kernel that can potentially fault with
+- * usergs. Handle them here. The exception handlers after iret run with
+- * kernel gs again, so don't set the user space flag. B stepping K8s
+- * sometimes report an truncated RIP for IRET exceptions returning to
+- * compat mode. Check for these here too.
++ * usergs. Handle them here.  B stepping K8s sometimes report a
++ * truncated RIP for IRET exceptions returning to compat mode. Check
++ * for these here too.
+  */
+ error_kernelspace:
+       incl %ebx
+       leaq native_irq_return_iret(%rip),%rcx
+       cmpq %rcx,RIP+8(%rsp)
+-      je error_swapgs
++      je error_bad_iret
+       movl %ecx,%eax  /* zero extend */
+       cmpq %rax,RIP+8(%rsp)
+       je bstep_iret
+@@ -1679,7 +1634,15 @@ error_kernelspace:
+ bstep_iret:
+       /* Fix truncated RIP */
+       movq %rcx,RIP+8(%rsp)
+-      jmp error_swapgs
++      /* fall through */
++
++error_bad_iret:
++      SWAPGS
++      mov %rsp,%rdi
++      call fixup_bad_iret
++      mov %rax,%rsp
++      decl %ebx       /* Return to usergs */
++      jmp error_sti
+       CFI_ENDPROC
+ END(error_entry)
+ 
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 57409f6b8c62..f9d976e0ae67 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -218,32 +218,40 @@ DO_ERROR_INFO(X86_TRAP_UD,     SIGILL,  "invalid 
opcode",                invalid_op,                  ILL
+ DO_ERROR     (X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",        
coprocessor_segment_overrun                       )
+ DO_ERROR     (X86_TRAP_TS,     SIGSEGV, "invalid TSS",                        
invalid_TSS                                       )
+ DO_ERROR     (X86_TRAP_NP,     SIGBUS,  "segment not present",                
segment_not_present                               )
+-#ifdef CONFIG_X86_32
+ DO_ERROR     (X86_TRAP_SS,     SIGBUS,  "stack segment",              
stack_segment                                     )
+-#endif
+ DO_ERROR_INFO(X86_TRAP_AC,     SIGBUS,  "alignment check",            
alignment_check,             BUS_ADRALN, 0        )
+ 
+ #ifdef CONFIG_X86_64
+ /* Runs on IST stack */
+-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+-{
+-      enum ctx_state prev_state;
+-
+-      prev_state = exception_enter();
+-      if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+-                     X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
+-              preempt_conditional_sti(regs);
+-              do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, 
NULL);
+-              preempt_conditional_cli(regs);
+-      }
+-      exception_exit(prev_state);
+-}
+-
+ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+ {
+       static const char str[] = "double fault";
+       struct task_struct *tsk = current;
+ 
++#ifdef CONFIG_X86_ESPFIX64
++      extern unsigned char native_irq_return_iret[];
++
++      /*
++       * If IRET takes a non-IST fault on the espfix64 stack, then we
++       * end up promoting it to a doublefault.  In that case, modify
++       * the stack to make it look like we just entered the #GP
++       * handler from user space, similar to bad_iret.
++       */
++      if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
++              regs->cs == __KERNEL_CS &&
++              regs->ip == (unsigned long)native_irq_return_iret)
++      {
++              struct pt_regs *normal_regs = task_pt_regs(current);
++
++              /* Fake a #GP(0) from userspace. */
++              memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
++              normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
++              regs->ip = (unsigned long)general_protection;
++              regs->sp = (unsigned long)&normal_regs->orig_ax;
++              return;
++      }
++#endif
++
+       exception_enter();
+       /* Return not checked because double check cannot be ignored */
+       notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
+@@ -376,6 +384,35 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct 
pt_regs *eregs)
+               *regs = *eregs;
+       return regs;
+ }
++
++struct bad_iret_stack {
++      void *error_entry_ret;
++      struct pt_regs regs;
++};
++
++asmlinkage __visible
++struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
++{
++      /*
++       * This is called from entry_64.S early in handling a fault
++       * caused by a bad iret to user mode.  To handle the fault
++       * correctly, we want move our stack frame to task_pt_regs
++       * and we want to pretend that the exception came from the
++       * iret target.
++       */
++      struct bad_iret_stack *new_stack =
++              container_of(task_pt_regs(current),
++                           struct bad_iret_stack, regs);
++
++      /* Copy the IRET target to the new stack. */
++      memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
++
++      /* Copy the remainder of the stack from the current stack. */
++      memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
++
++      BUG_ON(!user_mode_vm(&new_stack->regs));
++      return new_stack;
++}
+ #endif
+ 
+ /*
+@@ -748,7 +785,7 @@ void __init trap_init(void)
+       set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
+       set_intr_gate(X86_TRAP_TS, invalid_TSS);
+       set_intr_gate(X86_TRAP_NP, segment_not_present);
+-      set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
++      set_intr_gate(X86_TRAP_SS, stack_segment);
+       set_intr_gate(X86_TRAP_GP, general_protection);
+       set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
+       set_intr_gate(X86_TRAP_MF, coprocessor_error);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index f35c66c5959a..2308a401a1c5 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1110,7 +1110,7 @@ void mark_rodata_ro(void)
+       unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+       unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
+       unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
+-      unsigned long all_end = PFN_ALIGN(&_end);
++      unsigned long all_end;
+ 
+       printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+              (end - start) >> 10);
+@@ -1121,7 +1121,16 @@ void mark_rodata_ro(void)
+       /*
+        * The rodata/data/bss/brk section (but not the kernel text!)
+        * should also be not-executable.
++       *
++       * We align all_end to PMD_SIZE because the existing mapping
++       * is a full PMD. If we would align _brk_end to PAGE_SIZE we
++       * split the PMD and the reminder between _brk_end and the end
++       * of the PMD will remain mapped executable.
++       *
++       * Any PMD which was setup after the one which covers _brk_end
++       * has been zapped already via cleanup_highmem().
+        */
++      all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
+       set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+ 
+       rodata_test();
+diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
+index 0b0b124d3ece..23210baade2d 100644
+--- a/arch/x86/tools/calc_run_size.pl
++++ b/arch/x86/tools/calc_run_size.pl
+@@ -19,7 +19,16 @@ while (<>) {
+               if ($file_offset == 0) {
+                       $file_offset = $offset;
+               } elsif ($file_offset != $offset) {
+-                      die ".bss and .brk lack common file offset\n";
++                      # BFD linker shows the same file offset in ELF.
++                      # Gold linker shows them as consecutive.
++                      next if ($file_offset + $mem_size == $offset + $size);
++
++                      printf STDERR "file_offset: 0x%lx\n", $file_offset;
++                      printf STDERR "mem_size: 0x%lx\n", $mem_size;
++                      printf STDERR "offset: 0x%lx\n", $offset;
++                      printf STDERR "size: 0x%lx\n", $size;
++
++                      die ".bss and .brk are non-contiguous\n";
+               }
+       }
+ }
+diff --git a/drivers/clocksource/sun4i_timer.c 
b/drivers/clocksource/sun4i_timer.c
+index bf497afba9ad..7d19f86012f2 100644
+--- a/drivers/clocksource/sun4i_timer.c
++++ b/drivers/clocksource/sun4i_timer.c
+@@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node 
*node)
+       /* Make sure timer is stopped before playing with interrupts */
+       sun4i_clkevt_time_stop(0);
+ 
++      sun4i_clockevent.cpumask = cpu_possible_mask;
++      sun4i_clockevent.irq = irq;
++
++      clockevents_config_and_register(&sun4i_clockevent, rate,
++                                      TIMER_SYNC_TICKS, 0xffffffff);
++
+       ret = setup_irq(irq, &sun4i_timer_irq);
+       if (ret)
+               pr_warn("failed to setup irq %d\n", irq);
+@@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node 
*node)
+       /* Enable timer0 interrupt */
+       val = readl(timer_base + TIMER_IRQ_EN_REG);
+       writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+-
+-      sun4i_clockevent.cpumask = cpu_possible_mask;
+-      sun4i_clockevent.irq = irq;
+-
+-      clockevents_config_and_register(&sun4i_clockevent, rate,
+-                                      TIMER_SYNC_TICKS, 0xffffffff);
+ }
+ CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
+                      sun4i_timer_init);
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c 
b/drivers/gpu/drm/radeon/r600_dpm.c
+index 813db8de52b7..3334f916945b 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -1209,7 +1209,7 @@ int r600_parse_extended_power_table(struct radeon_device 
*rdev)
+                                       (mode_info->atom_context->bios + 
data_offset +
+                                        
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+                               
rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+-                                      ppt->usMaximumPowerDeliveryLimit;
++                                      
le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
+                               pt = &ppt->power_tune_table;
+                       } else {
+                               ATOM_PPLIB_POWERTUNE_Table *ppt = 
(ATOM_PPLIB_POWERTUNE_Table *)
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c 
b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 089c9ffb0aa9..b3f0293ba0d8 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -202,6 +202,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+       if (rdev->flags & RADEON_IS_AGP)
+               return false;
+ 
++      /*
++       * Older chips have a HW limitation, they can only generate 40 bits
++       * of address for "64-bit" MSIs which breaks on some platforms, notably
++       * IBM POWER servers, so we limit them
++       */
++      if (rdev->family < CHIP_BONAIRE) {
++              dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
++              rdev->pdev->no_64bit_msi = 1;
++      }
++
+       /* force MSI on */
+       if (radeon_msi == 1)
+               return true;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c 
b/drivers/infiniband/ulp/isert/ib_isert.c
+index c5c194c2e0b6..a96cfc31372e 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -112,9 +112,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct 
rdma_cm_id *cma_id)
+       attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+       /*
+        * FIXME: Use devattr.max_sge - 2 for max_send_sge as
+-       * work-around for RDMA_READ..
++       * work-around for RDMA_READs with ConnectX-2.
++       *
++       * Also, still make sure to have at least two SGEs for
++       * outgoing control PDU responses.
+        */
+-      attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
++      attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
+       isert_conn->max_sge = attr.cap.max_send_sge;
+ 
+       attr.cap.max_recv_sge = 1;
+@@ -220,12 +223,16 @@ isert_create_device_ib_res(struct isert_device *device)
+       struct isert_cq_desc *cq_desc;
+       struct ib_device_attr *dev_attr;
+       int ret = 0, i, j;
++      int max_rx_cqe, max_tx_cqe;
+ 
+       dev_attr = &device->dev_attr;
+       ret = isert_query_device(ib_dev, dev_attr);
+       if (ret)
+               return ret;
+ 
++      max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
++      max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
++
+       /* asign function handlers */
+       if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+               device->use_fastreg = 1;
+@@ -261,7 +268,7 @@ isert_create_device_ib_res(struct isert_device *device)
+                                               isert_cq_rx_callback,
+                                               isert_cq_event_callback,
+                                               (void *)&cq_desc[i],
+-                                              ISER_MAX_RX_CQ_LEN, i);
++                                              max_rx_cqe, i);
+               if (IS_ERR(device->dev_rx_cq[i])) {
+                       ret = PTR_ERR(device->dev_rx_cq[i]);
+                       device->dev_rx_cq[i] = NULL;
+@@ -273,7 +280,7 @@ isert_create_device_ib_res(struct isert_device *device)
+                                               isert_cq_tx_callback,
+                                               isert_cq_event_callback,
+                                               (void *)&cq_desc[i],
+-                                              ISER_MAX_TX_CQ_LEN, i);
++                                              max_tx_cqe, i);
+               if (IS_ERR(device->dev_tx_cq[i])) {
+                       ret = PTR_ERR(device->dev_tx_cq[i]);
+                       device->dev_tx_cq[i] = NULL;
+@@ -718,14 +725,25 @@ wake_up:
+       complete(&isert_conn->conn_wait);
+ }
+ 
+-static void
++static int
+ isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+ {
+-      struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
++      struct isert_conn *isert_conn;
++
++      if (!cma_id->qp) {
++              struct isert_np *isert_np = cma_id->context;
++
++              isert_np->np_cm_id = NULL;
++              return -1;
++      }
++
++      isert_conn = (struct isert_conn *)cma_id->context;
+ 
+       isert_conn->disconnect = disconnect;
+       INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+       schedule_work(&isert_conn->conn_logout_work);
++
++      return 0;
+ }
+ 
+ static int
+@@ -740,6 +758,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct 
rdma_cm_event *event)
+       switch (event->event) {
+       case RDMA_CM_EVENT_CONNECT_REQUEST:
+               ret = isert_connect_request(cma_id, event);
++              if (ret)
++                      pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x 
%d\n",
++                              event->event, ret);
+               break;
+       case RDMA_CM_EVENT_ESTABLISHED:
+               isert_connected_handler(cma_id);
+@@ -749,7 +770,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct 
rdma_cm_event *event)
+       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+               disconnect = true;
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+-              isert_disconnected_handler(cma_id, disconnect);
++              ret = isert_disconnected_handler(cma_id, disconnect);
+               break;
+       case RDMA_CM_EVENT_CONNECT_ERROR:
+       default:
+@@ -757,12 +778,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct 
rdma_cm_event *event)
+               break;
+       }
+ 
+-      if (ret != 0) {
+-              pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
+-                     event->event, ret);
+-              dump_stack();
+-      }
+-
+       return ret;
+ }
+ 
+@@ -970,7 +985,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct 
iscsi_login *login,
+       }
+       if (!login->login_failed) {
+               if (login->login_complete) {
+-                      if (isert_conn->conn_device->use_fastreg) {
++                      if (!conn->sess->sess_ops->SessionType &&
++                          isert_conn->conn_device->use_fastreg) {
+                               ret = 
isert_conn_create_fastreg_pool(isert_conn);
+                               if (ret) {
+                                       pr_err("Conn: %p failed to create"
+@@ -1937,7 +1953,7 @@ isert_put_response(struct iscsi_conn *conn, struct 
iscsi_cmd *cmd)
+               isert_cmd->tx_desc.num_sge = 2;
+       }
+ 
+-      isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
++      isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+ 
+       pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+ 
+@@ -2456,7 +2472,7 @@ isert_put_datain(struct iscsi_conn *conn, struct 
iscsi_cmd *cmd)
+                            &isert_cmd->tx_desc.iscsi_header);
+       isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+       isert_init_send_wr(isert_conn, isert_cmd,
+-                         &isert_cmd->tx_desc.send_wr, true);
++                         &isert_cmd->tx_desc.send_wr, false);
+ 
+       atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ 
+@@ -2768,7 +2784,8 @@ isert_free_np(struct iscsi_np *np)
+ {
+       struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ 
+-      rdma_destroy_id(isert_np->np_cm_id);
++      if (isert_np->np_cm_id)
++              rdma_destroy_id(isert_np->np_cm_id);
+ 
+       np->np_context = NULL;
+       kfree(isert_np);
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c 
b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index d1078ce73095..0097b8dae5bc 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2091,6 +2091,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+       if (!qp_init)
+               goto out;
+ 
++retry:
+       ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+                             ch->rq_size + srp_sq_size, 0);
+       if (IS_ERR(ch->cq)) {
+@@ -2114,6 +2115,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+       ch->qp = ib_create_qp(sdev->pd, qp_init);
+       if (IS_ERR(ch->qp)) {
+               ret = PTR_ERR(ch->qp);
++              if (ret == -ENOMEM) {
++                      srp_sq_size /= 2;
++                      if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
++                              ib_destroy_cq(ch->cq);
++                              goto retry;
++                      }
++              }
+               printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+               goto err_destroy_cq;
+       }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 603fe0dd3682..517829f6a58b 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -1003,9 +1003,19 @@ static int xpad_probe(struct usb_interface *intf, const 
struct usb_device_id *id
+               }
+ 
+               ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
+-              usb_fill_bulk_urb(xpad->bulk_out, udev,
+-                              usb_sndbulkpipe(udev, 
ep_irq_in->bEndpointAddress),
+-                              xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad);
++              if (usb_endpoint_is_bulk_out(ep_irq_in)) {
++                      usb_fill_bulk_urb(xpad->bulk_out, udev,
++                                        usb_sndbulkpipe(udev,
++                                                        
ep_irq_in->bEndpointAddress),
++                                        xpad->bdata, XPAD_PKT_LEN,
++                                        xpad_bulk_out, xpad);
++              } else {
++                      usb_fill_int_urb(xpad->bulk_out, udev,
++                                       usb_sndintpipe(udev,
++                                                      
ep_irq_in->bEndpointAddress),
++                                       xpad->bdata, XPAD_PKT_LEN,
++                                       xpad_bulk_out, xpad, 0);
++              }
+ 
+               /*
+                * Submit the int URB immediately rather than waiting for open
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 1e76eb8f06c7..a3769cf84381 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -140,6 +140,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = 
{
+               (const char * const []){"LEN2001", NULL},
+               1024, 5022, 2508, 4832
+       },
++      {
++              (const char * const []){"LEN2006", NULL},
++              1264, 5675, 1171, 4688
++      },
+       { }
+ };
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index cc38948cf65d..15379824d77d 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2450,9 +2450,9 @@ static void bond_loadbalance_arp_mon(struct work_struct 
*work)
+               if (!rtnl_trylock())
+                       goto re_arm;
+ 
+-              if (slave_state_changed) {
++              if (slave_state_changed)
+                       bond_slave_state_change(bond);
+-              } else if (do_failover) {
++              if (do_failover) {
+                       /* the bond_select_active_slave must hold RTNL
+                        * and curr_slave_lock for write.
+                        */
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index fc59bc6f040b..cc11f7f5e91d 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -384,7 +384,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned 
int idx)
+       BUG_ON(idx >= priv->echo_skb_max);
+ 
+       if (priv->echo_skb[idx]) {
+-              kfree_skb(priv->echo_skb[idx]);
++              dev_kfree_skb_any(priv->echo_skb[idx]);
+               priv->echo_skb[idx] = NULL;
+       }
+ }
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index 7fbe85935f1d..f34f7fa1f901 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -1141,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface 
*intf)
+                       }
+               }
+               unlink_all_urbs(dev);
++              kfree(dev);
+       }
+ }
+ 
+diff --git a/drivers/net/ieee802154/fakehard.c 
b/drivers/net/ieee802154/fakehard.c
+index bf0d55e2dd63..6adbef89c4b0 100644
+--- a/drivers/net/ieee802154/fakehard.c
++++ b/drivers/net/ieee802154/fakehard.c
+@@ -376,17 +376,20 @@ static int ieee802154fake_probe(struct platform_device 
*pdev)
+ 
+       err = wpan_phy_register(phy);
+       if (err)
+-              goto out;
++              goto err_phy_reg;
+ 
+       err = register_netdev(dev);
+-      if (err < 0)
+-              goto out;
++      if (err)
++              goto err_netdev_reg;
+ 
+       dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
+       return 0;
+ 
+-out:
+-      unregister_netdev(dev);
++err_netdev_reg:
++      wpan_phy_unregister(phy);
++err_phy_reg:
++      free_netdev(dev);
++      wpan_phy_free(phy);
+       return err;
+ }
+ 
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 1aff970be33e..1dc628ffce2b 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct 
sockaddr *uaddr,
+       int len = sizeof(struct sockaddr_pppox);
+       struct sockaddr_pppox sp;
+ 
+-      sp.sa_family      = AF_PPPOX;
++      memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
++
++      sp.sa_family    = AF_PPPOX;
+       sp.sa_protocol  = PX_PROTO_PPTP;
+       sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
+ 
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index d510f1d41bae..db21af8de9f6 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -769,6 +769,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ 
(42Mbps) Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 
4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 
4G LTE Mobile Broadband Card */
++      {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G 
Module (Huawei me906e) */
+ 
+       /* 4. Gobi 1000 devices */
+       {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c 
b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index 09facba1dc6d..390c2de5a73e 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -647,6 +647,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
+               ah->enabled_cals |= TX_CL_CAL;
+       else
+               ah->enabled_cals &= ~TX_CL_CAL;
++
++      if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
++              if (ah->is_clk_25mhz) {
++                      REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
++                      REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
++                      REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
++              } else {
++                      REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
++                      REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
++                      REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
++              }
++              udelay(100);
++      }
+ }
+ 
+ static void ar9003_hw_prog_ini(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c 
b/drivers/net/wireless/ath/ath9k/hw.c
+index 9078a6c5a74e..dcc14940c9df 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -858,19 +858,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
+       udelay(RTC_PLL_SETTLE_DELAY);
+ 
+       REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+-
+-      if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
+-              if (ah->is_clk_25mhz) {
+-                      REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+-                      REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
+-                      REG_WRITE(ah,  AR_SLP32_INC, 0x0001e7ae);
+-              } else {
+-                      REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+-                      REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
+-                      REG_WRITE(ah,  AR_SLP32_INC, 0x0001e800);
+-              }
+-              udelay(100);
+-      }
+ }
+ 
+ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c 
b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 5642ccceca7c..22d49d575d3f 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
+       skb_trim(skb, frame_length);
+ }
+ 
+-void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
++/*
++ * H/W needs L2 padding between the header and the paylod if header size
++ * is not 4 bytes aligned.
++ */
++void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
+ {
+-      unsigned int payload_length = skb->len - header_length;
+-      unsigned int header_align = ALIGN_SIZE(skb, 0);
+-      unsigned int payload_align = ALIGN_SIZE(skb, header_length);
+-      unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
++      unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
+ 
+-      /*
+-       * Adjust the header alignment if the payload needs to be moved more
+-       * than the header.
+-       */
+-      if (payload_align > header_align)
+-              header_align += 4;
+-
+-      /* There is nothing to do if no alignment is needed */
+-      if (!header_align)
++      if (!l2pad)
+               return;
+ 
+-      /* Reserve the amount of space needed in front of the frame */
+-      skb_push(skb, header_align);
+-
+-      /*
+-       * Move the header.
+-       */
+-      memmove(skb->data, skb->data + header_align, header_length);
+-
+-      /* Move the payload, if present and if required */
+-      if (payload_length && payload_align)
+-              memmove(skb->data + header_length + l2pad,
+-                      skb->data + header_length + l2pad + payload_align,
+-                      payload_length);
+-
+-      /* Trim the skb to the correct size */
+-      skb_trim(skb, header_length + l2pad + payload_length);
++      skb_push(skb, l2pad);
++      memmove(skb->data, skb->data + l2pad, hdr_len);
+ }
+ 
+-void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
++void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
+ {
+-      /*
+-       * L2 padding is only present if the skb contains more than just the
+-       * IEEE 802.11 header.
+-       */
+-      unsigned int l2pad = (skb->len > header_length) ?
+-                              L2PAD_SIZE(header_length) : 0;
++      unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
+ 
+       if (!l2pad)
+               return;
+ 
+-      memmove(skb->data + l2pad, skb->data, header_length);
++      memmove(skb->data + l2pad, skb->data, hdr_len);
+       skb_pull(skb, l2pad);
+ }
+ 
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 1a54f1ffaadb..005c65715846 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -401,6 +401,21 @@ static struct of_bus *of_match_bus(struct device_node *np)
+       return NULL;
+ }
+ 
++static int of_empty_ranges_quirk(void)
++{
++      if (IS_ENABLED(CONFIG_PPC)) {
++              /* To save cycles, we cache the result */
++              static int quirk_state = -1;
++
++              if (quirk_state < 0)
++                      quirk_state =
++                              of_machine_is_compatible("Power Macintosh") ||
++                              of_machine_is_compatible("MacRISC");
++              return quirk_state;
++      }
++      return false;
++}
++
+ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+                           struct of_bus *pbus, __be32 *addr,
+                           int na, int ns, int pna, const char *rprop)
+@@ -426,12 +441,10 @@ static int of_translate_one(struct device_node *parent, 
struct of_bus *bus,
+        * This code is only enabled on powerpc. --gcl
+        */
+       ranges = of_get_property(parent, rprop, &rlen);
+-#if !defined(CONFIG_PPC)
+-      if (ranges == NULL) {
++      if (ranges == NULL && !of_empty_ranges_quirk()) {
+               pr_err("OF: no ranges; cannot translate\n");
+               return 1;
+       }
+-#endif /* !defined(CONFIG_PPC) */
+       if (ranges == NULL || rlen == 0) {
+               offset = of_read_number(addr, na);
+               memset(addr, 0, pna * 4);
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index fb02fc2fb034..ced17f2ac782 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -599,6 +599,20 @@ error_attrs:
+       return ret;
+ }
+ 
++static int msi_verify_entries(struct pci_dev *dev)
++{
++      struct msi_desc *entry;
++
++      list_for_each_entry(entry, &dev->msi_list, list) {
++              if (!dev->no_64bit_msi || !entry->msg.address_hi)
++                      continue;
++              dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
++                      " tried to assign one above 4G\n");
++              return -EIO;
++      }
++      return 0;
++}
++
+ /**
+  * msi_capability_init - configure device's MSI capability structure
+  * @dev: pointer to the pci_dev data structure of MSI device function
+@@ -652,6 +666,13 @@ static int msi_capability_init(struct pci_dev *dev, int 
nvec)
+               return ret;
+       }
+ 
++      ret = msi_verify_entries(dev);
++      if (ret) {
++              msi_mask_irq(entry, mask, ~mask);
++              free_msi_irqs(dev);
++              return ret;
++      }
++
+       ret = populate_msi_sysfs(dev);
+       if (ret) {
+               msi_mask_irq(entry, mask, ~mask);
+@@ -767,6 +788,11 @@ static int msix_capability_init(struct pci_dev *dev,
+       if (ret)
+               goto out_avail;
+ 
++      /* Check if all MSI entries honor device restrictions */
++      ret = msi_verify_entries(dev);
++      if (ret)
++              goto out_free;
++
+       /*
+        * Some devices require MSI-X to be enabled before we can touch the
+        * MSI-X registers.  We need to mask all the vectors to prevent
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 6e34498ec9f0..34dff3a09b98 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -395,15 +395,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus 
*child)
+ {
+       struct pci_dev *dev = child->self;
+       u16 mem_base_lo, mem_limit_lo;
+-      unsigned long base, limit;
++      u64 base64, limit64;
++      dma_addr_t base, limit;
+       struct pci_bus_region region;
+       struct resource *res;
+ 
+       res = child->resource[2];
+       pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
+       pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
+-      base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
+-      limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
++      base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
++      limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
+ 
+       if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) 
{
+               u32 mem_base_hi, mem_limit_hi;
+@@ -417,18 +418,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus 
*child)
+                * this, just assume they are not being used.
+                */
+               if (mem_base_hi <= mem_limit_hi) {
+-#if BITS_PER_LONG == 64
+-                      base |= ((unsigned long) mem_base_hi) << 32;
+-                      limit |= ((unsigned long) mem_limit_hi) << 32;
+-#else
+-                      if (mem_base_hi || mem_limit_hi) {
+-                              dev_err(&dev->dev, "can't handle 64-bit "
+-                                      "address space for bridge\n");
+-                              return;
+-                      }
+-#endif
++                      base64 |= (u64) mem_base_hi << 32;
++                      limit64 |= (u64) mem_limit_hi << 32;
+               }
+       }
++
++      base = (dma_addr_t) base64;
++      limit = (dma_addr_t) limit64;
++
++      if (base != base64) {
++              dev_err(&dev->dev, "can't handle bridge window above 4GB (bus 
address %#010llx)\n",
++                      (unsigned long long) base64);
++              return;
++      }
++
+       if (base <= limit) {
+               res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+                                        IORESOURCE_MEM | IORESOURCE_PREFETCH;
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c 
b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 9b948505d118..cc6b13b81c53 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -411,6 +411,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct 
net_device *dev,
+       struct fc_frame_header *fh;
+       struct fcoe_rcv_info *fr;
+       struct fcoe_percpu_s *bg;
++      struct sk_buff *tmp_skb;
+       unsigned short oxid;
+ 
+       interface = container_of(ptype, struct bnx2fc_interface,
+@@ -423,6 +424,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct 
net_device *dev,
+               goto err;
+       }
+ 
++      tmp_skb = skb_share_check(skb, GFP_ATOMIC);
++      if (!tmp_skb)
++              goto err;
++
++      skb = tmp_skb;
++
+       if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+               printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
+               goto err;
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 49014a143c6a..c1d04d4d3c6c 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -202,6 +202,7 @@ static struct {
+       {"IOMEGA", "Io20S         *F", NULL, BLIST_KEY},
+       {"INSITE", "Floptical   F*8I", NULL, BLIST_KEY},
+       {"INSITE", "I325VM", NULL, BLIST_KEY},
++      {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
+       {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | 
BLIST_INQUIRY_36},
+       {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+       {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index e63d27013142..e543b80d610e 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -394,9 +394,6 @@ static void pump_transfers(unsigned long data)
+       chip = dws->cur_chip;
+       spi = message->spi;
+ 
+-      if (unlikely(!chip->clk_div))
+-              chip->clk_div = dws->max_freq / chip->speed_hz;
+-
+       if (message->state == ERROR_STATE) {
+               message->status = -EIO;
+               goto early_exit;
+@@ -437,7 +434,7 @@ static void pump_transfers(unsigned long data)
+       if (transfer->speed_hz) {
+               speed = chip->speed_hz;
+ 
+-              if (transfer->speed_hz != speed) {
++              if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
+                       speed = transfer->speed_hz;
+                       if (speed > dws->max_freq) {
+                               printk(KERN_ERR "MRST SPI0: unsupported"
+@@ -659,7 +656,6 @@ static int dw_spi_setup(struct spi_device *spi)
+               dev_err(&spi->dev, "No max speed HZ parameter\n");
+               return -EINVAL;
+       }
+-      chip->speed_hz = spi->max_speed_hz;
+ 
+       chip->tmode = 0; /* Tx & Rx */
+       /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c 
b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index fed699fc5918..2185a71055f2 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -57,6 +57,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+       {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+       {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+       {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
++      {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+       {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+       {}      /* Terminating entry */
+ };
+diff --git a/drivers/target/target_core_transport.c 
b/drivers/target/target_core_transport.c
+index 9232c7738ed1..e6463ef33cd2 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2230,7 +2230,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
+        * and let it call back once the write buffers are ready.
+        */
+       target_add_to_state_list(cmd);
+-      if (cmd->data_direction != DMA_TO_DEVICE) {
++      if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
+               target_execute_cmd(cmd);
+               return 0;
+       }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index c85459338991..b195fdb1effc 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+       /* Creative SB Audigy 2 NX */
+       { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++      /* Microsoft Wireless Laser Mouse 6000 Receiver */
++      { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
++
+       /* Microsoft LifeCam-VX700 v2.0 */
+       { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 75cb1ff9d26b..73c43e5e231b 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -281,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool 
do_wakeup)
+       if (xhci_compliance_mode_recovery_timer_quirk_check())
+               pdev->no_d3cold = true;
+ 
+-      return xhci_suspend(xhci);
++      return xhci_suspend(xhci, do_wakeup);
+ }
+ 
+ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 8abda5c73ca1..1d5ba3c299cc 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -205,7 +205,15 @@ static int xhci_plat_suspend(struct device *dev)
+       struct usb_hcd  *hcd = dev_get_drvdata(dev);
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ 
+-      return xhci_suspend(xhci);
++      /*
++       * xhci_suspend() needs `do_wakeup` to know whether host is allowed
++       * to do wakeup during suspend. Since xhci_plat_suspend is currently
++       * only designed for system suspend, device_may_wakeup() is enough
++       * to dertermine whether host is allowed to do wakeup. Need to
++       * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
++       * also applies to runtime suspend.
++       */
++      return xhci_suspend(xhci, device_may_wakeup(dev));
+ }
+ 
+ static int xhci_plat_resume(struct device *dev)
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 0e6665a82e88..1710a8678bcb 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1180,9 +1180,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd 
*xhci, int slot_id,
+                               false);
+               xhci_ring_cmd_db(xhci);
+       } else {
+-              /* Clear our internal halted state and restart the ring(s) */
++              /* Clear our internal halted state */
+               xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+-              ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+       }
+ }
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 82b563fc4fd6..17e398748a2d 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -35,6 +35,8 @@
+ #define DRIVER_AUTHOR "Sarah Sharp"
+ #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
+ 
++#define       PORT_WAKE_BITS  (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
++
+ /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared 
*/
+ static int link_quirk;
+ module_param(link_quirk, int, S_IRUGO | S_IWUSR);
+@@ -842,13 +844,47 @@ static void xhci_clear_command_ring(struct xhci_hcd 
*xhci)
+       xhci_set_cmd_ring_deq(xhci);
+ }
+ 
++static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
++{
++      int port_index;
++      __le32 __iomem **port_array;
++      unsigned long flags;
++      u32 t1, t2;
++
++      spin_lock_irqsave(&xhci->lock, flags);
++
++      /* disble usb3 ports Wake bits*/
++      port_index = xhci->num_usb3_ports;
++      port_array = xhci->usb3_ports;
++      while (port_index--) {
++              t1 = readl(port_array[port_index]);
++              t1 = xhci_port_state_to_neutral(t1);
++              t2 = t1 & ~PORT_WAKE_BITS;
++              if (t1 != t2)
++                      writel(t2, port_array[port_index]);
++      }
++
++      /* disble usb2 ports Wake bits*/
++      port_index = xhci->num_usb2_ports;
++      port_array = xhci->usb2_ports;
++      while (port_index--) {
++              t1 = readl(port_array[port_index]);
++              t1 = xhci_port_state_to_neutral(t1);
++              t2 = t1 & ~PORT_WAKE_BITS;
++              if (t1 != t2)
++                      writel(t2, port_array[port_index]);
++      }
++
++      spin_unlock_irqrestore(&xhci->lock, flags);
++}
++
+ /*
+  * Stop HC (not bus-specific)
+  *
+  * This is called when the machine transition into S3/S4 mode.
+  *
+  */
+-int xhci_suspend(struct xhci_hcd *xhci)
++int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ {
+       int                     rc = 0;
+       unsigned int            delay = XHCI_MAX_HALT_USEC;
+@@ -859,6 +895,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
+                       xhci->shared_hcd->state != HC_STATE_SUSPENDED)
+               return -EINVAL;
+ 
++      /* Clear root port wake on bits if wakeup not allowed. */
++      if (!do_wakeup)
++              xhci_disable_port_wake_on_bits(xhci);
++
+       /* Don't poll the roothubs on bus suspend. */
+       xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 8faef64371c6..96e9e780ccae 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1760,7 +1760,7 @@ void xhci_shutdown(struct usb_hcd *hcd);
+ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+ 
+ #ifdef        CONFIG_PM
+-int xhci_suspend(struct xhci_hcd *xhci);
++int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
+ int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
+ #else
+ #define       xhci_suspend    NULL
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 3beae723ad3a..5741e9405069 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
+       { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+       { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+       { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++      { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+       { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB 
Device */
+       { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index a523adad6380..debcdef4cbf0 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -483,6 +483,39 @@ static const struct usb_device_id id_table_combined[] = {
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
+       { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
++      { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h 
b/drivers/usb/serial/ftdi_sio_ids.h
+index 6786b705ccf6..e52409c9be99 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -926,8 +926,8 @@
+ #define BAYER_CONTOUR_CABLE_PID        0x6001
+ 
+ /*
+- * The following are the values for the Matrix Orbital FTDI Range
+- * Anything in this range will use an FT232RL.
++ * Matrix Orbital Intelligent USB displays.
++ * http://www.matrixorbital.com
+  */
+ #define MTXORB_VID                    0x1B3D
+ #define MTXORB_FTDI_RANGE_0100_PID    0x0100
+@@ -1186,8 +1186,39 @@
+ #define MTXORB_FTDI_RANGE_01FD_PID    0x01FD
+ #define MTXORB_FTDI_RANGE_01FE_PID    0x01FE
+ #define MTXORB_FTDI_RANGE_01FF_PID    0x01FF
+-
+-
++#define MTXORB_FTDI_RANGE_4701_PID    0x4701
++#define MTXORB_FTDI_RANGE_9300_PID    0x9300
++#define MTXORB_FTDI_RANGE_9301_PID    0x9301
++#define MTXORB_FTDI_RANGE_9302_PID    0x9302
++#define MTXORB_FTDI_RANGE_9303_PID    0x9303
++#define MTXORB_FTDI_RANGE_9304_PID    0x9304
++#define MTXORB_FTDI_RANGE_9305_PID    0x9305
++#define MTXORB_FTDI_RANGE_9306_PID    0x9306
++#define MTXORB_FTDI_RANGE_9307_PID    0x9307
++#define MTXORB_FTDI_RANGE_9308_PID    0x9308
++#define MTXORB_FTDI_RANGE_9309_PID    0x9309
++#define MTXORB_FTDI_RANGE_930A_PID    0x930A
++#define MTXORB_FTDI_RANGE_930B_PID    0x930B
++#define MTXORB_FTDI_RANGE_930C_PID    0x930C
++#define MTXORB_FTDI_RANGE_930D_PID    0x930D
++#define MTXORB_FTDI_RANGE_930E_PID    0x930E
++#define MTXORB_FTDI_RANGE_930F_PID    0x930F
++#define MTXORB_FTDI_RANGE_9310_PID    0x9310
++#define MTXORB_FTDI_RANGE_9311_PID    0x9311
++#define MTXORB_FTDI_RANGE_9312_PID    0x9312
++#define MTXORB_FTDI_RANGE_9313_PID    0x9313
++#define MTXORB_FTDI_RANGE_9314_PID    0x9314
++#define MTXORB_FTDI_RANGE_9315_PID    0x9315
++#define MTXORB_FTDI_RANGE_9316_PID    0x9316
++#define MTXORB_FTDI_RANGE_9317_PID    0x9317
++#define MTXORB_FTDI_RANGE_9318_PID    0x9318
++#define MTXORB_FTDI_RANGE_9319_PID    0x9319
++#define MTXORB_FTDI_RANGE_931A_PID    0x931A
++#define MTXORB_FTDI_RANGE_931B_PID    0x931B
++#define MTXORB_FTDI_RANGE_931C_PID    0x931C
++#define MTXORB_FTDI_RANGE_931D_PID    0x931D
++#define MTXORB_FTDI_RANGE_931E_PID    0x931E
++#define MTXORB_FTDI_RANGE_931F_PID    0x931F
+ 
+ /*
+  * The Mobility Lab (TML)
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index 265c6776b081..49101fe45d38 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -311,24 +311,30 @@ static void      usa26_indat_callback(struct urb *urb)
+               if ((data[0] & 0x80) == 0) {
+                       /* no errors on individual bytes, only
+                          possible overrun err */
+-                      if (data[0] & RXERROR_OVERRUN)
+-                              err = TTY_OVERRUN;
+-                      else
+-                              err = 0;
++                      if (data[0] & RXERROR_OVERRUN) {
++                              tty_insert_flip_char(&port->port, 0,
++                                                              TTY_OVERRUN);
++                      }
+                       for (i = 1; i < urb->actual_length ; ++i)
+-                              tty_insert_flip_char(&port->port, data[i], err);
++                              tty_insert_flip_char(&port->port, data[i],
++                                                              TTY_NORMAL);
+               } else {
+                       /* some bytes had errors, every byte has status */
+                       dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
+                       for (i = 0; i + 1 < urb->actual_length; i += 2) {
+-                              int stat = data[i], flag = 0;
+-                              if (stat & RXERROR_OVERRUN)
+-                                      flag |= TTY_OVERRUN;
+-                              if (stat & RXERROR_FRAMING)
+-                                      flag |= TTY_FRAME;
+-                              if (stat & RXERROR_PARITY)
+-                                      flag |= TTY_PARITY;
++                              int stat = data[i];
++                              int flag = TTY_NORMAL;
++
++                              if (stat & RXERROR_OVERRUN) {
++                                      tty_insert_flip_char(&port->port, 0,
++                                                              TTY_OVERRUN);
++                              }
+                               /* XXX should handle break (0x10) */
++                              if (stat & RXERROR_PARITY)
++                                      flag = TTY_PARITY;
++                              else if (stat & RXERROR_FRAMING)
++                                      flag = TTY_FRAME;
++
+                               tty_insert_flip_char(&port->port, data[i+1],
+                                               flag);
+                       }
+@@ -666,14 +672,19 @@ static void      usa49_indat_callback(struct urb *urb)
+               } else {
+                       /* some bytes had errors, every byte has status */
+                       for (i = 0; i + 1 < urb->actual_length; i += 2) {
+-                              int stat = data[i], flag = 0;
+-                              if (stat & RXERROR_OVERRUN)
+-                                      flag |= TTY_OVERRUN;
+-                              if (stat & RXERROR_FRAMING)
+-                                      flag |= TTY_FRAME;
+-                              if (stat & RXERROR_PARITY)
+-                                      flag |= TTY_PARITY;
++                              int stat = data[i];
++                              int flag = TTY_NORMAL;
++
++                              if (stat & RXERROR_OVERRUN) {
++                                      tty_insert_flip_char(&port->port, 0,
++                                                              TTY_OVERRUN);
++                              }
+                               /* XXX should handle break (0x10) */
++                              if (stat & RXERROR_PARITY)
++                                      flag = TTY_PARITY;
++                              else if (stat & RXERROR_FRAMING)
++                                      flag = TTY_FRAME;
++
+                               tty_insert_flip_char(&port->port, data[i+1],
+                                               flag);
+                       }
+@@ -730,15 +741,19 @@ static void usa49wg_indat_callback(struct urb *urb)
+                        */
+                       for (x = 0; x + 1 < len &&
+                                   i + 1 < urb->actual_length; x += 2) {
+-                              int stat = data[i], flag = 0;
++                              int stat = data[i];
++                              int flag = TTY_NORMAL;
+ 
+-                              if (stat & RXERROR_OVERRUN)
+-                                      flag |= TTY_OVERRUN;
+-                              if (stat & RXERROR_FRAMING)
+-                                      flag |= TTY_FRAME;
+-                              if (stat & RXERROR_PARITY)
+-                                      flag |= TTY_PARITY;
++                              if (stat & RXERROR_OVERRUN) {
++                                      tty_insert_flip_char(&port->port, 0,
++                                                              TTY_OVERRUN);
++                              }
+                               /* XXX should handle break (0x10) */
++                              if (stat & RXERROR_PARITY)
++                                      flag = TTY_PARITY;
++                              else if (stat & RXERROR_FRAMING)
++                                      flag = TTY_FRAME;
++
+                               tty_insert_flip_char(&port->port, data[i+1],
+                                                    flag);
+                               i += 2;
+@@ -790,25 +805,31 @@ static void usa90_indat_callback(struct urb *urb)
+                       if ((data[0] & 0x80) == 0) {
+                               /* no errors on individual bytes, only
+                                  possible overrun err*/
+-                              if (data[0] & RXERROR_OVERRUN)
+-                                      err = TTY_OVERRUN;
+-                              else
+-                                      err = 0;
++                              if (data[0] & RXERROR_OVERRUN) {
++                                      tty_insert_flip_char(&port->port, 0,
++                                                              TTY_OVERRUN);
++                              }
+                               for (i = 1; i < urb->actual_length ; ++i)
+                                       tty_insert_flip_char(&port->port,
+-                                                      data[i], err);
++                                                      data[i], TTY_NORMAL);
+                       }  else {
+                       /* some bytes had errors, every byte has status */
+                               dev_dbg(&port->dev, "%s - RX error!!!!\n", 
__func__);
+                               for (i = 0; i + 1 < urb->actual_length; i += 2) 
{
+-                                      int stat = data[i], flag = 0;
+-                                      if (stat & RXERROR_OVERRUN)
+-                                              flag |= TTY_OVERRUN;
+-                                      if (stat & RXERROR_FRAMING)
+-                                              flag |= TTY_FRAME;
+-                                      if (stat & RXERROR_PARITY)
+-                                              flag |= TTY_PARITY;
++                                      int stat = data[i];
++                                      int flag = TTY_NORMAL;
++
++                                      if (stat & RXERROR_OVERRUN) {
++                                              tty_insert_flip_char(
++                                                              &port->port, 0,
++                                                              TTY_OVERRUN);
++                                      }
+                                       /* XXX should handle break (0x10) */
++                                      if (stat & RXERROR_PARITY)
++                                              flag = TTY_PARITY;
++                                      else if (stat & RXERROR_FRAMING)
++                                              flag = TTY_FRAME;
++
+                                       tty_insert_flip_char(&port->port,
+                                                       data[i+1], flag);
+                               }
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index a7fe664b6b7d..70a098de429f 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port 
*port, u8 lsr,
+                       if (*tty_flag == TTY_NORMAL)
+                               *tty_flag = TTY_FRAME;
+               }
+-              if (lsr & UART_LSR_OE){
++              if (lsr & UART_LSR_OE) {
+                       port->icount.overrun++;
+-                      if (*tty_flag == TTY_NORMAL)
+-                              *tty_flag = TTY_OVERRUN;
++                      tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+               }
+       }
+ 
+@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
+       if ((len >= 4) &&
+           (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+           ((packet[2] == 0x00) || (packet[2] == 0x01))) {
+-              if (packet[2] == 0x00) {
++              if (packet[2] == 0x00)
+                       ssu100_update_lsr(port, packet[3], &flag);
+-                      if (flag == TTY_OVERRUN)
+-                              tty_insert_flip_char(&port->port, 0,
+-                                              TTY_OVERRUN);
+-              }
+               if (packet[2] == 0x01)
+                       ssu100_update_msr(port, packet[3]);
+ 
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index e48d4a672580..5d0b7b846440 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1200,6 +1200,7 @@ static int
+ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+                       struct vhost_scsi_target *t)
+ {
++      struct se_portal_group *se_tpg;
+       struct tcm_vhost_tport *tv_tport;
+       struct tcm_vhost_tpg *tpg;
+       struct tcm_vhost_tpg **vs_tpg;
+@@ -1247,6 +1248,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
+                               ret = -EEXIST;
+                               goto out;
+                       }
++                      /*
++                       * In order to ensure individual vhost-scsi configfs
++                       * groups cannot be removed while in use by vhost ioctl,
++                       * go ahead and take an explicit 
se_tpg->tpg_group.cg_item
++                       * dependency now.
++                       */
++                      se_tpg = &tpg->se_tpg;
++                      ret = 
configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
++                                                 &se_tpg->tpg_group.cg_item);
++                      if (ret) {
++                              pr_warn("configfs_depend_item() failed: %d\n", 
ret);
++                              kfree(vs_tpg);
++                              mutex_unlock(&tpg->tv_tpg_mutex);
++                              goto out;
++                      }
+                       tpg->tv_tpg_vhost_count++;
+                       tpg->vhost_scsi = vs;
+                       vs_tpg[tpg->tport_tpgt] = tpg;
+@@ -1289,6 +1305,7 @@ static int
+ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+                         struct vhost_scsi_target *t)
+ {
++      struct se_portal_group *se_tpg;
+       struct tcm_vhost_tport *tv_tport;
+       struct tcm_vhost_tpg *tpg;
+       struct vhost_virtqueue *vq;
+@@ -1337,6 +1354,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
+               vs->vs_tpg[target] = NULL;
+               match = true;
+               mutex_unlock(&tpg->tv_tpg_mutex);
++              /*
++               * Release se_tpg->tpg_group.cg_item configfs dependency now
++               * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
++               */
++              se_tpg = &tpg->se_tpg;
++              configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
++                                     &se_tpg->tpg_group.cg_item);
+       }
+       if (match) {
+               for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+diff --git a/fs/aio.c b/fs/aio.c
+index f45ddaa4fffa..2f7e8c2e3e76 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt;
+ static const struct file_operations aio_ring_fops;
+ static const struct address_space_operations aio_ctx_aops;
+ 
++/* Backing dev info for aio fs.
++ * -no dirty page accounting or writeback happens
++ */
++static struct backing_dev_info aio_fs_backing_dev_info = {
++      .name           = "aiofs",
++      .state          = 0,
++      .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
++};
++
+ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
+ {
+       struct qstr this = QSTR_INIT("[aio]", 5);
+@@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, 
loff_t nr_pages)
+ 
+       inode->i_mapping->a_ops = &aio_ctx_aops;
+       inode->i_mapping->private_data = ctx;
++      inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
+       inode->i_size = PAGE_SIZE * nr_pages;
+ 
+       path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
+@@ -221,6 +231,9 @@ static int __init aio_setup(void)
+       if (IS_ERR(aio_mnt))
+               panic("Failed to create aio fs mount.");
+ 
++      if (bdi_init(&aio_fs_backing_dev_info))
++              panic("Failed to init aio fs backing dev info.");
++
+       kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+       kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ 
+@@ -282,11 +295,6 @@ static const struct file_operations aio_ring_fops = {
+       .mmap = aio_ring_mmap,
+ };
+ 
+-static int aio_set_page_dirty(struct page *page)
+-{
+-      return 0;
+-}
+-
+ #if IS_ENABLED(CONFIG_MIGRATION)
+ static int aio_migratepage(struct address_space *mapping, struct page *new,
+                       struct page *old, enum migrate_mode mode)
+@@ -358,7 +366,7 @@ out:
+ #endif
+ 
+ static const struct address_space_operations aio_ctx_aops = {
+-      .set_page_dirty = aio_set_page_dirty,
++      .set_page_dirty = __set_page_dirty_no_writeback,
+ #if IS_ENABLED(CONFIG_MIGRATION)
+       .migratepage    = aio_migratepage,
+ #endif
+@@ -413,7 +421,6 @@ static int aio_setup_ring(struct kioctx *ctx)
+               pr_debug("pid(%d) page[%d]->count=%d\n",
+                        current->pid, i, page_count(page));
+               SetPageUptodate(page);
+-              SetPageDirty(page);
+               unlock_page(page);
+ 
+               ctx->ring_pages[i] = page;
+diff --git a/fs/locks.c b/fs/locks.c
+index 4dd39b98a6a3..2c61c4e9368c 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2235,16 +2235,28 @@ void locks_remove_flock(struct file *filp)
+ 
+       while ((fl = *before) != NULL) {
+               if (fl->fl_file == filp) {
+-                      if (IS_FLOCK(fl)) {
+-                              locks_delete_lock(before);
+-                              continue;
+-                      }
+                       if (IS_LEASE(fl)) {
+                               lease_modify(before, F_UNLCK);
+                               continue;
+                       }
+-                      /* What? */
+-                      BUG();
++
++                      /*
++                       * There's a leftover lock on the list of a type that
++                       * we didn't expect to see. Most likely a classic
++                       * POSIX lock that ended up not getting released
++                       * properly, or that raced onto the list somehow. Log
++                       * some info about it and then just remove it from
++                       * the list.
++                       */
++                      WARN(!IS_FLOCK(fl),
++                              "leftover lock: dev=%u:%u ino=%lu type=%hhd 
flags=0x%x start=%lld end=%lld\n",
++                              MAJOR(inode->i_sb->s_dev),
++                              MINOR(inode->i_sb->s_dev), inode->i_ino,
++                              fl->fl_type, fl->fl_flags,
++                              fl->fl_start, fl->fl_end);
++
++                      locks_delete_lock(before);
++                      continue;
+               }
+               before = &fl->fl_next;
+       }
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 2ffebf2081ce..27d7f2742592 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -113,7 +113,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
+               if (atomic_read(&c->io_count) == 0)
+                       break;
+               ret = nfs_wait_bit_killable(&c->flags);
+-      } while (atomic_read(&c->io_count) != 0);
++      } while (atomic_read(&c->io_count) != 0 && !ret);
+       finish_wait(wq, &q.wait);
+       return ret;
+ }
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index cc8c5b32043c..f42bbe5fbc0a 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -784,8 +784,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, 
struct rpc_task *task)
+ {
+       if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+               rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
+-              dprintk("%s slot is busy\n", __func__);
+-              return false;
++              /* Race breaker */
++              if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
++                      dprintk("%s slot is busy\n", __func__);
++                      return false;
++              }
++              rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
+       }
+       return true;
+ }
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index f8f060ffbf4f..6040da8830ff 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -224,13 +224,6 @@ hash_refile(struct svc_cacherep *rp)
+       hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
+ }
+ 
+-static inline bool
+-nfsd_cache_entry_expired(struct svc_cacherep *rp)
+-{
+-      return rp->c_state != RC_INPROG &&
+-             time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
+-}
+-
+ /*
+  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+  * Also prune the oldest ones when the total exceeds the max number of 
entries.
+@@ -242,8 +235,14 @@ prune_cache_entries(void)
+       long freed = 0;
+ 
+       list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+-              if (!nfsd_cache_entry_expired(rp) &&
+-                  num_drc_entries <= max_drc_entries)
++              /*
++               * Don't free entries attached to calls that are still
++               * in-progress, but do keep scanning the list.
++               */
++              if (rp->c_state == RC_INPROG)
++                      continue;
++              if (num_drc_entries <= max_drc_entries &&
++                  time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
+                       break;
+               nfsd_reply_cache_free_locked(rp);
+               freed++;
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 479eb681c27c..f417fef17118 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -328,12 +328,15 @@ void             nfsd_lockd_shutdown(void);
+       (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
+ 
+ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
+-      (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
++#define NFSD4_2_SECURITY_ATTRS                FATTR4_WORD2_SECURITY_LABEL
+ #else
+-#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0
++#define NFSD4_2_SECURITY_ATTRS                0
+ #endif
+ 
++#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
++      (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
++      NFSD4_2_SECURITY_ATTRS)
++
+ static inline u32 nfsd_suppattrs0(u32 minorversion)
+ {
+       return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index be5fd38bd5a0..5d858e02997f 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -18,8 +18,11 @@
+  * position @h. For example
+  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+  */
+-#define GENMASK(h, l)         (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+-#define GENMASK_ULL(h, l)     (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
++#define GENMASK(h, l) \
++      (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
++
++#define GENMASK_ULL(h, l) \
++      (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+ 
+ extern unsigned int __sw_hweight8(unsigned int w);
+ extern unsigned int __sw_hweight16(unsigned int w);
+diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
+index 8bbd7bc1043d..03fa332ad2a8 100644
+--- a/include/linux/iio/events.h
++++ b/include/linux/iio/events.h
+@@ -72,7 +72,7 @@ struct iio_event_data {
+ 
+ #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
+ 
+-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
++#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
+ 
+ #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
+ 
+diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
+index 0068708161ff..0a21fbefdfbe 100644
+--- a/include/linux/inetdevice.h
++++ b/include/linux/inetdevice.h
+@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
+ static __inline__ __be32 inet_make_mask(int logmask)
+ {
+       if (logmask)
+-              return htonl(~((1<<(32-logmask))-1));
++              return htonl(~((1U<<(32-logmask))-1));
+       return 0;
+ }
+ 
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 33aa2caf0f0c..0e5e16c6f7f1 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -324,6 +324,7 @@ struct pci_dev {
+       unsigned int    is_added:1;
+       unsigned int    is_busmaster:1; /* device is busmaster */
+       unsigned int    no_msi:1;       /* device may not use msi */
++      unsigned int    no_64bit_msi:1; /* device may only use 32-bit MSIs */
+       unsigned int    block_cfg_access:1;     /* config space access is 
blocked */
+       unsigned int    broken_parity_status:1; /* Device generates false 
positive parity */
+       unsigned int    irq_reroute_variant:2;  /* device needs IRQ rerouting 
variant */
+diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
+index 2883a7a6f9f3..98f2ade0266e 100644
+--- a/include/sound/soc-dpcm.h
++++ b/include/sound/soc-dpcm.h
+@@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime {
+       /* state and update */
+       enum snd_soc_dpcm_update runtime_update;
+       enum snd_soc_dpcm_state state;
++
++      int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
+ };
+ 
+ /* can this BE stop and free */
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 307d87c0991a..1139b228befc 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1621,7 +1621,6 @@ bool uprobe_deny_signal(void)
+               if (__fatal_signal_pending(t) || 
arch_uprobe_xol_was_trapped(t)) {
+                       utask->state = UTASK_SSTEP_TRAPPED;
+                       set_tsk_thread_flag(t, TIF_UPROBE);
+-                      set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+               }
+       }
+ 
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index b851cc580853..fbda6b54baff 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct 
net_device *net_dev)
+               return true;
+ 
+       /* no more parents..stop recursion */
+-      if (net_dev->iflink == net_dev->ifindex)
++      if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
+               return false;
+ 
+       /* recurse over the parent device */
+diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
+index f2e15738534d..8f7bd56955b0 100644
+--- a/net/ipv4/fib_rules.c
++++ b/net/ipv4/fib_rules.c
+@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, 
struct fib_result *res)
+       else
+               res->tclassid = 0;
+ #endif
++
++      if (err == -ESRCH)
++              err = -ENETUNREACH;
++
+       return err;
+ }
+ EXPORT_SYMBOL_GPL(__fib_lookup);
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index e21934b06d4c..0d33f947a87f 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct 
sk_buff *skb, u16 ident)
+                                            &ipv6_hdr(skb)->daddr))
+                               continue;
+ #endif
++              } else {
++                      continue;
+               }
+ 
+               if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
+diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
+index 00b2a6d1c009..d65aea21ce81 100644
+--- a/net/ipx/af_ipx.c
++++ b/net/ipx/af_ipx.c
+@@ -1763,6 +1763,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket 
*sock,
+       struct ipxhdr *ipx = NULL;
+       struct sk_buff *skb;
+       int copied, rc;
++      bool locked = true;
+ 
+       lock_sock(sk);
+       /* put the autobinding in */
+@@ -1789,6 +1790,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket 
*sock,
+       if (sock_flag(sk, SOCK_ZAPPED))
+               goto out;
+ 
++      release_sock(sk);
++      locked = false;
+       skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+                               flags & MSG_DONTWAIT, &rc);
+       if (!skb)
+@@ -1822,7 +1825,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket 
*sock,
+ out_free:
+       skb_free_datagram(sk, skb);
+ out:
+-      release_sock(sk);
++      if (locked)
++              release_sock(sk);
+       return rc;
+ }
+ 
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index 0fcbe90f3ef2..12528e9ac4c2 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1369,7 +1369,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
+ 
+       /* enable small pop, introduce 400ms delay in turning off */
+       snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
+-                              SGTL5000_SMALL_POP, 1);
++                              SGTL5000_SMALL_POP,
++                              SGTL5000_SMALL_POP);
+ 
+       /* disable short cut detector */
+       snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
+diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
+index 2f8c88931f69..bd7a344bf8c5 100644
+--- a/sound/soc/codecs/sgtl5000.h
++++ b/sound/soc/codecs/sgtl5000.h
+@@ -275,7 +275,7 @@
+ #define SGTL5000_BIAS_CTRL_MASK                       0x000e
+ #define SGTL5000_BIAS_CTRL_SHIFT              1
+ #define SGTL5000_BIAS_CTRL_WIDTH              3
+-#define SGTL5000_SMALL_POP                    0
++#define SGTL5000_SMALL_POP                    0x0001
+ 
+ /*
+  * SGTL5000_CHIP_MIC_CTRL
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 53c03aff762e..0502e3f17412 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1341,6 +1341,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
+                         file, blocks, pos - firmware->size);
+ 
+ out_fw:
++      regmap_async_complete(regmap);
+       release_firmware(firmware);
+       wm_adsp_buf_free(&buf_list);
+ out:
+diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
+index 1967f44e7cd4..9d0c59ce6de8 100644
+--- a/sound/soc/sh/fsi.c
++++ b/sound/soc/sh/fsi.c
+@@ -1785,8 +1785,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
+ static struct snd_pcm_hardware fsi_pcm_hardware = {
+       .info =         SNDRV_PCM_INFO_INTERLEAVED      |
+                       SNDRV_PCM_INFO_MMAP             |
+-                      SNDRV_PCM_INFO_MMAP_VALID       |
+-                      SNDRV_PCM_INFO_PAUSE,
++                      SNDRV_PCM_INFO_MMAP_VALID,
+       .buffer_bytes_max       = 64 * 1024,
+       .period_bytes_min       = 32,
+       .period_bytes_max       = 8192,
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index 743de5e3b1e1..37fcd93ed1fd 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -626,8 +626,7 @@ static void rsnd_dai_remove(struct platform_device *pdev,
+ static struct snd_pcm_hardware rsnd_pcm_hardware = {
+       .info =         SNDRV_PCM_INFO_INTERLEAVED      |
+                       SNDRV_PCM_INFO_MMAP             |
+-                      SNDRV_PCM_INFO_MMAP_VALID       |
+-                      SNDRV_PCM_INFO_PAUSE,
++                      SNDRV_PCM_INFO_MMAP_VALID,
+       .buffer_bytes_max       = 64 * 1024,
+       .period_bytes_min       = 32,
+       .period_bytes_max       = 8192,
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 02733ded2cb1..e28704e1274a 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1258,13 +1258,36 @@ static void dpcm_set_fe_runtime(struct 
snd_pcm_substream *substream)
+               dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
+ }
+ 
++static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int 
cmd);
++
++/* Set FE's runtime_update state; the state is protected via PCM stream lock
++ * for avoiding the race with trigger callback.
++ * If the state is unset and a trigger is pending while the previous 
operation,
++ * process the pending trigger action here.
++ */
++static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
++                                   int stream, enum snd_soc_dpcm_update state)
++{
++      struct snd_pcm_substream *substream =
++              snd_soc_dpcm_get_substream(fe, stream);
++
++      snd_pcm_stream_lock_irq(substream);
++      if (state == SND_SOC_DPCM_UPDATE_NO && 
fe->dpcm[stream].trigger_pending) {
++              dpcm_fe_dai_do_trigger(substream,
++                                     fe->dpcm[stream].trigger_pending - 1);
++              fe->dpcm[stream].trigger_pending = 0;
++      }
++      fe->dpcm[stream].runtime_update = state;
++      snd_pcm_stream_unlock_irq(substream);
++}
++
+ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
+ {
+       struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
+       struct snd_pcm_runtime *runtime = fe_substream->runtime;
+       int stream = fe_substream->stream, ret = 0;
+ 
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+       ret = dpcm_be_dai_startup(fe, fe_substream->stream);
+       if (ret < 0) {
+@@ -1286,13 +1309,13 @@ static int dpcm_fe_dai_startup(struct 
snd_pcm_substream *fe_substream)
+       dpcm_set_fe_runtime(fe_substream);
+       snd_pcm_limit_hw_rates(runtime);
+ 
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+       return 0;
+ 
+ unwind:
+       dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
+ be_err:
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+       return ret;
+ }
+ 
+@@ -1339,7 +1362,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream 
*substream)
+       struct snd_soc_pcm_runtime *fe = substream->private_data;
+       int stream = substream->stream;
+ 
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+       /* shutdown the BEs */
+       dpcm_be_dai_shutdown(fe, substream->stream);
+@@ -1353,7 +1376,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream 
*substream)
+       dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
+ 
+       fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+       return 0;
+ }
+ 
+@@ -1401,7 +1424,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream 
*substream)
+       int err, stream = substream->stream;
+ 
+       mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+       dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
+ 
+@@ -1416,7 +1439,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream 
*substream)
+       err = dpcm_be_dai_hw_free(fe, stream);
+ 
+       fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 
+       mutex_unlock(&fe->card->mutex);
+       return 0;
+@@ -1509,7 +1532,7 @@ static int dpcm_fe_dai_hw_params(struct 
snd_pcm_substream *substream,
+       int ret, stream = substream->stream;
+ 
+       mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+       memcpy(&fe->dpcm[substream->stream].hw_params, params,
+                       sizeof(struct snd_pcm_hw_params));
+@@ -1532,7 +1555,7 @@ static int dpcm_fe_dai_hw_params(struct 
snd_pcm_substream *substream,
+               fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
+ 
+ out:
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+       mutex_unlock(&fe->card->mutex);
+       return ret;
+ }
+@@ -1646,7 +1669,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, 
int stream,
+ }
+ EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
+ 
+-static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
++static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int 
cmd)
+ {
+       struct snd_soc_pcm_runtime *fe = substream->private_data;
+       int stream = substream->stream, ret;
+@@ -1720,6 +1743,23 @@ out:
+       return ret;
+ }
+ 
++static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++      struct snd_soc_pcm_runtime *fe = substream->private_data;
++      int stream = substream->stream;
++
++      /* if FE's runtime_update is already set, we're in race;
++       * process this trigger later at exit
++       */
++      if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
++              fe->dpcm[stream].trigger_pending = cmd + 1;
++              return 0; /* delayed, assuming it's successful */
++      }
++
++      /* we're alone, let's trigger */
++      return dpcm_fe_dai_do_trigger(substream, cmd);
++}
++
+ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
+ {
+       struct snd_soc_dpcm *dpcm;
+@@ -1763,7 +1803,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream 
*substream)
+ 
+       dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
+ 
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
+ 
+       /* there is no point preparing this FE if there are no BEs */
+       if (list_empty(&fe->dpcm[stream].be_clients)) {
+@@ -1790,7 +1830,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream 
*substream)
+       fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+ 
+ out:
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+       mutex_unlock(&fe->card->mutex);
+ 
+       return ret;
+@@ -1937,11 +1977,11 @@ static int dpcm_run_new_update(struct 
snd_soc_pcm_runtime *fe, int stream)
+ {
+       int ret;
+ 
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
+       ret = dpcm_run_update_startup(fe, stream);
+       if (ret < 0)
+               dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 
+       return ret;
+ }
+@@ -1950,11 +1990,11 @@ static int dpcm_run_old_update(struct 
snd_soc_pcm_runtime *fe, int stream)
+ {
+       int ret;
+ 
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
+       ret = dpcm_run_update_shutdown(fe, stream);
+       if (ret < 0)
+               dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
+-      fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
++      dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
+ 
+       return ret;
+ }
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index c64a3d96db22..827d40441ec7 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1142,6 +1142,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, 
unsigned int pipe,
+       if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               mdelay(20);
++
++      /* Marantz/Denon devices with USB DAC functionality need a delay
++       * after each class compliant request
++       */
++      if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) &&
++          (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
++
++              switch (le16_to_cpu(dev->descriptor.idProduct)) {
++              case 0x3005: /* Marantz HD-DAC1 */
++              case 0x3006: /* Marantz SA-14S1 */
++                      mdelay(20);
++                      break;
++              }
++      }
+ }
+ 
+ /*

Reply via email to