diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt
index 225990f79b7c..47570d207215 100644
--- a/Documentation/devicetree/bindings/clock/keystone-pll.txt
+++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt
@@ -15,8 +15,8 @@ Required properties:
 - compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock"
 - clocks : parent clock phandle
 - reg - pll control0 and pll multipler registers
-- reg-names : control and multiplier. The multiplier is applicable only for
-               main pll clock
+- reg-names : control, multiplier and post-divider. The multiplier and
+               post-divider registers are applicable only for main pll clock
 - fixed-postdiv : fixed post divider value. If absent, use clkod register bits
                for postdiv
 
@@ -25,8 +25,8 @@ Example:
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
                fixed-postdiv = <2>;
        };
 
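As a consumer-side aside (not part of this patch), a clock driver handling the
extended binding above could locate the optional third region through its
"reg-names" entry. A minimal sketch, assuming only the standard
of_property_match_string() and of_iomap() helpers; the real driver change is
the pllod addition to drivers/clk/keystone/pll.c later in this patch:

    #include <linux/of.h>
    #include <linux/of_address.h>

    /* Sketch: map the optional "post-divider" region of a main PLL node. */
    static void __iomem *pll_map_postdiv(struct device_node *np)
    {
            int idx = of_property_match_string(np, "reg-names", "post-divider");

            if (idx < 0)
                    return NULL;    /* absent: fall back to fixed-postdiv */
            return of_iomap(np, idx);
    }
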
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6..1fec1135791d 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
diff --git a/Makefile b/Makefile
index 068dd690933d..838dabcb7f48 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 1
-SUBLEVEL = 5
+SUBLEVEL = 6
 EXTRAVERSION =
 NAME = Series 4800
 
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
                        can1: can@53fe4000 {
                                compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
                                reg = <0x53fe4000 0x1000>;
-                               clocks = <&clks 33>;
-                               clock-names = "ipg";
+                               clocks = <&clks 33>, <&clks 33>;
+                               clock-names = "ipg", "per";
                                interrupts = <43>;
                                status = "disabled";
                        };
@@ -295,8 +295,8 @@
                        can2: can@53fe8000 {
                                compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
                                reg = <0x53fe8000 0x1000>;
-                               clocks = <&clks 34>;
-                               clock-names = "ipg";
+                               clocks = <&clks 34>, <&clks 34>;
+                               clock-names = "ipg", "per";
                                interrupts = <44>;
                                status = "disabled";
                        };
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
                #clock-cells = <0>;
                compatible = "ti,keystone,main-pll-clock";
                clocks = <&refclksys>;
-               reg = <0x02620350 4>, <0x02310110 4>;
-               reg-names = "control", "multiplier";
-               fixed-postdiv = <2>;
+               reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
+               reg-names = "control", "multiplier", "post-divider";
        };
 
        papllclk: papllclk@2620358 {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 752969ff9de0..5286e7773ed4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
  * registers.  This address is needed early so the OCP registers that
  * are part of the device's address space can be ioremapped properly.
  *
+ * If SYSC access is not needed, the registers will not be remapped
+ * and non-availability of MPU access is not treated as an error.
+ *
  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
  * -ENXIO on absent or invalid register target address space.
  */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
 
        _save_mpu_port_index(oh);
 
+       /* if we don't need sysc access we don't need to ioremap */
+       if (!oh->class->sysc)
+               return 0;
+
+       /* we can't continue without MPU PORT if we need sysc access */
        if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
                return -ENXIO;
 
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
                         oh->name);
 
                /* Extract the IO space from device tree blob */
-               if (!np)
+               if (!np) {
+                       pr_err("omap_hwmod: %s: no dt node\n", oh->name);
                        return -ENXIO;
+               }
 
                va_start = of_iomap(np, index + oh->mpu_rt_idx);
        } else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
                                oh->name, np->name);
        }
 
-       if (oh->class->sysc) {
-               r = _init_mpu_rt_base(oh, NULL, index, np);
-               if (r < 0) {
-                       WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-                            oh->name);
-                       return 0;
-               }
+       r = _init_mpu_rt_base(oh, NULL, index, np);
+       if (r < 0) {
+               WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+                    oh->name);
+               return 0;
        }
 
        r = _init_clocks(oh, NULL);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index d26fcd4cd6e6..c0cff3410166 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitely for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
                break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE))
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 7fc8397d16f2..fd2a36a79f97 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -186,6 +186,7 @@ int get_c0_perfcount_int(void)
 {
        return ATH79_MISC_IRQ(5);
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush    bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 819af9d057a8..70f6e7f073b0 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
+#ifdef CONFIG_SMP
+               /*
+                * For SMP, multiple CPUs can race, so we need to do
+                * this atomically.
+                */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+               unsigned long page_global = _PAGE_GLOBAL;
+               unsigned long tmp;
+
+               __asm__ __volatile__ (
+                       "       .set    push\n"
+                       "       .set    noreorder\n"
+                       "1:     " LL_INSN "     %[tmp], %[buddy]\n"
+                       "       bnez    %[tmp], 2f\n"
+                       "        or     %[tmp], %[tmp], %[global]\n"
+                       "       " SC_INSN "     %[tmp], %[buddy]\n"
+                       "       beqz    %[tmp], 1b\n"
+                       "        nop\n"
+                       "2:\n"
+                       "       .set pop"
+                       : [buddy] "+m" (buddy->pte),
+                         [tmp] "=&r" (tmp)
+                       : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
                if (pte_none(*buddy))
                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
        }
 #endif
 }
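
In plain C, the ll/sc sequence added above boils down to a single
compare-and-swap: upgrade the buddy PTE from none (0) to _PAGE_GLOBAL, and do
nothing if another CPU populated it first. A rough illustration using GCC's
__atomic builtins rather than the kernel's own accessors (sketch only):

    /* Illustration: the ll/sc retry loop expressed as one strong CAS. */
    static inline void buddy_mark_global(unsigned long *buddy_val,
                                         unsigned long page_global)
    {
            unsigned long none = 0;

            /*
             * If the buddy PTE is still pte_none() (i.e. 0), atomically store
             * the global bit; if it is already populated, leave it alone; it
             * must already carry _PAGE_GLOBAL, as the comment above notes.
             */
            __atomic_compare_exchange_n(buddy_val, &none, page_global,
                                        false, __ATOMIC_RELAXED,
                                        __ATOMIC_RELAXED);
    }
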
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
                .set    noreorder
                bltz    k0, 8f
                 move   k1, sp
+#ifdef CONFIG_EVA
+               /*
+                * Flush interAptiv's Return Prediction Stack (RPS) by writing
+                * EntryHi. Toggling Config7.RPS is slower and less portable.
+                *
+                * The RPS isn't automatically flushed when exceptions are
+                * taken, which can result in kernel mode speculative accesses
+                * to user addresses if the RPS mispredicts. That's harmless
+                * when user and kernel share the same address space, but with
+                * EVA the same user segments may be unmapped to kernel mode,
+                * even containing sensitive MMIO regions or invalid memory.
+                *
+                * This can happen when the kernel sets the return address to
+                * ret_from_* and jr's to the exception handler, which looks
+                * more like a tail call than a function call. If nested calls
+                * don't evict the last user address in the RPS, it will
+                * mispredict the return and fetch from a user controlled
+                * address into the icache.
+                *
+                * More recent EVA-capable cores with MAAR to restrict
+                * speculative accesses aren't affected.
+                */
+               MFC0    k0, CP0_ENTRYHI
+               MTC0    k0, CP0_ENTRYHI
+#endif
                .set    reorder
                /* Called from user mode, new stack. */
                get_saved_sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
                                      unsigned long __user *user_mask_ptr)
 {
        unsigned int real_len;
-       cpumask_t mask;
+       cpumask_t allowed, mask;
        int retval;
        struct task_struct *p;
 
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
        if (retval)
                goto out_unlock;
 
-       cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+       cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+       cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
        read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
 
 process_entry:
        PTR_L           s2, (s0)
-       PTR_ADD         s0, s0, SZREG
+       PTR_ADDIU       s0, s0, SZREG
 
        /*
         * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
        /* copy page word by word */
        REG_L           s5, (s2)
        REG_S           s5, (s4)
-       PTR_ADD         s4, s4, SZREG
-       PTR_ADD         s2, s2, SZREG
-       LONG_SUB        s6, s6, 1
+       PTR_ADDIU       s4, s4, SZREG
+       PTR_ADDIU       s2, s2, SZREG
+       LONG_ADDIU      s6, s6, -1
        beq             s6, zero, process_entry
        b               copy_word
        b               process_entry
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d2d1c1933bc9..5f5f44edc77d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
        struct pt_regs regs;
+       mm_segment_t old_fs = get_fs();
        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
                        prepare_frametrace(&regs);
                }
        }
+       /*
+        * show_stack() deals exclusively with kernel mode, so be sure to access
+        * the stack in the kernel (not user) address space.
+        */
+       set_fs(KERNEL_DS);
        show_stacktrace(task, &regs);
+       set_fs(old_fs);
 }
 
 static void show_code(unsigned int __user *pc)
@@ -1518,6 +1525,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;
        enum ctx_state prev_state;
+       mm_segment_t old_fs = get_fs();
 
        prev_state = exception_enter();
        show_regs(regs);
@@ -1539,8 +1547,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
                dump_tlb_all();
        }
 
+       if (!user_mode(regs))
+               set_fs(KERNEL_DS);
+
        show_code((unsigned int __user *) regs->cp0_epc);
 
+       set_fs(old_fs);
+
        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do {                                                        \
                : "memory");                                \
 } while(0)
 
-#define     StoreDW(addr, value, res) \
+#define     _StoreDW(addr, value, res) \
 do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..d01ade63492f 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
 {
        return ltq_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 185e68261f45..a7f7d9ffb402 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -148,6 +148,7 @@ int get_c0_perfcount_int(void)
 
        return mips_cpu_perf_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -165,14 +166,17 @@ unsigned int get_c0_compare_int(void)
 
 static void __init init_rtc(void)
 {
-       /* stop the clock whilst setting it up */
-       CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+       unsigned char freq, ctrl;
 
-       /* 32KHz time base */
-       CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+       /* Set 32KHz time base if not already set */
+       freq = CMOS_READ(RTC_FREQ_SELECT);
+       if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+               CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
 
-       /* start the clock */
-       CMOS_WRITE(RTC_24H, RTC_CONTROL);
+       /* Ensure SET bit is clear so RTC can run */
+       ctrl = CMOS_READ(RTC_CONTROL);
+       if (ctrl & RTC_SET)
+               CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
 }
 
 void __init plat_time_init(void)
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        return -1;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 67889fcea8aa..ab73f6f405bb 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
 {
        return gic_get_c0_perfcount_int();
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 void __init plat_time_init(void)
 {
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 7cf91b92e9d1..199ace4ca1ad 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
 {
        return rt_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
 
 #define VISEntryHalf                                   \
-       rd              %fprs, %o5;                     \
-       andcc           %o5, FPRS_FEF, %g0;             \
-       be,pt           %icc, 297f;                     \
-        sethi          %hi(298f), %g7;                 \
-       sethi           %hi(VISenterhalf), %g1;         \
-       jmpl            %g1 + %lo(VISenterhalf), %g0;   \
-        or             %g7, %lo(298f), %g7;            \
-       clr             %o5;                            \
-297:   wr              %o5, FPRS_FEF, %fprs;           \
-298:
+       VISEntry
+
+#define VISExitHalf                                    \
+       VISExit
 
 #define VISEntryHalfFast(fail_label)                   \
        rd              %fprs, %o5;                     \
@@ -47,7 +41,7 @@
        ba,a,pt         %xcc, fail_label;               \
 297:   wr              %o5, FPRS_FEF, %fprs;
 
-#define VISExitHalf                                    \
+#define VISExitHalfFast                                        \
        wr              %o5, 0, %fprs;
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
        add             %o0, 0x40, %o0
        bne,pt          %icc, 1b
         LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+       VISExitHalfFast
+#else
        VISExitHalf
-
+#endif
        brz,pn          %o2, .Lexit
         cmp            %o2, 19
        ble,pn          %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub            [%g6 + TI_FPSAVED], %g3
 
         stx            %g3, [%g6 + TI_GSR]
 2:     add             %g6, %g1, %g3
-       cmp             %o5, FPRS_DU
-       be,pn           %icc, 6f
-        sll            %g1, 3, %g1
+       mov             FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+       sll             %g1, 3, %g1
        stb             %o5, [%g3 + TI_FPSAVED]
        rd              %gsr, %g2
        add             %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1:        ldub            [%g6 + TI_FPSAVED], %g3
        .align          32
 80:    jmpl            %g7 + %g0, %g0
         nop
-
-6:     ldub            [%g3 + TI_FPSAVED], %o5
-       or              %o5, FPRS_DU, %o5
-       add             %g6, TI_FPREGS+0x80, %g2
-       stb             %o5, [%g3 + TI_FPSAVED]
-
-       sll             %g1, 5, %g1
-       add             %g6, TI_FPREGS+0xc0, %g3
-       wr              %g0, FPRS_FEF, %fprs
-       membar          #Sync
-       stda            %f32, [%g2 + %g1] ASI_BLK_P
-       stda            %f48, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 80f
-        nop
-
-       .align          32
-80:    jmpl            %g7 + %g0, %g0
-        nop
-
-       .align          32
-VISenterhalf:
-       ldub            [%g6 + TI_FPDEPTH], %g1
-       brnz,a,pn       %g1, 1f
-        cmp            %g1, 1
-       stb             %g0, [%g6 + TI_FPSAVED]
-       stx             %fsr, [%g6 + TI_XFSR]
-       clr             %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %g0, FPRS_FEF, %fprs
-
-1:     bne,pn          %icc, 2f
-        srl            %g1, 1, %g1
-       ba,pt           %xcc, vis1
-        sub            %g7, 8, %g7
-2:     addcc           %g6, %g1, %g3
-       sll             %g1, 3, %g1
-       andn            %o5, FPRS_DU, %g2
-       stb             %g2, [%g3 + TI_FPSAVED]
-
-       rd              %gsr, %g2
-       add             %g6, %g1, %g3
-       stx             %g2, [%g3 + TI_GSR]
-       add             %g6, %g1, %g2
-       stx             %fsr, [%g2 + TI_XFSR]
-       sll             %g1, 5, %g1
-3:     andcc           %o5, FPRS_DL, %g0
-       be,pn           %icc, 4f
-        add            %g6, TI_FPREGS, %g2
-
-       add             %g6, TI_FPREGS+0x40, %g3
-       membar          #Sync
-       stda            %f0, [%g2 + %g1] ASI_BLK_P
-       stda            %f16, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 4f
-        nop
-
-       .align          32
-4:     and             %o5, FPRS_DU, %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
                unsigned long *);
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
        if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
                return -EFAULT;
 
-       memset(to, 0, sizeof(*to));
-
        err = __get_user(to->si_signo, &from->si_signo);
        err |= __get_user(to->si_errno, &from->si_errno);
        err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 02c2eff7478d..4bd6c197563d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -793,8 +793,6 @@ retint_kernel:
 restore_c_regs_and_iret:
        RESTORE_C_REGS
        REMOVE_PT_GPREGS_FROM_STACK 8
-
-irq_return:
        INTERRUPT_RETURN
 
 ENTRY(native_iret)
@@ -1413,11 +1411,12 @@ ENTRY(nmi)
         *  If the variable is not set and the stack is not the NMI
         *  stack then:
         *    o Set the special variable on the stack
-        *    o Copy the interrupt frame into a "saved" location on the stack
-        *    o Copy the interrupt frame into a "copy" location on the stack
+        *    o Copy the interrupt frame into an "outermost" location on the
+        *      stack
+        *    o Copy the interrupt frame into an "iret" location on the stack
         *    o Continue processing the NMI
         *  If the variable is set or the previous stack is the NMI stack:
-        *    o Modify the "copy" location to jump to the repeate_nmi
+        *    o Modify the "iret" location to jump to the repeat_nmi
         *    o return back to the first NMI
         *
         * Now on exit of the first NMI, we first clear the stack variable
@@ -1426,32 +1425,151 @@ ENTRY(nmi)
         * a nested NMI that updated the copy interrupt stack frame, a
         * jump will be made to the repeat_nmi code that will handle the second
         * NMI.
+        *
+        * However, espfix prevents us from directly returning to userspace
+        * with a single IRET instruction.  Similarly, IRET to user mode
+        * can fault.  We therefore handle NMIs from user space like
+        * other IST entries.
         */
 
        /* Use %rdx as our temp variable throughout */
        pushq_cfi %rdx
        CFI_REL_OFFSET rdx, 0
 
+       testb   $3, CS-RIP+8(%rsp)
+       jz      .Lnmi_from_kernel
+
        /*
-        * If %cs was not the kernel segment, then the NMI triggered in user
-        * space, which means it is definitely not nested.
+        * NMI from user mode.  We need to run on the thread stack, but we
+        * can't go through the normal entry paths: NMIs are masked, and
+        * we don't want to enable interrupts, because then we'll end
+        * up in an awkward situation in which IRQs are on but NMIs
+        * are off.
         */
-       cmpl $__KERNEL_CS, 16(%rsp)
-       jne first_nmi
+
+       SWAPGS
+       cld
+       movq    %rsp, %rdx
+       movq    PER_CPU_VAR(kernel_stack), %rsp
+       pushq   5*8(%rdx)       /* pt_regs->ss */
+       pushq   4*8(%rdx)       /* pt_regs->rsp */
+       pushq   3*8(%rdx)       /* pt_regs->flags */
+       pushq   2*8(%rdx)       /* pt_regs->cs */
+       pushq   1*8(%rdx)       /* pt_regs->rip */
+       pushq   $-1             /* pt_regs->orig_ax */
+       pushq   %rdi            /* pt_regs->di */
+       pushq   %rsi            /* pt_regs->si */
+       pushq   (%rdx)          /* pt_regs->dx */
+       pushq   %rcx            /* pt_regs->cx */
+       pushq   %rax            /* pt_regs->ax */
+       pushq   %r8             /* pt_regs->r8 */
+       pushq   %r9             /* pt_regs->r9 */
+       pushq   %r10            /* pt_regs->r10 */
+       pushq   %r11            /* pt_regs->r11 */
+       pushq   %rbx            /* pt_regs->rbx */
+       pushq   %rbp            /* pt_regs->rbp */
+       pushq   %r12            /* pt_regs->r12 */
+       pushq   %r13            /* pt_regs->r13 */
+       pushq   %r14            /* pt_regs->r14 */
+       pushq   %r15            /* pt_regs->r15 */
 
        /*
-        * Check the special variable on the stack to see if NMIs are
-        * executing.
+        * At this point we no longer need to worry about stack damage
+        * due to nesting -- we're on the normal thread stack and we're
+        * done with the NMI stack.
+        */
+       movq    %rsp, %rdi
+       movq    $-1, %rsi
+       call    do_nmi
+
+       /*
+        * Return back to user mode.  We must *not* do the normal exit
+        * work, because we don't want to enable interrupts.  Fortunately,
+        * do_nmi doesn't modify pt_regs.
+        */
+       SWAPGS
+       jmp     restore_c_regs_and_iret
+
+.Lnmi_from_kernel:
+       /*
+        * Here's what our stack frame will look like:
+        * +---------------------------------------------------------+
+        * | original SS                                             |
+        * | original Return RSP                                     |
+        * | original RFLAGS                                         |
+        * | original CS                                             |
+        * | original RIP                                            |
+        * +---------------------------------------------------------+
+        * | temp storage for rdx                                    |
+        * +---------------------------------------------------------+
+        * | "NMI executing" variable                                |
+        * +---------------------------------------------------------+
+        * | iret SS          } Copied from "outermost" frame        |
+        * | iret Return RSP  } on each loop iteration; overwritten  |
+        * | iret RFLAGS      } by a nested NMI to force another     |
+        * | iret CS          } iteration if needed.                 |
+        * | iret RIP         }                                      |
+        * +---------------------------------------------------------+
+        * | outermost SS          } initialized in first_nmi;       |
+        * | outermost Return RSP  } will not be changed before      |
+        * | outermost RFLAGS      } NMI processing is done.         |
+        * | outermost CS          } Copied to "iret" frame on each  |
+        * | outermost RIP         } iteration.                      |
+        * +---------------------------------------------------------+
+        * | pt_regs                                                 |
+        * +---------------------------------------------------------+
+        *
+        * The "original" frame is used by hardware.  Before re-enabling
+        * NMIs, we need to be done with it, and we need to leave enough
+        * space for the asm code here.
+        *
+        * We return by executing IRET while RSP points to the "iret" frame.
+        * That will either return for real or it will loop back into NMI
+        * processing.
+        *
+        * The "outermost" frame is copied to the "iret" frame on each
+        * iteration of the loop, so each iteration starts with the "iret"
+        * frame pointing to the final return target.
+        */
+
+       /*
+        * Determine whether we're a nested NMI.
+        *
+        * If we interrupted kernel code between repeat_nmi and
+        * end_repeat_nmi, then we are a nested NMI.  We must not
+        * modify the "iret" frame because it's being written by
+        * the outer NMI.  That's okay; the outer NMI handler is
+        * about to call do_nmi anyway, so we can just
+        * resume the outer NMI.
+        */
+
+       movq    $repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      1f
+       movq    $end_repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      nested_nmi_out
+1:
+
+       /*
+        * Now check "NMI executing".  If it's set, then we're nested.
+        * This will not detect if we interrupted an outer NMI just
+        * before IRET.
         */
        cmpl $1, -8(%rsp)
        je nested_nmi
 
        /*
-        * Now test if the previous stack was an NMI stack.
-        * We need the double check. We check the NMI stack to satisfy the
-        * race when the first NMI clears the variable before returning.
-        * We check the variable because the first NMI could be in a
-        * breakpoint routine using a breakpoint stack.
+        * Now test if the previous stack was an NMI stack.  This covers
+        * the case where we interrupt an outer NMI after it clears
+        * "NMI executing" but before IRET.  We need to be careful, though:
+        * there is one case in which RSP could point to the NMI stack
+        * despite there being no NMI active: naughty userspace controls
+        * RSP at the very beginning of the SYSCALL targets.  We can
+        * pull a fast one on naughty userspace, though: we program
+        * SYSCALL to mask DF, so userspace cannot cause DF to be set
+        * if it controls the kernel's RSP.  We set DF before we clear
+        * "NMI executing".
         */
        lea     6*8(%rsp), %rdx
        /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
@@ -1462,25 +1580,21 @@ ENTRY(nmi)
        cmpq    %rdx, 4*8(%rsp)
        /* If it is below the NMI stack, it is a normal NMI */
        jb      first_nmi
-       /* Ah, it is within the NMI stack, treat it as nested */
+
+       /* Ah, it is within the NMI stack. */
+
+       testb   $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
+       jz      first_nmi       /* RSP was user controlled. */
+
+       /* This is a nested NMI. */
 
        CFI_REMEMBER_STATE
 
 nested_nmi:
        /*
-        * Do nothing if we interrupted the fixup in repeat_nmi.
-        * It's about to repeat the NMI handler, so we are fine
-        * with ignoring this one.
+        * Modify the "iret" frame to point to repeat_nmi, forcing another
+        * iteration of NMI handling.
         */
-       movq $repeat_nmi, %rdx
-       cmpq 8(%rsp), %rdx
-       ja 1f
-       movq $end_repeat_nmi, %rdx
-       cmpq 8(%rsp), %rdx
-       ja nested_nmi_out
-
-1:
-       /* Set up the interrupted NMIs stack to jump to repeat_nmi */
        leaq -1*8(%rsp), %rdx
        movq %rdx, %rsp
        CFI_ADJUST_CFA_OFFSET 1*8
@@ -1499,60 +1613,23 @@ nested_nmi_out:
        popq_cfi %rdx
        CFI_RESTORE rdx
 
-       /* No need to check faults here */
+       /* We are returning to kernel mode, so this cannot result in a fault. */
        INTERRUPT_RETURN
 
        CFI_RESTORE_STATE
 first_nmi:
-       /*
-        * Because nested NMIs will use the pushed location that we
-        * stored in rdx, we must keep that space available.
-        * Here's what our stack frame will look like:
-        * +-------------------------+
-        * | original SS             |
-        * | original Return RSP     |
-        * | original RFLAGS         |
-        * | original CS             |
-        * | original RIP            |
-        * +-------------------------+
-        * | temp storage for rdx    |
-        * +-------------------------+
-        * | NMI executing variable  |
-        * +-------------------------+
-        * | copied SS               |
-        * | copied Return RSP       |
-        * | copied RFLAGS           |
-        * | copied CS               |
-        * | copied RIP              |
-        * +-------------------------+
-        * | Saved SS                |
-        * | Saved Return RSP        |
-        * | Saved RFLAGS            |
-        * | Saved CS                |
-        * | Saved RIP               |
-        * +-------------------------+
-        * | pt_regs                 |
-        * +-------------------------+
-        *
-        * The saved stack frame is used to fix up the copied stack frame
-        * that a nested NMI may change to make the interrupted NMI iret jump
-        * to the repeat_nmi. The original stack frame and the temp storage
-        * is also used by nested NMIs and can not be trusted on exit.
-        */
-       /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+       /* Restore rdx. */
        movq (%rsp), %rdx
        CFI_RESTORE rdx
 
-       /* Set the NMI executing variable on the stack. */
+       /* Set "NMI executing" on the stack. */
        pushq_cfi $1
 
-       /*
-        * Leave room for the "copied" frame
-        */
+       /* Leave room for the "iret" frame */
        subq $(5*8), %rsp
        CFI_ADJUST_CFA_OFFSET 5*8
 
-       /* Copy the stack frame to the Saved frame */
+       /* Copy the "original" frame to the "outermost" frame */
        .rept 5
        pushq_cfi 11*8(%rsp)
        .endr
@@ -1560,6 +1637,7 @@ first_nmi:
 
        /* Everything up to here is safe from nested NMIs */
 
+repeat_nmi:
        /*
         * If there was a nested NMI, the first NMI's iret will return
         * here. But NMIs are still enabled and we can take another
@@ -1568,16 +1646,21 @@ first_nmi:
         * it will just return, as we are about to repeat an NMI anyway.
         * This makes it safe to copy to the stack frame that a nested
         * NMI will update.
-        */
-repeat_nmi:
-       /*
-        * Update the stack variable to say we are still in NMI (the update
-        * is benign for the non-repeat case, where 1 was pushed just above
-        * to this very stack slot).
+        *
+        * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
+        * we're repeating an NMI, gsbase has the same value that it had on
+        * the first iteration.  paranoid_entry will load the kernel
+        * gsbase if needed before we call do_nmi.
+        *
+        * Set "NMI executing" in case we came back here via IRET.
         */
        movq $1, 10*8(%rsp)
 
-       /* Make another copy, this one may be modified by nested NMIs */
+       /*
+        * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
+        * here must not modify the "iret" frame while we're writing to
+        * it or it will end up containing garbage.
+        */
        addq $(10*8), %rsp
        CFI_ADJUST_CFA_OFFSET -10*8
        .rept 5
@@ -1588,9 +1671,9 @@ repeat_nmi:
 end_repeat_nmi:
 
        /*
-        * Everything below this point can be preempted by a nested
-        * NMI if the first NMI took an exception and reset our iret stack
-        * so that we repeat another NMI.
+        * Everything below this point can be preempted by a nested NMI.
+        * If this happens, then the inner NMI will change the "iret"
+        * frame to point back to repeat_nmi.
         */
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        ALLOC_PT_GPREGS_ON_STACK
@@ -1605,29 +1688,11 @@ end_repeat_nmi:
        call paranoid_entry
        DEFAULT_FRAME 0
 
-       /*
-        * Save off the CR2 register. If we take a page fault in the NMI then
-        * it could corrupt the CR2 value. If the NMI preempts a page fault
-        * handler before it was able to read the CR2 register, and then the
-        * NMI itself takes a page fault, the page fault that was preempted
-        * will read the information from the NMI page fault and not the
-        * origin fault. Save it off and restore it if it changes.
-        * Use the r12 callee-saved register.
-        */
-       movq %cr2, %r12
-
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
        movq $-1,%rsi
        call do_nmi
 
-       /* Did the NMI take a page fault? Restore cr2 if it did */
-       movq %cr2, %rcx
-       cmpq %rcx, %r12
-       je 1f
-       movq %r12, %cr2
-1:
-       
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz nmi_restore
 nmi_swapgs:
@@ -1635,12 +1700,27 @@ nmi_swapgs:
 nmi_restore:
        RESTORE_EXTRA_REGS
        RESTORE_C_REGS
-       /* Pop the extra iret frame at once */
+
+       /* Point RSP at the "iret" frame. */
        REMOVE_PT_GPREGS_FROM_STACK 6*8
 
-       /* Clear the NMI executing stack variable */
-       movq $0, 5*8(%rsp)
-       jmp irq_return
+       /*
+        * Clear "NMI executing".  Set DF first so that we can easily
+        * distinguish the remaining code between here and IRET from
+        * the SYSCALL entry and exit paths.  On a native kernel, we
+        * could just inspect RIP, but, on paravirt kernels,
+        * INTERRUPT_RETURN can translate into a jump into a
+        * hypercall page.
+        */
+       std
+       movq    $0, 5*8(%rsp)           /* clear "NMI executing" */
+
+       /*
+        * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
+        * stack in a single instruction.  We are returning to kernel
+        * mode, so this cannot result in a fault.
+        */
+       INTERRUPT_RETURN
        CFI_ENDPROC
 END(nmi)
 
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index c3e985d1751c..d05bd2e2ee91 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
 NOKPROBE_SYMBOL(default_do_nmi);
 
 /*
- * NMIs can hit breakpoints which will cause it to lose its
- * NMI context with the CPU when the breakpoint does an iret.
- */
-#ifdef CONFIG_X86_32
-/*
- * For i386, NMIs use the same stack as the kernel, and we can
- * add a workaround to the iret problem in C (preventing nested
- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
- * can be in:
+ * NMIs can page fault or hit breakpoints which will cause them to lose
+ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
+ *
+ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
+ * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
+ * if the outer NMI came from kernel mode, but we can still nest if the
+ * outer NMI came from user mode.
+ *
+ * To handle these nested NMIs, we have three states:
  *
  *  1) not running
  *  2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
  * (Note, the latch is binary, thus multiple NMIs triggering,
  *  when one is running, are ignored. Only one NMI is restarted.)
  *
- * If an NMI hits a breakpoint that executes an iret, another
- * NMI can preempt it. We do not want to allow this new NMI
- * to run, but we want to execute it when the first one finishes.
- * We set the state to "latched", and the exit of the first NMI will
- * perform a dec_return, if the result is zero (NOT_RUNNING), then
- * it will simply exit the NMI handler. If not, the dec_return
- * would have set the state to NMI_EXECUTING (what we want it to
- * be when we are running). In this case, we simply jump back
- * to rerun the NMI handler again, and restart the 'latched' NMI.
+ * If an NMI executes an iret, another NMI can preempt it. We do not
+ * want to allow this new NMI to run, but we want to execute it when the
+ * first one finishes.  We set the state to "latched", and the exit of
+ * the first NMI will perform a dec_return, if the result is zero
+ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
+ * dec_return would have set the state to NMI_EXECUTING (what we want it
+ * to be when we are running). In this case, we simply jump back to
+ * rerun the NMI handler again, and restart the 'latched' NMI.
  *
  * No trap (breakpoint or page fault) should be hit before nmi_restart,
  * thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
 
-#define nmi_nesting_preprocess(regs)                                   \
-       do {                                                            \
-               if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {      \
-                       this_cpu_write(nmi_state, NMI_LATCHED);         \
-                       return;                                         \
-               }                                                       \
-               this_cpu_write(nmi_state, NMI_EXECUTING);               \
-               this_cpu_write(nmi_cr2, read_cr2());                    \
-       } while (0);                                                    \
-       nmi_restart:
-
-#define nmi_nesting_postprocess()                                      \
-       do {                                                            \
-               if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))     \
-                       write_cr2(this_cpu_read(nmi_cr2));              \
-               if (this_cpu_dec_return(nmi_state))                     \
-                       goto nmi_restart;                               \
-       } while (0)
-#else /* x86_64 */
+#ifdef CONFIG_X86_64
 /*
- * In x86_64 things are a bit more difficult. This has the same problem
- * where an NMI hitting a breakpoint that calls iret will remove the
- * NMI context, allowing a nested NMI to enter. What makes this more
- * difficult is that both NMIs and breakpoints have their own stack.
- * When a new NMI or breakpoint is executed, the stack is set to a fixed
- * point. If an NMI is nested, it will have its stack set at that same
- * fixed address that the first NMI had, and will start corrupting the
- * stack. This is handled in entry_64.S, but the same problem exists with
- * the breakpoint stack.
+ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
+ * some care, the inner breakpoint will clobber the outer breakpoint's
+ * stack.
  *
- * If a breakpoint is being processed, and the debug stack is being used,
- * if an NMI comes in and also hits a breakpoint, the stack pointer
- * will be set to the same fixed address as the breakpoint that was
- * interrupted, causing that stack to be corrupted. To handle this case,
- * check if the stack that was interrupted is the debug stack, and if
- * so, change the IDT so that new breakpoints will use the current stack
- * and not switch to the fixed address. On return of the NMI, switch back
- * to the original IDT.
+ * If a breakpoint is being processed, and the debug stack is being
+ * used, if an NMI comes in and also hits a breakpoint, the stack
+ * pointer will be set to the same fixed address as the breakpoint that
+ * was interrupted, causing that stack to be corrupted. To handle this
+ * case, check if the stack that was interrupted is the debug stack, and
+ * if so, change the IDT so that new breakpoints will use the current
+ * stack and not switch to the fixed address. On return of the NMI,
+ * switch back to the original IDT.
  */
 static DEFINE_PER_CPU(int, update_debug_stack);
+#endif
 
-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+dotraplinkage notrace void
+do_nmi(struct pt_regs *regs, long error_code)
 {
+       if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+               this_cpu_write(nmi_state, NMI_LATCHED);
+               return;
+       }
+       this_cpu_write(nmi_state, NMI_EXECUTING);
+       this_cpu_write(nmi_cr2, read_cr2());
+nmi_restart:
+
+#ifdef CONFIG_X86_64
        /*
         * If we interrupted a breakpoint, it is possible that
         * the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
                debug_stack_set_zero();
                this_cpu_write(update_debug_stack, 1);
        }
-}
-
-static inline void nmi_nesting_postprocess(void)
-{
-       if (unlikely(this_cpu_read(update_debug_stack))) {
-               debug_stack_reset();
-               this_cpu_write(update_debug_stack, 0);
-       }
-}
 #endif
 
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-       nmi_nesting_preprocess(regs);
-
        nmi_enter();
 
        inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
 
        nmi_exit();
 
-       /* On i386, may loop back to preprocess */
-       nmi_nesting_postprocess();
+#ifdef CONFIG_X86_64
+       if (unlikely(this_cpu_read(update_debug_stack))) {
+               debug_stack_reset();
+               this_cpu_write(update_debug_stack, 0);
+       }
+#endif
+
+       if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+               write_cr2(this_cpu_read(nmi_cr2));
+       if (this_cpu_dec_return(nmi_state))
+               goto nmi_restart;
 }
 NOKPROBE_SYMBOL(do_nmi);
 
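
Condensed, the NOT_RUNNING/EXECUTING/LATCHED logic that do_nmi() now carries
inline behaves like the following single-CPU sketch (illustration only; the
real code uses this_cpu_* accessors and also saves/restores CR2 around the
handlers):

    /* Sketch of the three-state NMI latch described above. */
    enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };
    static int nmi_state;                   /* per-CPU in the real code */

    static void nmi_sketch(void)
    {
            if (nmi_state != NMI_NOT_RUNNING) {
                    /* Nested NMI: swallow it; the outer one will replay it. */
                    nmi_state = NMI_LATCHED;
                    return;
            }
            nmi_state = NMI_EXECUTING;
    restart:
            /* ... run the NMI handlers ... */

            /*
             * EXECUTING -> NOT_RUNNING means we are done; LATCHED ->
             * EXECUTING means another NMI arrived while we were running,
             * so loop and service it now.
             */
            if (--nmi_state != NMI_NOT_RUNNING)
                    goto restart;
    }
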
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 9d28383fc1e7..c4ea87eedf8a 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
 
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.apic->pending_events;
+       return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
 }
 
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 46957ead3060..a671e837228d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
        pte_t pte;
        unsigned long pfn;
        struct page *page;
+       unsigned char dummy;
 
        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
        pte = pfn_pte(pfn, prot);
 
+       /*
+        * Careful: update_va_mapping() will fail if the virtual address
+        * we're poking isn't populated in the page tables.  We don't
+        * need to worry about the direct map (that's always in the page
+        * tables), but we need to be careful about vmap space.  In
+        * particular, the top level page table can lazily propagate
+        * entries between processes, so if we've switched mms since we
+        * vmapped the target in the first place, we might not have the
+        * top-level page table entry populated.
+        *
+        * We disable preemption because we want the same mm active when
+        * we probe the target and when we issue the hypercall.  We'll
+        * have the same nominal mm, but if we're a kernel thread, lazy
+        * mm dropping could change our pgd.
+        *
+        * Out of an abundance of caution, this uses __get_user() to fault
+        * in the target address just in case there's some obscure case
+        * in which the target address isn't readable.
+        */
+
+       preempt_disable();
+
+       pagefault_disable();    /* Avoid warnings due to being atomic. */
+       __get_user(dummy, (unsigned char __user __force *)v);
+       pagefault_enable();
+
        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();
 
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
                                BUG();
        } else
                kmap_flush_unused();
+
+       preempt_enable();
 }
 
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;
 
+       /*
+        * We need to mark all aliases of the LDT pages RO.  We
+        * don't need to call vm_flush_aliases(), though, since that's
+        * only responsible for flushing aliases out of the TLBs, not the
+        * page tables, and Xen will flush the TLB for us if needed.
+        *
+        * To avoid confusing future readers: none of this is necessary
+        * to load the LDT.  The hypervisor only checks this when the
+        * LDT is faulted in due to subsequent descriptor access.
+        */
+
        for(i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 53f253574abe..010ce0b1f517 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -522,6 +522,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 #  define rbd_assert(expr)     ((void) 0)
 #endif /* !RBD_DEBUG */
 
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1797,6 +1798,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
        obj_request_done_set(obj_request);
 }
 
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p\n", __func__, obj_request);
+
+       if (obj_request_img_data_test(obj_request))
+               rbd_osd_copyup_callback(obj_request);
+       else
+               obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
 {
@@ -1845,6 +1856,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                rbd_osd_discard_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
+               rbd_osd_call_callback(obj_request);
+               break;
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
@@ -2509,13 +2522,15 @@ out_unwind:
 }
 
 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
        struct page **pages;
        u32 page_count;
 
+       dout("%s: obj %p\n", __func__, obj_request);
+
        rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
                obj_request->type == OBJ_REQUEST_NODATA);
        rbd_assert(obj_request_img_data_test(obj_request));
@@ -2542,9 +2557,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
        if (!obj_request->result)
                obj_request->xferred = obj_request->length;
 
-       /* Finish up with the normal image object callback */
-
-       rbd_img_obj_callback(obj_request);
+       obj_request_done_set(obj_request);
 }
 
 static void
@@ -2629,7 +2642,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
        /* All set, send it off. */
 
-       orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
        img_result = rbd_obj_request_submit(osdc, orig_request);
        if (!img_result)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
 static void start_khwrngd(void)
 {
        hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-       if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+       if (IS_ERR(hwrng_fill)) {
                pr_err("hwrng_fill thread creation failed");
                hwrng_fill = NULL;
        }
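
The one-line fix above matters because kthread_run() can return error pointers other than ERR_PTR(-ENOMEM). A minimal sketch of the corrected error handling, with a hypothetical thread function standing in for hwrng_fillfn():

        #include <linux/err.h>
        #include <linux/kthread.h>
        #include <linux/printk.h>

        static struct task_struct *fill_thread;

        static int fill_fn(void *unused)        /* hypothetical worker */
        {
                return 0;
        }

        static void start_fill_thread(void)
        {
                fill_thread = kthread_run(fill_fn, NULL, "fill");
                /* IS_ERR() catches every failure, not just -ENOMEM. */
                if (IS_ERR(fill_thread)) {
                        pr_err("fill thread creation failed\n");
                        fill_thread = NULL;
                }
        }
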
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index a43048b5b05f..3c1a123f909c 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -900,6 +900,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+       {
+               /*
+                * CPU fan speed going up and down on Dell Studio XPS 8100
+                * for unknown reasons.
+                */
+               .ident = "Dell Studio XPS 8100",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+               },
+       },
+       { }
+};
+
 /*
  * Probe for the presence of a supported laptop.
  */
@@ -911,7 +926,8 @@ static int __init i8k_probe(void)
        /*
         * Get DMI information
         */
-       if (!dmi_check_system(i8k_dmi_table)) {
+       if (!dmi_check_system(i8k_dmi_table) ||
+           dmi_check_system(i8k_blacklist_dmi_table)) {
                if (!ignore_dmi && !force)
                        return -ENODEV;
 
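
The probe change above keeps the positive DMI match but also consults a blacklist. In outline, and with placeholder table names rather than the driver's, the check looks like this:

        #include <linux/dmi.h>
        #include <linux/errno.h>
        #include <linux/init.h>

        static struct dmi_system_id supported_dmi_table[] __initdata = {
                /* entries describing supported systems */
                { }
        };

        static struct dmi_system_id blacklist_dmi_table[] __initdata = {
                /* entries describing known-problematic systems */
                { }
        };

        static int __init example_probe(void)
        {
                /*
                 * dmi_check_system() returns the number of matching entries,
                 * so accept the machine only if it matches the supported
                 * table and does not match the blacklist.
                 */
                if (!dmi_check_system(supported_dmi_table) ||
                    dmi_check_system(blacklist_dmi_table))
                        return -ENODEV;

                return 0;
        }
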
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index 0dd8a4b12747..4a375ead70e9 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -37,7 +37,8 @@
  *     Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL
  *     or PA PLL available on keystone2. These PLLs are controlled by
  *     this register. Main PLL is controlled by a PLL controller.
- * @pllm: PLL register map address
+ * @pllm: PLL register map address for multiplier bits
+ * @pllod: PLL register map address for post divider bits
  * @pll_ctl0: PLL controller map address
  * @pllm_lower_mask: multiplier lower mask
  * @pllm_upper_mask: multiplier upper mask
@@ -53,6 +54,7 @@ struct clk_pll_data {
        u32 phy_pllm;
        u32 phy_pll_ctl0;
        void __iomem *pllm;
+       void __iomem *pllod;
        void __iomem *pll_ctl0;
        u32 pllm_lower_mask;
        u32 pllm_upper_mask;
@@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
                /* read post divider from od bits*/
                postdiv = ((val & pll_data->clkod_mask) >>
                                 pll_data->clkod_shift) + 1;
-       else
+       else if (pll_data->pllod) {
+               postdiv = readl(pll_data->pllod);
+               postdiv = ((postdiv & pll_data->clkod_mask) >>
+                               pll_data->clkod_shift) + 1;
+       } else
                postdiv = pll_data->postdiv;
 
        rate /= (prediv + 1);
@@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
                /* assume the PLL has output divider register bits */
                pll_data->clkod_mask = CLKOD_MASK;
                pll_data->clkod_shift = CLKOD_SHIFT;
+
+               /*
+                * Check if there is a post-divider register. If not,
+                * assume the od bits are part of the control register.
+                */
+               i = of_property_match_string(node, "reg-names",
+                                            "post-divider");
+               pll_data->pllod = of_iomap(node, i);
        }
 
        i = of_property_match_string(node, "reg-names", "control");
        pll_data->pll_ctl0 = of_iomap(node, i);
        if (!pll_data->pll_ctl0) {
                pr_err("%s: ioremap failed\n", __func__);
+               iounmap(pll_data->pllod);
                goto out;
        }
 
@@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
                pll_data->pllm = of_iomap(node, i);
                if (!pll_data->pllm) {
                        iounmap(pll_data->pll_ctl0);
+                       iounmap(pll_data->pllod);
                        goto out;
                }
        }
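
The keystone change above maps an optional third register by name. A condensed sketch of that lookup, assuming (as the driver does) that a missing "post-divider" entry makes of_iomap() return NULL and that the pointer is then passed straight to iounmap() on the error path:

        #include <linux/errno.h>
        #include <linux/io.h>
        #include <linux/of.h>
        #include <linux/of_address.h>

        struct pll_regs {
                void __iomem *ctl0;
                void __iomem *pllod;    /* optional post-divider register */
        };

        static int map_pll_regs(struct device_node *node, struct pll_regs *regs)
        {
                int i;

                /*
                 * of_property_match_string() returns a negative value when
                 * the name is absent, and of_iomap() then fails and returns
                 * NULL, which is how the optional register stays unmapped.
                 */
                i = of_property_match_string(node, "reg-names", "post-divider");
                regs->pllod = of_iomap(node, i);

                i = of_property_match_string(node, "reg-names", "control");
                regs->ctl0 = of_iomap(node, i);
                if (!regs->ctl0) {
                        iounmap(regs->pllod);   /* may be NULL, as in the driver */
                        return -ENOMEM;
                }

                return 0;
        }
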
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 48f453555f1f..ede9e9e3c419 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                /* This was never tested by Intel
                 * for more than one dst buffer, I think. */
-               BUG_ON(req->dst->length < nbytes);
                req_ctx->dst = NULL;
                if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                                        flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 67f80813a06f..e4311ce0cd78 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -494,8 +494,9 @@ out:
 static int ccm4309_aes_nx_encrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
-       u8 *iv = nx_ctx->priv.ccm.iv;
+       u8 *iv = rctx->iv;
 
        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct blkcipher_desc desc;
-       u8 *iv = nx_ctx->priv.ccm.iv;
+       u8 *iv = rctx->iv;
 
        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 2617cd4d54dd..dd7e9f3f5b6b 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
        if (key_len < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
 
-       memcpy(nx_ctx->priv.ctr.iv,
+       memcpy(nx_ctx->priv.ctr.nonce,
               in_key + key_len - CTR_RFC3686_NONCE_SIZE,
               CTR_RFC3686_NONCE_SIZE);
 
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
                                unsigned int           nbytes)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
-       u8 *iv = nx_ctx->priv.ctr.iv;
+       u8 iv[16];
 
+       memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
        memcpy(iv + CTR_RFC3686_NONCE_SIZE,
               desc->info, CTR_RFC3686_IV_SIZE);
        iv[12] = iv[13] = iv[14] = 0;
        iv[15] = 1;
 
-       desc->info = nx_ctx->priv.ctr.iv;
+       desc->info = iv;
 
        return ctr_aes_nx_crypt(desc, dst, src, nbytes);
 }
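
The net effect of the CTR change above is that the 16-byte RFC 3686 counter block is assembled on the caller's stack from the per-key nonce and the per-request IV, instead of living in the shared transform context. Roughly, in plain C with the sizes spelled out:

        #include <stdint.h>
        #include <string.h>

        #define NONCE_SIZE 4    /* CTR_RFC3686_NONCE_SIZE */
        #define IV_SIZE    8    /* CTR_RFC3686_IV_SIZE */

        /*
         * Layout: 4-byte per-key nonce | 8-byte per-request IV | 32-bit
         * big-endian counter starting at 1.  Building this in a local
         * buffer keeps concurrent requests on one key from clobbering
         * each other's counter block.
         */
        static void rfc3686_counter_block(uint8_t out[16],
                                          const uint8_t nonce[NONCE_SIZE],
                                          const uint8_t iv[IV_SIZE])
        {
                memcpy(out, nonce, NONCE_SIZE);
                memcpy(out + NONCE_SIZE, iv, IV_SIZE);
                out[12] = out[13] = out[14] = 0;
                out[15] = 1;
        }
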
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 88c562434bc0..c6ebeb644db4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -330,6 +330,7 @@ out:
 static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct blkcipher_desc desc;
        unsigned int nbytes = req->cryptlen;
@@ -339,7 +340,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-       desc.info = nx_ctx->priv.gcm.iv;
+       desc.info = rctx->iv;
        /* initialize the counter */
        *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
@@ -434,8 +435,8 @@ out:
 
 static int gcm_aes_nx_encrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
 
        memcpy(iv, req->iv, 12);
 
@@ -444,8 +445,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
 
 static int gcm_aes_nx_decrypt(struct aead_request *req)
 {
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
 
        memcpy(iv, req->iv, 12);
 
@@ -455,7 +456,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
 
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -467,7 +469,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
-       char *iv = nx_ctx->priv.gcm.iv;
+       struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+       char *iv = rctx->iv;
        char *nonce = nx_ctx->priv.gcm.nonce;
 
        memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 8c2faffab4a3..c2f7d4befb55 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
                           unsigned int         key_len)
 {
        struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
+       struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 
        switch (key_len) {
        case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
                return -EINVAL;
        }
 
-       memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
+       memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
 
        return 0;
 }
@@ -148,32 +149,29 @@ out:
        return rc;
 }
 
-static int nx_xcbc_init(struct shash_desc *desc)
+static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
 {
-       struct xcbc_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
-       struct nx_sg *out_sg;
-       int len;
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_AES);
+       err = nx_crypto_ctx_aes_xcbc_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_AES);
 
        NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
        csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
 
-       memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
-       memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
-
-       len = AES_BLOCK_SIZE;
-       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-                                 &len, nx_ctx->ap->sglen);
+       return 0;
+}
 
-       if (len != AES_BLOCK_SIZE)
-               return -EINVAL;
+static int nx_xcbc_init(struct shash_desc *desc)
+{
+       struct xcbc_state *sctx = shash_desc_ctx(desc);
 
-       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+       memset(sctx, 0, sizeof *sctx);
 
        return 0;
 }
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_sg *in_sg;
+       struct nx_sg *out_sg;
        u32 to_process = 0, leftover, total;
        unsigned int max_sg_len;
        unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
        max_sg_len = min_t(u64, max_sg_len,
                                nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
+       data_len = AES_BLOCK_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &len, nx_ctx->ap->sglen);
+
+       if (data_len != AES_BLOCK_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
        do {
                to_process = total - to_process;
                to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
                                                (u8 *) sctx->buffer,
                                                &data_len,
                                                max_sg_len);
-                       if (data_len != sctx->count)
-                               return -EINVAL;
+                       if (data_len != sctx->count) {
+                               rc = -EINVAL;
+                               goto out;
+                       }
                }
 
                data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
                                        &data_len,
                                        max_sg_len);
 
-               if (data_len != to_process - sctx->count)
-                       return -EINVAL;
+               if (data_len != to_process - sctx->count) {
+                       rc = -EINVAL;
+                       goto out;
+               }
 
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
                                        sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
                                 &len, nx_ctx->ap->sglen);
 
-       if (len != sctx->count)
-               return -EINVAL;
+       if (len != sctx->count) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        len = AES_BLOCK_SIZE;
        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
                                  nx_ctx->ap->sglen);
 
-       if (len != AES_BLOCK_SIZE)
-               return -EINVAL;
+       if (len != AES_BLOCK_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
 
        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
                .cra_blocksize   = AES_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_aes_xcbc_init,
+               .cra_init        = nx_crypto_ctx_aes_xcbc_init2,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 23621da624c3..08f8d5cd6334 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -29,30 +29,28 @@
 #include "nx.h"
 
 
-static int nx_sha256_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
 {
-       struct sha256_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-       int len;
-       int rc;
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+       err = nx_crypto_ctx_sha_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
 
        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
 
-       len = SHA256_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 (u8 *) sctx->state,
-                                 NX_DS_SHA256);
+       return 0;
+}
 
-       if (rc)
-               goto out;
+static int nx_sha256_init(struct shash_desc *desc) {
+       struct sha256_state *sctx = shash_desc_ctx(desc);
+
+       memset(sctx, 0, sizeof *sctx);
 
        sctx->state[0] = __cpu_to_be32(SHA256_H0);
        sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -64,7 +62,6 @@ static int nx_sha256_init(struct shash_desc *desc)
        sctx->state[7] = __cpu_to_be32(SHA256_H7);
        sctx->count = 0;
 
-out:
        return 0;
 }
 
@@ -74,10 +71,13 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *in_sg;
+       struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
+       u32 max_sg_len;
        u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -97,6 +97,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
+       in_sg = nx_ctx->in_sg;
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+       data_len = SHA256_DIGEST_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &data_len, max_sg_len);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+       if (data_len != SHA256_DIGEST_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
        do {
                /*
                 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
@@ -108,25 +124,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 
                if (buf_len) {
                        data_len = buf_len;
-                       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                                 &nx_ctx->op.inlen,
-                                                 &data_len,
-                                                 (u8 *) sctx->buf,
-                                                 NX_DS_SHA256);
+                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                                                (u8 *) sctx->buf,
+                                                &data_len,
+                                                max_sg_len);
 
-                       if (rc || data_len != buf_len)
+                       if (data_len != buf_len) {
+                               rc = -EINVAL;
                                goto out;
+                       }
                }
 
                data_len = to_process - buf_len;
-               rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                         &nx_ctx->op.inlen,
-                                         &data_len,
-                                         (u8 *) data,
-                                         NX_DS_SHA256);
+               in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+                                        &data_len, max_sg_len);
 
-               if (rc)
-                       goto out;
+               nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
                to_process = (data_len + buf_len);
                leftover = total - to_process;
@@ -173,12 +186,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *in_sg, *out_sg;
        unsigned long irq_flags;
-       int rc;
+       u32 max_sg_len;
+       int rc = 0;
        int len;
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
        /* final is represented by continuing the operation and indicating that
         * this is not an intermediate operation */
        if (sctx->count >= SHA256_BLOCK_SIZE) {
@@ -195,25 +215,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
        csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
 
        len = sctx->count & (SHA256_BLOCK_SIZE - 1);
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                 &nx_ctx->op.inlen,
-                                 &len,
-                                 (u8 *) sctx->buf,
-                                 NX_DS_SHA256);
+       in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
+                                &len, max_sg_len);
 
-       if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+       if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+               rc = -EINVAL;
                goto out;
+       }
 
        len = SHA256_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 out,
-                                 NX_DS_SHA256);
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
 
-       if (rc || len != SHA256_DIGEST_SIZE)
+       if (len != SHA256_DIGEST_SIZE) {
+               rc = -EINVAL;
                goto out;
+       }
 
+       nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
                goto out;
@@ -268,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
                .cra_blocksize   = SHA256_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_sha_init,
+               .cra_init        = nx_crypto_ctx_sha256_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index b3adf1022673..aff0fe58eac0 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -28,30 +28,29 @@
 #include "nx.h"
 
 
-static int nx_sha512_init(struct shash_desc *desc)
+static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
 {
-       struct sha512_state *sctx = shash_desc_ctx(desc);
-       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-       int len;
-       int rc;
+       struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+       int err;
 
-       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+       err = nx_crypto_ctx_sha_init(tfm);
+       if (err)
+               return err;
 
-       memset(sctx, 0, sizeof *sctx);
+       nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
 
        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
 
-       len = SHA512_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 (u8 *)sctx->state,
-                                 NX_DS_SHA512);
+       return 0;
+}
 
-       if (rc || len != SHA512_DIGEST_SIZE)
-               goto out;
+static int nx_sha512_init(struct shash_desc *desc)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+
+       memset(sctx, 0, sizeof *sctx);
 
        sctx->state[0] = __cpu_to_be64(SHA512_H0);
        sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -63,7 +62,6 @@ static int nx_sha512_init(struct shash_desc *desc)
        sctx->state[7] = __cpu_to_be64(SHA512_H7);
        sctx->count[0] = 0;
 
-out:
        return 0;
 }
 
@@ -73,10 +71,13 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *in_sg;
+       struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
        int rc = 0;
        int data_len;
+       u32 max_sg_len;
        u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -96,6 +97,22 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
+       in_sg = nx_ctx->in_sg;
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+       data_len = SHA512_DIGEST_SIZE;
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+                                 &data_len, max_sg_len);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+       if (data_len != SHA512_DIGEST_SIZE) {
+               rc = -EINVAL;
+               goto out;
+       }
+
        do {
                /*
                 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
@@ -108,25 +125,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 
                if (buf_len) {
                        data_len = buf_len;
-                       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                                 &nx_ctx->op.inlen,
-                                                 &data_len,
-                                                 (u8 *) sctx->buf,
-                                                 NX_DS_SHA512);
+                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                                                (u8 *) sctx->buf,
+                                                &data_len, max_sg_len);
 
-                       if (rc || data_len != buf_len)
+                       if (data_len != buf_len) {
+                               rc = -EINVAL;
                                goto out;
+                       }
                }
 
                data_len = to_process - buf_len;
-               rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                         &nx_ctx->op.inlen,
-                                         &data_len,
-                                         (u8 *) data,
-                                         NX_DS_SHA512);
+               in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+                                        &data_len, max_sg_len);
+
+               nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-               if (rc || data_len != (to_process - buf_len))
+               if (data_len != (to_process - buf_len)) {
+                       rc = -EINVAL;
                        goto out;
+               }
 
                to_process = (data_len + buf_len);
                leftover = total - to_process;
@@ -172,13 +190,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+       struct nx_sg *in_sg, *out_sg;
+       u32 max_sg_len;
        u64 count0;
        unsigned long irq_flags;
-       int rc;
+       int rc = 0;
        int len;
 
        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
+       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+       max_sg_len = min_t(u64, max_sg_len,
+                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
        /* final is represented by continuing the operation and indicating that
         * this is not an intermediate operation */
        if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
@@ -200,24 +225,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
        csbcpb->cpb.sha512.message_bit_length_lo = count0;
 
        len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-                                 &nx_ctx->op.inlen,
-                                 &len,
-                                 (u8 *)sctx->buf,
-                                 NX_DS_SHA512);
+       in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
+                                max_sg_len);
 
-       if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+       if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+               rc = -EINVAL;
                goto out;
+       }
 
        len = SHA512_DIGEST_SIZE;
-       rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-                                 &nx_ctx->op.outlen,
-                                 &len,
-                                 out,
-                                 NX_DS_SHA512);
+       out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+                                max_sg_len);
 
-       if (rc)
-               goto out;
+       nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+       nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
        if (!nx_ctx->op.outlen) {
                rc = -EINVAL;
@@ -273,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
                .cra_blocksize   = SHA512_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-               .cra_init        = nx_crypto_ctx_sha_init,
+               .cra_init        = nx_crypto_ctx_sha512_init,
                .cra_exit        = nx_crypto_ctx_exit,
        }
 };
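
Both SHA conversions above have the same shape: the one-time hardware context setup moves into the cra_init hook, while the shash init() callback only resets per-request state. Stripped of the NX specifics, and with hypothetical types standing in for the driver's, the division of labour is:

        #include <linux/string.h>
        #include <linux/types.h>

        struct example_tfm_ctx { int hw_handle; };              /* per transform */
        struct example_req_state { u64 count; u32 state[8]; };  /* per request */

        /* cra_init-style hook: runs once per transform. */
        static int example_cra_init(struct example_tfm_ctx *ctx)
        {
                ctx->hw_handle = 0;     /* stands in for the coprocessor setup */
                return 0;
        }

        /* init()-style hook: touches only the request's own state, so
         * concurrent requests on one transform cannot corrupt each other. */
        static int example_hash_init(struct example_req_state *st)
        {
                memset(st, 0, sizeof(*st));
                return 0;
        }
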
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 1da6dc59d0dd..737d33dc50b8 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
  * @delta:  is the amount we need to crop in order to bound the list.
  *
  */
-static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+static long int trim_sg_list(struct nx_sg *sg,
+                            struct nx_sg *end,
+                            unsigned int delta,
+                            unsigned int *nbytes)
 {
+       long int oplen;
+       long int data_back;
+       unsigned int is_delta = delta;
+
        while (delta && end > sg) {
                struct nx_sg *last = end - 1;
 
@@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d
                        delta -= last->len;
                }
        }
-       return (sg - end) * sizeof(struct nx_sg);
-}
-
-/**
- * nx_sha_build_sg_list - walk and build sg list to sha modes
- *                       using right bounds and limits.
- * @nx_ctx: NX crypto context for the lists we're building
- * @nx_sg: current sg list in or out list
- * @op_len: current op_len to be used in order to build a sg list
- * @nbytes:  number or bytes to be processed
- * @offset: buf offset
- * @mode: SHA256 or SHA512
- */
-int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
-                         struct nx_sg        *nx_in_outsg,
-                         s64                 *op_len,
-                         unsigned int        *nbytes,
-                         u8                  *offset,
-                         u32                 mode)
-{
-       unsigned int delta = 0;
-       unsigned int total = *nbytes;
-       struct nx_sg *nx_insg = nx_in_outsg;
-       unsigned int max_sg_len;
 
-       max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-                       nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-       max_sg_len = min_t(u64, max_sg_len,
-                       nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-
-       *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
-       nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
-
-       switch (mode) {
-       case NX_DS_SHA256:
-               if (*nbytes < total)
-                       delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
-               break;
-       case NX_DS_SHA512:
-               if (*nbytes < total)
-                       delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
-               break;
-       default:
-               return -EINVAL;
+       /* There are cases where we need to crop the list in order to make it
+        * a block size multiple, but we also need to align the data. In order
+        * to do that we need to calculate how much data we need to put back
+        * to be processed.
+        */
+       oplen = (sg - end) * sizeof(struct nx_sg);
+       if (is_delta) {
+               data_back = (abs(oplen) / AES_BLOCK_SIZE) *  sg->len;
+               data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
+               *nbytes -= data_back;
        }
-       *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
 
-       return 0;
+       return oplen;
 }
 
 /**
@@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
        /* these lengths should be negative, which will indicate to phyp that
         * the input and output parameters are scatterlists, not linear
         * buffers */
-       nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
-       nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
+       nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
+       nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
 
        return 0;
 }
@@ -662,12 +635,14 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
 /* entry points from the crypto tfm initializers */
 int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
 {
+       tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CCM);
 }
 
 int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
 {
+       tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_GCM);
 }
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 6c9ecaaead52..c3ed83764fef 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -2,6 +2,8 @@
 #ifndef __NX_H__
 #define __NX_H__
 
+#include <crypto/ctr.h>
+
 #define NX_NAME                "nx-crypto"
 #define NX_STRING      "IBM Power7+ Nest Accelerator Crypto Driver"
 #define NX_VERSION     "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
 
 #define NX_GCM4106_NONCE_LEN           (4)
 #define NX_GCM_CTR_OFFSET              (12)
-struct nx_gcm_priv {
+struct nx_gcm_rctx {
        u8 iv[16];
+};
+
+struct nx_gcm_priv {
        u8 iauth_tag[16];
        u8 nonce[NX_GCM4106_NONCE_LEN];
 };
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
 #define NX_CCM_AES_KEY_LEN             (16)
 #define NX_CCM4309_AES_KEY_LEN         (19)
 #define NX_CCM4309_NONCE_LEN           (3)
-struct nx_ccm_priv {
+struct nx_ccm_rctx {
        u8 iv[16];
+};
+
+struct nx_ccm_priv {
        u8 b0[16];
        u8 iauth_tag[16];
        u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
 };
 
 struct nx_ctr_priv {
-       u8 iv[16];
+       u8 nonce[CTR_RFC3686_NONCE_SIZE];
 };
 
 struct nx_crypto_ctx {
@@ -153,8 +161,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
                  u32 may_sleep);
-int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
-                        s64 *, unsigned int *, u8 *, u32);
 struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
 int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
                      struct scatterlist *, struct scatterlist *, unsigned int *,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1dc5b0a17cf7..34139a8894a0 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)
 
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
 
 struct qat_alg_buf {
        uint32_t len;
@@ -1271,7 +1272,10 @@ static struct crypto_alg qat_algs[] = { {
 
 int qat_algs_register(void)
 {
-       if (atomic_add_return(1, &active_dev) == 1) {
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1) {
                int i;
 
                for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1280,21 +1284,25 @@ int qat_algs_register(void)
                                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
                                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 
-               return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+               ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        }
-       return 0;
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_unregister(void)
 {
-       if (atomic_sub_return(1, &active_dev) == 0)
-               return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-       return 0;
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0)
+               ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_init(void)
 {
-       atomic_set(&active_dev, 0);
        crypto_get_default_rng();
        return 0;
 }
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7992164ea9ec..c89a7abb523f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -648,16 +648,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        desc->lld.mbr_sa = mem;
                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
-               desc->lld.mbr_cfg = atchan->cfg;
-               dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+               dwidth = at_xdmac_get_dwidth(atchan->cfg);
                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
-                              ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+                              ? dwidth
                               : AT_XDMAC_CC_DWIDTH_BYTE;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2                       /* next descriptor view */
                        | AT_XDMAC_MBR_UBC_NDEN                                 /* next descriptor dst parameter update */
                        | AT_XDMAC_MBR_UBC_NSEN                                 /* next descriptor src parameter update */
                        | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)          /* descriptor fetch */
                        | (len >> fixed_dwidth);                                /* microblock length */
+               desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
+                                   AT_XDMAC_CC_DWIDTH(fixed_dwidth);
                dev_dbg(chan2dev(chan),
                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 340f9e607cd8..3dabc52b9615 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
                        desc->txd.callback = last->txd.callback;
                        desc->txd.callback_param = last->txd.callback_param;
                }
-               last->last = false;
+               desc->last = false;
 
                dma_cookie_assign(&desc->txd);
 
@@ -2621,6 +2621,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                desc->rqcfg.brst_len = 1;
 
        desc->rqcfg.brst_len = get_burst_len(desc, len);
+       desc->bytes_requested = len;
 
        desc->txd.flags = flags;
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..b0487c9f018c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1294,7 +1294,6 @@ retry:
                                goto retry;
                        }
                        DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, 
ret);
-                       WARN(1, "fail\n");
 
                        return -EIO;
                }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ae6f7f06b3a..683a9b004c11 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3190,15 +3190,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({                      \
-               u32 upper = I915_READ(upper_reg);                       \
-               u32 lower = I915_READ(lower_reg);                       \
-               u32 tmp = I915_READ(upper_reg);                         \
-               if (upper != tmp) {                                     \
-                       upper = tmp;                                    \
-                       lower = I915_READ(lower_reg);                   \
-                       WARN_ON(I915_READ(upper_reg) != upper);         \
-               }                                                       \
-               (u64)upper << 32 | lower; })
+       u32 upper, lower, tmp;                                          \
+       tmp = I915_READ(upper_reg);                                     \
+       do {                                                            \
+               upper = tmp;                                            \
+               lower = I915_READ(lower_reg);                           \
+               tmp = I915_READ(upper_reg);                             \
+       } while (upper != tmp);                                         \
+       (u64)upper << 32 | lower; })
 
 #define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)    (void)I915_READ16_NOTRACE(reg)
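
The macro rewrite above is the usual way to read a 64-bit, free-running counter exposed as two 32-bit registers: re-read the upper half until it is stable around the lower-half read. As a plain C sketch, with read_hi()/read_lo() standing in for the two register accessors:

        #include <stdint.h>

        uint32_t read_hi(void);         /* hypothetical register accessors */
        uint32_t read_lo(void);

        static uint64_t read_counter64(void)
        {
                uint32_t hi, lo, tmp;

                tmp = read_hi();
                do {
                        /* If the low half wrapped between reads, the high
                         * half changes and we simply try again. */
                        hi = tmp;
                        lo = read_lo();
                        tmp = read_hi();
                } while (hi != tmp);

                return ((uint64_t)hi << 32) | lo;
        }
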
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6377b22269ad..7ee23d1d1e74 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        }
 
        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
-       args->phys_swizzle_mode = args->swizzle_mode;
+       if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+       else
+               args->phys_swizzle_mode = args->swizzle_mode;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->offset;
-
-       WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
-              AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+       WREG32(AFMT_AUDIO_SRC_CONTROL +  dig->afmt->offset,
+              AFMT_AUDIO_SRC_SELECT(dig->pin->id));
 }
 
 void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
-               struct drm_connector *connector, struct drm_display_mode *mode)
+                                   struct drm_connector *connector,
+                                   struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 tmp = 0, offset;
+       u32 tmp = 0;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (connector->latency_present[1])
                        tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
                else
                        tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
        }
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
 
 void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                            u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set HDMI mode */
        tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                          u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set DP mode */
        tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
-       struct cea_sad *sads, int sad_count)
+                             struct cea_sad *sads, int sad_count)
 {
-       u32 offset;
        int i;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
                { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 value = 0;
                u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 
                value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
 
-               WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+               WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
        }
 }
 
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
 }
 
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                            struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto0 for HDMI */
        u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                          struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto1 for DP */
        u32 value = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..59b3d3221294 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
 static void radeon_audio_enable(struct radeon_device *rdev,
                                struct r600_audio_pin *pin, u8 enable_mask)
 {
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+       struct radeon_encoder_atom_dig *dig;
+       int pin_count = 0;
+
+       if (!pin)
+               return;
+
+       if (rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+                       if (radeon_encoder_is_digital(encoder)) {
+                               radeon_encoder = to_radeon_encoder(encoder);
+                               dig = radeon_encoder->enc_priv;
+                               if (dig->pin == pin)
+                                       pin_count++;
+                       }
+               }
+
+               if ((pin_count > 1) && (enable_mask == 0))
+                       return;
+       }
+
        if (rdev->audio.funcs->enable)
                rdev->audio.funcs->enable(rdev, pin, enable_mask);
 }
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
 
 static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct cea_sad *sads;
        int sad_count;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
        sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
        if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
        }
        BUG_ON(!sads);
 
-       radeon_encoder = to_radeon_encoder(encoder);
-
        if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
                radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
 
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 
 static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
        u8 *sadb = NULL;
        int sad_count;
 
-       list_for_each_entry(connector,
-                           &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
-       sad_count = drm_edid_to_speaker_allocation(
-               radeon_connector_edid(connector), &sadb);
+       sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+                                                  &sadb);
        if (sad_count < 0) {
                DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
                          sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 }
 
 static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                             struct drm_display_mode *mode)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = 0;
-
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
-
-       radeon_encoder = to_radeon_encoder(encoder);
 
        if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
                radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
 }
 
 void radeon_audio_detect(struct drm_connector *connector,
+                        struct drm_encoder *encoder,
                         enum drm_connector_status status)
 {
-       struct radeon_device *rdev;
-       struct radeon_encoder *radeon_encoder;
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig;
 
-       if (!connector || !connector->encoder)
+       if (!radeon_audio_chipset_supported(rdev))
                return;
 
-       rdev = connector->encoder->dev->dev_private;
-
-       if (!radeon_audio_chipset_supported(rdev))
+       if (!radeon_encoder_is_digital(encoder))
                return;
 
-       radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
        if (status == connector_status_connected) {
-               if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-                       radeon_encoder->audio = NULL;
-                       return;
-               }
-
                if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
                }
 
-               dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+                       if (!dig->pin)
+                               dig->pin = radeon_audio_get_pin(encoder);
+                       radeon_audio_enable(rdev, dig->pin, 0xf);
+               } else {
+                       radeon_audio_enable(rdev, dig->pin, 0);
+                       dig->pin = NULL;
+               }
        } else {
-               radeon_audio_enable(rdev, dig->afmt->pin, 0);
-               dig->afmt->pin = NULL;
+               radeon_audio_enable(rdev, dig->pin, 0);
+               dig->pin = NULL;
        }
 }
 
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
 }
 
 static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                      struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        struct hdmi_avi_infoframe frame;
        int err;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
-               return -ENOENT;
-       }
+       if (!connector)
+               return -EINVAL;
 
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
                return err;
        }
 
-       if (dig && dig->afmt &&
-               radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+       if (dig && dig->afmt && radeon_encoder->audio &&
+           radeon_encoder->audio->set_avi_packet)
                radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
                        buffer, sizeof(buffer));
 
@@ -745,7 +719,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                    struct drm_display_mode *mode)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -756,6 +730,9 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct radeon_connector_atom_dig *dig_connector =
                radeon_connector->con_priv;
 
+       if (!connector)
+               return;
+
        if (!dig || !dig->afmt)
                return;
 
@@ -774,7 +751,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                          struct drm_display_mode *mode)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
 
 int radeon_audio_init(struct radeon_device *rdev);
 void radeon_audio_detect(struct drm_connector *connector,
-       enum drm_connector_status status);
+                        struct drm_encoder *encoder,
+                        enum drm_connector_status status);
 u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
        u32 offset, u32 reg);
 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 
                        if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
                            (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+                               u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+                               if (hss > lvds->native_mode.hdisplay)
+                                       hss = (10 - 1) * 8;
+
                                lvds->native_mode.htotal = lvds->native_mode.hdisplay +
                                        (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
                                lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-                                       (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+                                       hss;
                                lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
                                        (RBIOS8(tmp + 23) * 8);
 
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && radeon_connector->use_digital) {
+               const struct drm_connector_helper_funcs *connector_funcs =
+                       connector->helper_private;
+
+               encoder = connector_funcs->best_encoder(connector);
+               if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+                       radeon_connector_get_edid(connector);
+                       radeon_audio_detect(connector, encoder, ret);
+               }
+       }
 
 exit:
        pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && encoder) {
+               radeon_connector_get_edid(connector);
+               radeon_audio_detect(connector, encoder, ret);
+       }
 
 out:
        pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f01c797b78cf..9af2d8398e90 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
        int offset;
        bool last_buffer_filled_status;
        int id;
-       struct r600_audio_pin *pin;
 };
 
 struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
        uint8_t backlight_level;
        int panel_mode;
        struct radeon_afmt *afmt;
+       struct r600_audio_pin *pin;
        int active_mst_links;
 };
 
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 6153df735e82..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
        {"nct7904", 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
 
 static struct i2c_driver nct7904_driver = {
        .class = I2C_CLASS_HWMON,
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a353b7de6d22..bc7eed67998a 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
+#include <linux/dmi.h>
 
 #include "psmouse.h"
 #include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
+#define ALPS_DELL              0x100   /* device is a Dell laptop */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                return;
        }
 
-       /* Non interleaved V2 dualpoint has separate stick button bits */
+       /* Dell non interleaved V2 dualpoint has separate stick button bits */
        if (priv->proto_version == ALPS_PROTO_V2 &&
-           priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+           priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
                left |= packet[0] & 1;
                right |= packet[0] & 2;
                middle |= packet[0] & 4;
@@ -2542,6 +2544,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
        priv->byte0 = protocol->byte0;
        priv->mask0 = protocol->mask0;
        priv->flags = protocol->flags;
+       if (dmi_name_in_vendors("Dell"))
+               priv->flags |= ALPS_DELL;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e8d84566f311..697f34fba06b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1719,7 +1719,8 @@ static int dm_merge_bvec(struct request_queue *q,
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table_fast(md);
        struct dm_target *ti;
-       sector_t max_sectors, max_size = 0;
+       sector_t max_sectors;
+       int max_size = 0;
 
        if (unlikely(!map))
                goto out;
@@ -1732,18 +1733,10 @@ static int dm_merge_bvec(struct request_queue *q,
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
-                         (sector_t) queue_max_sectors(q));
+                         (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
-       /*
-        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-        * to the targets' merge function since it holds sectors not bytes).
-        * Just doing this as an interim fix for stable@ because the more
-        * comprehensive cleanup of switching to sector_t will impact every
-        * DM target that implements a ->merge hook.
-        */
-       if (max_size > INT_MAX)
-               max_size = INT_MAX;
+       if (max_size < 0)
+               max_size = 0;
 
        /*
         * merge_bvec_fn() returns number of bytes
@@ -1751,13 +1744,13 @@ static int dm_merge_bvec(struct request_queue *q,
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+               max_size = ti->type->merge(ti, bvm, biovec, max_size);
        /*
         * If the target doesn't support merge method and some of the devices
-        * provided their merge_bvec method (we know this by looking for the
-        * max_hw_sectors that dm_set_device_limits may set), then we can't
-        * allow bios with multiple vector entries.  So always set max_size
-        * to 0, and the code below allows just one page.
+        * provided their merge_bvec method (we know this by looking at
+        * queue_max_hw_sectors), then we can't allow bios with multiple vector
+        * entries.  So always set max_size to 0, and the code below allows
+        * just one page.
         */
        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
                max_size = 0;
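
The dm_merge_bvec() hunk above turns max_size back into an int so that a bvm->bi_size larger than the byte limit shows up as a negative value that can be clamped to zero, instead of wrapping around as a huge unsigned sector_t. A minimal sketch of the two behaviours, using made-up sizes rather than anything from device-mapper:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t max_bytes = 4096;   /* stands in for max_sectors << SECTOR_SHIFT */
        uint64_t bi_size = 8192;     /* request already over the limit */

        /* unsigned subtraction wraps: the "room left" looks enormous */
        uint64_t wrapped = max_bytes - bi_size;

        /* signed subtraction goes negative and can be clamped to zero */
        int64_t room = (int64_t)max_bytes - (int64_t)bi_size;
        if (room < 0)
            room = 0;

        printf("unsigned: %llu\n", (unsigned long long)wrapped);
        printf("signed, clamped: %lld\n", (long long)room);
        return 0;
    }
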
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b9200282fd77..e4621511d118 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5740,7 +5740,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
        char *ptr;
        int err;
 
-       file = kmalloc(sizeof(*file), GFP_NOIO);
+       file = kzalloc(sizeof(*file), GFP_NOIO);
        if (!file)
                return -ENOMEM;
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index cd7b0c1e882d..5ce3cd5c4e1d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1475,6 +1475,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r1conf *conf = mddev->private;
+       unsigned long flags;
 
        /*
         * If it is not operational, then we have already marked it as dead
@@ -1494,14 +1495,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
                return;
        }
        set_bit(Blocked, &rdev->flags);
+       spin_lock_irqsave(&conf->device_lock, flags);
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
-               unsigned long flags;
-               spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded++;
                set_bit(Faulty, &rdev->flags);
-               spin_unlock_irqrestore(&conf->device_lock, flags);
        } else
                set_bit(Faulty, &rdev->flags);
+       spin_unlock_irqrestore(&conf->device_lock, flags);
        /*
         * if recovery is running, make sure it aborts.
         */
@@ -1567,7 +1567,10 @@ static int raid1_spare_active(struct mddev *mddev)
         * Find all failed disks within the RAID1 configuration
         * and mark them readable.
         * Called under mddev lock, so rcu protection not needed.
+        * device_lock used to avoid races with raid1_end_read_request
+        * which expects 'In_sync' flags and ->degraded to be consistent.
         */
+       spin_lock_irqsave(&conf->device_lock, flags);
        for (i = 0; i < conf->raid_disks; i++) {
                struct md_rdev *rdev = conf->mirrors[i].rdev;
                struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1601,6 @@ static int raid1_spare_active(struct mddev *mddev)
                        sysfs_notify_dirent_safe(rdev->sysfs_state);
                }
        }
-       spin_lock_irqsave(&conf->device_lock, flags);
        mddev->degraded -= count;
        spin_unlock_irqrestore(&conf->device_lock, flags);
 
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 7681237fe298..ead543282128 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1524,12 +1524,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
                case QCA6174_HW_1_0_CHIP_ID_REV:
                case QCA6174_HW_1_1_CHIP_ID_REV:
+               case QCA6174_HW_2_1_CHIP_ID_REV:
+               case QCA6174_HW_2_2_CHIP_ID_REV:
                        return 3;
                case QCA6174_HW_1_3_CHIP_ID_REV:
                        return 2;
-               case QCA6174_HW_2_1_CHIP_ID_REV:
-               case QCA6174_HW_2_2_CHIP_ID_REV:
-                       return 6;
                case QCA6174_HW_3_0_CHIP_ID_REV:
                case QCA6174_HW_3_1_CHIP_ID_REV:
                case QCA6174_HW_3_2_CHIP_ID_REV:
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 8882afbef688..6285f46f3ddb 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -144,6 +144,16 @@
 #define PMBR1                          0x0D
 #define GPIO_USB_4PIN_ULPI_2430C       (3 << 0)
 
+/*
+ * If VBUS is valid or ID is ground, then we know a
+ * cable is present and we need to be runtime-enabled
+ */
+static inline bool cable_present(enum omap_musb_vbus_id_status stat)
+{
+       return stat == OMAP_MUSB_VBUS_VALID ||
+               stat == OMAP_MUSB_ID_GROUND;
+}
+
 struct twl4030_usb {
        struct usb_phy          phy;
        struct device           *dev;
@@ -536,8 +546,10 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
 
        mutex_lock(&twl->lock);
        if (status >= 0 && status != twl->linkstat) {
+               status_changed =
+                       cable_present(twl->linkstat) !=
+                       cable_present(status);
                twl->linkstat = status;
-               status_changed = true;
        }
        mutex_unlock(&twl->lock);
 
@@ -553,15 +565,11 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                 * USB_LINK_VBUS state.  musb_hdrc won't care until it
                 * starts to handle softconnect right.
                 */
-               if ((status == OMAP_MUSB_VBUS_VALID) ||
-                   (status == OMAP_MUSB_ID_GROUND)) {
-                       if (pm_runtime_suspended(twl->dev))
-                               pm_runtime_get_sync(twl->dev);
+               if (cable_present(status)) {
+                       pm_runtime_get_sync(twl->dev);
                } else {
-                       if (pm_runtime_active(twl->dev)) {
-                               pm_runtime_mark_last_busy(twl->dev);
-                               pm_runtime_put_autosuspend(twl->dev);
-                       }
+                       pm_runtime_mark_last_busy(twl->dev);
+                       pm_runtime_put_autosuspend(twl->dev);
                }
                omap_musb_mailbox(status);
        }
@@ -766,6 +774,9 @@ static int twl4030_usb_remove(struct platform_device *pdev)
 
        /* disable complete OTG block */
        twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
+
+       if (cable_present(twl->linkstat))
+               pm_runtime_put_noidle(twl->dev);
        pm_runtime_mark_last_busy(twl->dev);
        pm_runtime_put(twl->dev);
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 {
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       unsigned int trace_index;
 
-       trace_entry = &ioa_cfg->trace[atomic_add_return
-                       (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+       trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+       trace_entry = &ioa_cfg->trace[trace_index];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
 
 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
 {
+       unsigned int hrrq;
+
        if (ioa_cfg->hrrq_num == 1)
-               return 0;
-       else
-               return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+               hrrq = 0;
+       else {
+               hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+               hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+       }
+       return hrrq;
 }
 
 /**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-       unsigned long hrrq_flags;
+       unsigned long lock_flags;
 
        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
                scsi_dma_unmap(scsi_cmd);
 
-               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
                scsi_cmd->scsi_done(scsi_cmd);
-               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
        } else {
-               spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               spin_lock(&ipr_cmd->hrrq->_lock);
                ipr_erp_start(ioa_cfg, ipr_cmd);
-               spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+               spin_unlock(&ipr_cmd->hrrq->_lock);
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        }
 }
 
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
 
 #define IPR_NUM_TRACE_INDEX_BITS       8
 #define IPR_NUM_TRACE_ENTRIES          (1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK           (IPR_NUM_TRACE_ENTRIES - 1)
 #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
        char trace_start[8];
 #define IPR_TRACE_START_LABEL                  "trace"
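
The ipr_trc_hook() change above indexes the trace ring with & IPR_TRACE_INDEX_MASK instead of % IPR_NUM_TRACE_ENTRIES. With a power-of-two entry count the two agree for non-negative values, but once the atomic counter wraps negative a signed modulo yields a negative, out-of-bounds index while the mask still lands in range. A small sketch of that difference (the counter value is illustrative):

    #include <stdio.h>

    #define NUM_ENTRIES 256              /* must be a power of two */
    #define INDEX_MASK  (NUM_ENTRIES - 1)

    int main(void)
    {
        int counter = -3;   /* what a wrapped atomic counter can return */

        int mod_index = counter % NUM_ENTRIES;    /* -3: out of bounds */
        int mask_index = counter & INDEX_MASK;    /* 253: always 0..255 */

        printf("modulo: %d, mask: %d\n", mod_index, mask_index);
        return 0;
    }
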
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
 
 #define DEBUG_SUBSYSTEM D_OTHER
 
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 
 #include "../include/obd_support.h"
 #include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 15baacb126ad..376e4a0c15c6 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1486,8 +1486,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
-       if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
-               if (conf->assoc) {
+       if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+           priv->op_mode != NL80211_IFTYPE_AP) {
+               if (conf->assoc && conf->beacon_rate) {
                        CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
                                       conf->sync_tsf);
 
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 1d30b0975651..67098a8a7a02 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1209,6 +1209,8 @@ err_clk_sec:
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
 err_sensor:
+       if (!IS_ERR_OR_NULL(data->regulator))
+               regulator_disable(data->regulator);
        thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
 
        return ret;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
        },
 };
 
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+       ci_hdrc_host_driver_init();
+       return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+       platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
 
 MODULE_ALIAS("platform:ci_hdrc");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 21fe1a314313..2f8af40e87ca 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -237,9 +237,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
        rdrv->name      = "host";
        ci->roles[CI_ROLE_HOST] = rdrv;
 
+       return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
        ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
        orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
        ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
-       return 0;
 }
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
 
 int ci_hdrc_host_init(struct ci_hdrc *ci);
 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
+static void ci_hdrc_host_driver_init(void)
+{
+
+}
+
 #endif
 
 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
                        factor = 1000;
                } else {
                        ep_desc = &hs_epin_desc;
-                       factor = 125;
+                       factor = 8000;
                }
 
                /* pre-compute some values for iso_complete() */
                uac2->p_framesize = opts->p_ssize *
                                    num_channels(opts->p_chmask);
                rate = opts->p_srate * uac2->p_framesize;
-               uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+               uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
                uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
                                        prm->max_psize);
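
The f_uac2 hunk above recomputes p_interval as packets per second rather than a per-packet time: a high-speed endpoint has 8000 microframes per second (a full-speed one 1000 frames), the descriptor's bInterval selects every 2^(bInterval-1) of them, and the per-packet payload is then rate / p_interval. A sketch of that arithmetic with illustrative values, not taken from the driver:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bInterval = 4;    /* example high-speed isoc interval */
        unsigned int factor = 8000;    /* high-speed microframes per second */
        unsigned int srate = 48000, channels = 2, ssize = 2;

        unsigned int framesize = channels * ssize;       /* bytes per audio frame */
        unsigned int rate = srate * framesize;           /* bytes per second */
        unsigned int p_interval = factor / (1 << (bInterval - 1)); /* packets per second */
        unsigned int p_pktsize = rate / p_interval;      /* bytes per packet */

        printf("packets/s = %u, bytes/packet = %u\n", p_interval, p_pktsize);
        return 0;
    }
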
 
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..7d69931cf45d 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -321,6 +321,7 @@ err4:
 
 err3:
        put_device(&udc->dev);
+       device_del(&gadget->dev);
 
 err2:
        put_device(&gadget->dev);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3e442f77a2b9..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        int size;
        int i, j, num_ports;
 
-       del_timer_sync(&xhci->cmd_timer);
+       if (timer_pending(&xhci->cmd_timer))
+               del_timer_sync(&xhci->cmd_timer);
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d095677a0702..b3a0a2275f5a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
-       if (segment_offset > TRBS_PER_SEGMENT)
+       if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
 }
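
The xhci_trb_virt_to_dma() fix above is a textbook off-by-one in a bounds check: a segment holds TRBS_PER_SEGMENT TRBs, so the valid offsets are 0..TRBS_PER_SEGMENT-1 and the reject test has to be >=, not >. A generic sketch (the segment size here is illustrative, not the xHCI constant):

    #include <stdio.h>

    #define TRBS_PER_SEGMENT 16          /* illustrative segment size */

    int main(void)
    {
        unsigned int offset = TRBS_PER_SEGMENT;   /* one past the last valid slot */

        /* the old "> TRBS_PER_SEGMENT" check lets this offset through */
        printf("old check rejects it: %d\n", offset > TRBS_PER_SEGMENT);
        /* the ">= TRBS_PER_SEGMENT" check correctly rejects it */
        printf("new check rejects it: %d\n", offset >= TRBS_PER_SEGMENT);
        return 0;
    }
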
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 19b85ee98a72..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
          .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+       { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+         .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x901c)},   /* Sierra Wireless EM7700 */
        {DEVICE_SWI(0x1199, 0x901f)},   /* Sierra Wireless EM7355 */
        {DEVICE_SWI(0x1199, 0x9040)},   /* Sierra Wireless Modem */
-       {DEVICE_SWI(0x1199, 0x9041)},   /* Sierra Wireless MC7305/MC7355 */
        {DEVICE_SWI(0x1199, 0x9051)},   /* Netgear AirCard 340U */
        {DEVICE_SWI(0x1199, 0x9053)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9054)},   /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a8)},   /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a9)},   /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81b1)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 89274850741b..4bd23bba816f 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 
        pr_debug("priv %p\n", priv);
 
+       mutex_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        WARN_ON(!list_empty(&priv->freeable_maps));
+       mutex_unlock(&priv->lock);
 
        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 039f9c8a95e8..6e13504f736e 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4397,9 +4397,9 @@ laundromat_main(struct work_struct *laundry)
        queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
 {
-       if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+       if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
                return nfserr_bad_stateid;
        return nfs_ok;
 }
@@ -4574,20 +4574,48 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
        return nfs_ok;
 }
 
+static struct file *
+nfs4_find_file(struct nfs4_stid *s, int flags)
+{
+       switch (s->sc_type) {
+       case NFS4_DELEG_STID:
+               if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
+                       return NULL;
+               return get_file(s->sc_file->fi_deleg_file);
+       case NFS4_OPEN_STID:
+       case NFS4_LOCK_STID:
+               if (flags & RD_STATE)
+                       return find_readable_file(s->sc_file);
+               else
+                       return find_writeable_file(s->sc_file);
+               break;
+       }
+
+       return NULL;
+}
+
+static __be32
+nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
+{
+       __be32 status;
+
+       status = nfsd4_check_openowner_confirmed(ols);
+       if (status)
+               return status;
+       return nfs4_check_openmode(ols, flags);
+}
+
 /*
-* Checks for stateid operations
-*/
+ * Checks for stateid operations
+ */
 __be32
 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                           stateid_t *stateid, int flags, struct file **filpp)
 {
-       struct nfs4_stid *s;
-       struct nfs4_ol_stateid *stp = NULL;
-       struct nfs4_delegation *dp = NULL;
-       struct svc_fh *current_fh = &cstate->current_fh;
-       struct inode *ino = d_inode(current_fh->fh_dentry);
+       struct svc_fh *fhp = &cstate->current_fh;
+       struct inode *ino = d_inode(fhp->fh_dentry);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-       struct file *file = NULL;
+       struct nfs4_stid *s;
        __be32 status;
 
        if (filpp)
@@ -4597,60 +4625,39 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                return nfserr_grace;
 
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
-               return check_special_stateids(net, current_fh, stateid, flags);
+               return check_special_stateids(net, fhp, stateid, flags);
 
        status = nfsd4_lookup_stateid(cstate, stateid,
                                NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
                                &s, nn);
        if (status)
                return status;
-       status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
+       status = check_stateid_generation(stateid, &s->sc_stateid,
+                       nfsd4_has_session(cstate));
        if (status)
                goto out;
+
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
-               dp = delegstateid(s);
-               status = nfs4_check_delegmode(dp, flags);
-               if (status)
-                       goto out;
-               if (filpp) {
-                       file = dp->dl_stid.sc_file->fi_deleg_file;
-                       if (!file) {
-                               WARN_ON_ONCE(1);
-                               status = nfserr_serverfault;
-                               goto out;
-                       }
-                       get_file(file);
-               }
+               status = nfs4_check_delegmode(delegstateid(s), flags);
                break;
        case NFS4_OPEN_STID:
        case NFS4_LOCK_STID:
-               stp = openlockstateid(s);
-               status = nfs4_check_fh(current_fh, stp);
-               if (status)
-                       goto out;
-               status = nfsd4_check_openowner_confirmed(stp);
-               if (status)
-                       goto out;
-               status = nfs4_check_openmode(stp, flags);
-               if (status)
-                       goto out;
-               if (filpp) {
-                       struct nfs4_file *fp = stp->st_stid.sc_file;
-
-                       if (flags & RD_STATE)
-                               file = find_readable_file(fp);
-                       else
-                               file = find_writeable_file(fp);
-               }
+               status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
                break;
        default:
                status = nfserr_bad_stateid;
+               break;
+       }
+       if (status)
                goto out;
+       status = nfs4_check_fh(fhp, s);
+
+       if (!status && filpp) {
+               *filpp = nfs4_find_file(s, flags);
+               if (!*filpp)
+                       status = nfserr_serverfault;
        }
-       status = nfs_ok;
-       if (file)
-               *filpp = file;
 out:
        nfs4_put_stid(s);
        return status;
@@ -4754,7 +4761,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status)
                return status;
-       return nfs4_check_fh(current_fh, stp);
+       return nfs4_check_fh(current_fh, &stp->st_stid);
 }
 
 /* 
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 158badf945df..d4d84451e0e6 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2142,6 +2142,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
                              FATTR4_WORD0_RDATTR_ERROR)
 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+#define WORD2_ABSENT_FS_ATTRS 0
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 static inline __be32
@@ -2170,7 +2171,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 { return 0; }
 #endif
 
-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
 {
        /* As per referral draft:  */
        if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2183,6 +2184,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
        }
        *bmval0 &= WORD0_ABSENT_FS_ATTRS;
        *bmval1 &= WORD1_ABSENT_FS_ATTRS;
+       *bmval2 &= WORD2_ABSENT_FS_ATTRS;
        return 0;
 }
 
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
 
        if (exp->ex_fslocs.migrated) {
-               BUG_ON(bmval[2]);
-               status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
+               status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
                if (status)
                        goto out;
        }
@@ -2290,8 +2291,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        }
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-       if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
-                       bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+       if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
+            bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
                err = security_inode_getsecctx(d_inode(dentry),
                                                &context, &contextlen);
                contextsupport = (err == 0);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
                                         unsigned int flags)
 {
        struct fsnotify_mark *lmark, *mark;
+       LIST_HEAD(to_free);
 
+       /*
+        * We have to be really careful here. Anytime we drop mark_mutex, e.g.
+        * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
+        * to_free list so we have to use mark_mutex even when accessing that
+        * list. And freeing mark requires us to drop mark_mutex. So we can
+        * reliably free only the first mark in the list. That's why we first
+        * move marks to free to to_free list in one go and then free marks in
+        * to_free list one by one.
+        */
        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
        list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-               if (mark->flags & flags) {
-                       fsnotify_get_mark(mark);
-                       fsnotify_destroy_mark_locked(mark, group);
-                       fsnotify_put_mark(mark);
-               }
+               if (mark->flags & flags)
+                       list_move(&mark->g_list, &to_free);
        }
        mutex_unlock(&group->mark_mutex);
+
+       while (1) {
+               mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+               if (list_empty(&to_free)) {
+                       mutex_unlock(&group->mark_mutex);
+                       break;
+               }
+               mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+               fsnotify_get_mark(mark);
+               fsnotify_destroy_mark_locked(mark, group);
+               mutex_unlock(&group->mark_mutex);
+               fsnotify_put_mark(mark);
+       }
 }
 
 /*
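
The fsnotify change above follows the "splice under the lock, free outside it" pattern: matching marks are first moved to a private to_free list while mark_mutex is held, and only then destroyed one at a time. The kernel version re-takes the mutex around each pop because other paths can still free the marks it has moved; the sketch below, with a plain singly linked list and a pthread mutex, shows only the basic two-phase shape:

    #include <pthread.h>
    #include <stdlib.h>

    struct mark { int flags; struct mark *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct mark *marks;          /* shared list, protected by list_lock */

    static void clear_marks_by_flags(int flags)
    {
        struct mark *to_free = NULL, *m, **pp;

        /* phase 1: detach matching entries onto a private list, under the lock */
        pthread_mutex_lock(&list_lock);
        pp = &marks;
        while ((m = *pp) != NULL) {
            if (m->flags & flags) {
                *pp = m->next;
                m->next = to_free;
                to_free = m;
            } else {
                pp = &m->next;
            }
        }
        pthread_mutex_unlock(&list_lock);

        /* phase 2: free them without holding the lock across free() */
        while (to_free) {
            m = to_free;
            to_free = m->next;
            free(m);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct mark *m = malloc(sizeof(*m));
            m->flags = i & 1;
            m->next = marks;
            marks = m;
        }
        clear_marks_by_flags(1);    /* frees the two odd-flagged entries */
        return 0;
    }
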
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f906a250da6a..9ea70127074d 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -686,7 +686,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
 
        if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                u64 s = i_size_read(inode);
-               sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+               sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
                        (do_div(s, osb->s_clustersize) >> 9);
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -911,7 +911,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
                BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev,
-                               p_cpos << (osb->s_clustersize_bits - 9),
+                               (u64)p_cpos << (osb->s_clustersize_bits - 9),
                                zero_len_head >> 9, GFP_NOFS, false);
                if (ret < 0)
                        mlog_errno(ret);
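
Both ocfs2 hunks above cast p_cpos to u64 before shifting: without the cast the 32-bit cluster offset is shifted in 32-bit arithmetic and the high bits are lost before the result is widened to sector_t. A small sketch of the difference, with made-up values (the shift corresponds to, e.g., 1MB clusters: 20 - 9):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t p_cpos = 0x00400000;   /* illustrative cluster offset */
        unsigned int shift = 11;        /* s_clustersize_bits - 9 for 1MB clusters */

        uint64_t truncated = p_cpos << shift;           /* 32-bit shift, wraps to 0 */
        uint64_t widened = (uint64_t)p_cpos << shift;   /* cast first, keeps all bits */

        printf("without cast: 0x%llx\n", (unsigned long long)truncated);
        printf("with cast:    0x%llx\n", (unsigned long long)widened);
        return 0;
    }
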
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
        osb->dc_work_sequence = osb->dc_wake_sequence;
 
        processed = osb->blocked_lock_count;
-       while (processed) {
-               BUG_ON(list_empty(&osb->blocked_lock_list));
-
+       /*
+        * blocked lock processing in this loop might call iput which can
+        * remove items off osb->blocked_lock_list. Downconvert up to
+        * 'processed' number of locks, but stop short if we had some
+        * removed in ocfs2_mark_lockres_freeing when downconverting.
+        */
+       while (processed && !list_empty(&osb->blocked_lock_list)) {
                lockres = list_entry(osb->blocked_lock_list.next,
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (kinfo->si_code == BUS_MCEERR_AR ||
-                   kinfo->si_code == BUS_MCEERR_AO)
+               if (kinfo->si_signo == SIGBUS &&
+                   (kinfo->si_code == BUS_MCEERR_AR ||
+                    kinfo->si_code == BUS_MCEERR_AO))
                        err |= __put_user((short) kinfo->si_addr_lsb,
                                          &uinfo->ssi_addr_lsb);
 #endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 3d4ea7eb2b68..12b75f3ba0a0 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -176,17 +176,17 @@ typedef enum {
 /* Chip may not exist, so silence any errors in scan */
 #define NAND_SCAN_SILENT_NODEV 0x00040000
 /*
- * This option could be defined by controller drivers to protect against
- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
- */
-#define NAND_USE_BOUNCE_BUFFER 0x00080000
-/*
  * Autodetect nand buswidth with readid/onfi.
  * This suppose the driver will configure the hardware in 8 bits mode
  * when calling nand_scan_ident, and update its configuration
  * before calling nand_scan_tail.
  */
 #define NAND_BUSWIDTH_AUTO      0x00080000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER 0x00100000
 
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
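
The nand.h hunk above resolves a flag collision: NAND_USE_BOUNCE_BUFFER and NAND_BUSWIDTH_AUTO both occupied bit 0x00080000, so testing one flag also answered for the other; moving the bounce-buffer flag to 0x00100000 gives it its own bit. A quick sketch of why shared bits mislead such tests (macro names here are stand-ins):

    #include <stdio.h>

    #define FLAG_BUSWIDTH_AUTO  0x00080000
    #define FLAG_BOUNCE_OLD     0x00080000   /* old value: collides with the flag above */
    #define FLAG_BOUNCE_NEW     0x00100000   /* new value: a dedicated bit */

    int main(void)
    {
        unsigned int options = FLAG_BUSWIDTH_AUTO;   /* only auto-detect requested */

        /* with the colliding value, an unrelated feature looks enabled */
        printf("bounce buffer (old bit): %d\n", !!(options & FLAG_BOUNCE_OLD));
        /* with its own bit, the test answers the question actually asked */
        printf("bounce buffer (new bit): %d\n", !!(options & FLAG_BOUNCE_NEW));
        return 0;
    }
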
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
 #define PCI_MSIX_PBA           8       /* Pending Bit Array offset */
 #define  PCI_MSIX_PBA_BIR      0x00000007 /* BAR index */
 #define  PCI_MSIX_PBA_OFFSET   0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
 #define PCI_CAP_MSIX_SIZEOF    12      /* size of MSIX registers */
 
 /* MSI-X Table entry format */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 3aaea7ffd077..c3fc5c2b63f3 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
-               info->qsize += sizeof(*leaf);
        }
        leaf->priority = msg->m_type;
        rb_link_node(&leaf->rb_node, parent, p);
@@ -188,7 +187,6 @@ try_again:
                             "lazy leaf delete!\n");
                rb_erase(&leaf->rb_node, &info->msg_tree);
                if (info->node_cache) {
-                       info->qsize -= sizeof(*leaf);
                        kfree(leaf);
                } else {
                        info->node_cache = leaf;
@@ -201,7 +199,6 @@ try_again:
                if (list_empty(&leaf->msg_list)) {
                        rb_erase(&leaf->rb_node, &info->msg_tree);
                        if (info->node_cache) {
-                               info->qsize -= sizeof(*leaf);
                                kfree(leaf);
                        } else {
                                info->node_cache = leaf;
@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
        } else {
                kfree(new_leaf);
        }
diff --git a/kernel/signal.c b/kernel/signal.c
index d51c5ddd855c..0206be728dac 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2753,12 +2753,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 #ifdef SEGV_BNDERR
-               err |= __put_user(from->si_lower, &to->si_lower);
-               err |= __put_user(from->si_upper, &to->si_upper);
+               if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+                       err |= __put_user(from->si_lower, &to->si_lower);
+                       err |= __put_user(from->si_upper, &to->si_upper);
+               }
 #endif
                break;
        case __SI_CHLD:
@@ -3022,7 +3025,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
        int ret = copy_siginfo_from_user32(&info, uinfo);
        if (unlikely(ret))
                return ret;
@@ -3066,7 +3069,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
 
        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
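
The kernel/signal.c hunks above make two related hardening changes: si_code values are only meaningful for a particular signal, so the SIGBUS and SIGSEGV branches now test si_signo before interpreting si_code, and the compat siginfo_t on the stack is zero-initialized so that fields the copy routine never sets cannot leak stale stack contents to userspace. A small userspace sketch of the zero-initialization point (the struct is a stand-in, not the real siginfo layout):

    #include <stdio.h>

    struct info {
        int signo;
        int code;
        long payload[4];    /* stand-in for the siginfo union */
    };

    static void fill_partial(struct info *i)
    {
        /* mimics a copy helper that only sets the fields it knows about */
        i->signo = 7;
        i->code = 1;
    }

    int main(void)
    {
        struct info uninit;          /* members start out indeterminate */
        struct info clean = {0};     /* every member starts at zero */

        fill_partial(&uninit);
        fill_partial(&clean);

        /* only 'clean' is safe to copy out wholesale: its untouched
         * members are guaranteed to be zero rather than stack garbage */
        printf("clean.payload[0] = %ld\n", clean.payload[0]);
        return 0;
    }
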
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5e8eadd71bac..0d024fc8aa8e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -937,21 +937,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 *
                 * 2) Global reclaim encounters a page, memcg encounters a
                 *    page that is not marked for immediate reclaim or
-                *    the caller does not have __GFP_IO. In this case mark
+                *    the caller does not have __GFP_FS (or __GFP_IO if it's
+                *    simply going to swap, not to fs). In this case mark
                 *    the page for immediate reclaim and continue scanning.
                 *
-                *    __GFP_IO is checked  because a loop driver thread might
+                *    Require may_enter_fs because we would wait on fs, which
+                *    may not have submitted IO yet. And the loop driver might
                 *    enter reclaim, and deadlock if it waits on a page for
                 *    which it is needed to do the write (loop masks off
                 *    __GFP_IO|__GFP_FS for this reason); but more thought
                 *    would probably show more reasons.
                 *
-                *    Don't require __GFP_FS, since we're not going into the
-                *    FS, just waiting on its writeback completion. Worryingly,
-                *    ext4 gfs2 and xfs allocate pages with
-                *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
-                *    may_enter_fs here is liable to OOM on them.
-                *
                 * 3) memcg encounters a page that is not already marked
                 *    PageReclaim. memcg does not have any dirty pages
                 *    throttling so we could easily OOM just because too many
@@ -968,7 +964,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                        /* Case 2 above */
                        } else if (global_reclaim(sc) ||
-                           !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+                           !PageReclaim(page) || !may_enter_fs) {
                                /*
                                 * This is slightly racy - end_page_writeback()
                                 * might have just cleared PageReclaim, then
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1ab3dc9c8f99..7b815bcc8c9b 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2295,6 +2295,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
                return 1;
 
        chan = conn->smp;
+       if (!chan) {
+               BT_ERR("SMP security requested but not available");
+               return 1;
+       }
 
        if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
                return 1;
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index e061355f535f..bf20593d3085 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -730,8 +730,9 @@ static void handle_in_packet(struct amdtp_stream *s,
            s->data_block_counter != UINT_MAX)
                data_block_counter = s->data_block_counter;
 
-       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
-           (s->data_block_counter == UINT_MAX)) {
+       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+            data_block_counter == s->tx_first_dbc) ||
+           s->data_block_counter == UINT_MAX) {
                lost = false;
        } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
                lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 8a03a91e728b..25c905537658 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -153,6 +153,8 @@ struct amdtp_stream {
 
        /* quirk: fixed interval of dbc between previos/current packets. */
        unsigned int tx_dbc_interval;
+       /* quirk: indicate the value of dbc field in a first packet. */
+       unsigned int tx_first_dbc;
 
        bool callbacked;
        wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 2682e7e3e5c9..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
        err = get_hardware_info(efw);
        if (err < 0)
                goto error;
+       /* AudioFire8 (since 2009) and AudioFirePre8 */
        if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
                efw->is_af9 = true;
+       /* These models use the same firmware. */
+       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+           entry->model_id == MODEL_GIBSON_RIP ||
+           entry->model_id == MODEL_GIBSON_GOLDTOP)
+               efw->is_fireworks3 = true;
 
        snd_efw_proc_init(efw);
 
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4f0201a95222..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -71,6 +71,7 @@ struct snd_efw {
 
        /* for quirks */
        bool is_af9;
+       bool is_fireworks3;
        u32 firmware_version;
 
        unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index c55db1bddc80..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
        efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
        /* Fireworks reset dbc at bus reset. */
        efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
+       /*
+        * But recent firmware starts packets with a non-zero dbc.
+        * Driver version 5.7.6 installs firmware version 5.7.3.
+        */
+       if (efw->is_fireworks3 &&
+           (efw->firmware_version == 0x5070000 ||
+            efw->firmware_version == 0x5070300 ||
+            efw->firmware_version == 0x5080000))
+               efw->tx_stream.tx_first_dbc = 0x02;
        /* AudioFire9 always reports wrong dbs. */
        if (efw->is_af9)
                efw->tx_stream.flags |= CIP_WRONG_DBS;
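(Aside: the firmware_version constants above appear to pack one version component per byte, e.g. 0x5070300 for 5.7.3, judging from the comment; that layout is an inference, not something stated by the driver here. A quick decode:)

#include <stdio.h>

static void print_version(unsigned int v)
{
	/* Assumed layout: 0xMMmmpp00 -> major.minor.patch. */
	printf("0x%07x -> %u.%u.%u\n", v,
	       (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff);
}

int main(void)
{
	print_version(0x5070000);	/* 5.7.0 */
	print_version(0x5070300);	/* 5.7.3 */
	print_version(0x5080000);	/* 5.8.0 */
	return 0;
}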
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 50e9dd675579..3a24f7739aaa 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1001,9 +1001,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
 
        spec->spdif_present = spdif_present;
        /* SPDIF TX on/off */
-       if (spdif_present)
-               snd_hda_set_pin_ctl(codec, spdif_pin,
-                                   spdif_present ? PIN_OUT : 0);
+       snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
 
        cs_automute(codec);
 }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 590bcfb0e82f..1e99f075a5ab 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5118,6 +5118,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 477e13d30971..e7ba557979cb 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
 
        if (val != -1) {
                regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
-                                       PCM1681_DEEMPH_RATE_MASK, val);
+                                  PCM1681_DEEMPH_RATE_MASK, val << 3);
                enable = 1;
        } else
                enable = 0;
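(Aside on the pcm1681 fix: regmap_update_bits() computes new = (old & ~mask) | (val & mask), so the deemphasis rate has to be shifted into the bit positions covered by PCM1681_DEEMPH_RATE_MASK before the call, otherwise it is masked away. The mask value and register contents in the sketch below are made up for illustration:)

#include <stdio.h>

#define DEEMPH_RATE_MASK	0x18u	/* assumed: rate field in bits 4:3 */
#define DEEMPH_RATE_SHIFT	3

/* Same read-modify-write rule regmap_update_bits() applies. */
static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int reg = 0xe1;	/* arbitrary current register value */
	unsigned int rate = 0x2;	/* field value chosen by the driver */

	/* Unshifted: (0x2 & 0x18) == 0, the rate is silently dropped. */
	printf("unshifted: 0x%02x\n", update_bits(reg, DEEMPH_RATE_MASK, rate));

	/* Shifted into the field: the rate lands in bits 4:3. */
	printf("shifted:   0x%02x\n",
	       update_bits(reg, DEEMPH_RATE_MASK, rate << DEEMPH_RATE_SHIFT));
	return 0;
}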
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index a984485108cd..f7549cc7ea85 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        if (invert_fclk)
                ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
 
-       return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
+       return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
+                       SSM4567_SAI_CTRL_1_BCLK |
+                       SSM4567_SAI_CTRL_1_FSYNC |
+                       SSM4567_SAI_CTRL_1_LJ |
+                       SSM4567_SAI_CTRL_1_TDM |
+                       SSM4567_SAI_CTRL_1_PDM,
+                       ctrl1);
 }
 
 static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 7b50a9d17ec1..edc186908358 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -42,6 +42,11 @@
 #define MIN_FRAGMENT_SIZE (50 * 1024)
 #define MAX_FRAGMENT_SIZE (1024 * 1024)
 #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
+#ifdef CONFIG_PM
+#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
+#else
+#define GET_USAGE_COUNT(dev) 1
+#endif
 
 int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
 {
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
        int ret = 0;
        int usage_count = 0;
 
-#ifdef CONFIG_PM
-       usage_count = atomic_read(&dev->power.usage_count);
-#else
-       usage_count = 1;
-#endif
-
        if (state == true) {
                ret = pm_runtime_get_sync(dev);
-
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
                if (ret < 0) {
                        dev_err(ctx->dev, "Runtime get failed with err: %d\n", 
ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
                        }
                }
        } else {
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
                return sst_pm_runtime_put(ctx);
        }
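(Aside: two small things visible in this file's context are easy to sanity-check in user space. SST_GET_BYTES_PER_SAMPLE() rounds the PCM word size up to whole 16-bit units and converts that to bytes, and the new GET_USAGE_COUNT() macro simply falls back to 1 when runtime PM is compiled out. The quick check below copies the byte-per-sample macro verbatim; everything else is illustrative only:)

#include <stdio.h>

#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)

int main(void)
{
	int widths[] = { 8, 16, 24, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("%2d-bit samples -> %d bytes\n",
		       widths[i], SST_GET_BYTES_PER_SAMPLE(widths[i]));
	return 0;
}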
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 158204d08924..b6c12dccb259 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1811,6 +1811,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                           size_t count, loff_t *ppos)
 {
        struct snd_soc_dapm_widget *w = file->private_data;
+       struct snd_soc_card *card = w->dapm->card;
        char *buf;
        int in, out;
        ssize_t ret;
@@ -1820,6 +1821,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
        if (!buf)
                return -ENOMEM;
 
+       mutex_lock(&card->dapm_mutex);
+
        /* Supply widgets are not handled by is_connected_{input,output}_ep() */
        if (w->is_supply) {
                in = 0;
@@ -1866,6 +1869,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                        p->sink->name);
        }
 
+       mutex_unlock(&card->dapm_mutex);
+
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
        kfree(buf);
@@ -2140,11 +2145,15 @@ static ssize_t dapm_widget_show(struct device *dev,
        struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
        int i, count = 0;
 
+       mutex_lock(&rtd->card->dapm_mutex);
+
        for (i = 0; i < rtd->num_codecs; i++) {
                struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
                count += dapm_widget_show_codec(codec, buf + count);
        }
 
+       mutex_unlock(&rtd->card->dapm_mutex);
+
        return count;
 }
 
@@ -3100,16 +3109,10 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
        }
 
        prefix = soc_dapm_prefix(dapm);
-       if (prefix) {
+       if (prefix)
                w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
-                                            widget->sname);
-       } else {
+       else
                w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
-       }
        if (w->name == NULL) {
                kfree(w);
                return NULL;
@@ -3557,7 +3560,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
                                break;
                        }
 
-                       if (!w->sname || !strstr(w->sname, dai_w->name))
+                       if (!w->sname || !strstr(w->sname, dai_w->sname))
                                continue;
 
                        if (dai_w->id == snd_soc_dapm_dai_in) {
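(Aside on the last hunk: a DAI widget's name and its stream name need not be the same string, so substring-matching a playback/capture widget's stream name against the DAI's stream name is the comparison that can actually succeed. The names in the tiny illustration below are made up:)

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *w_sname   = "HiFi Playback";	/* widget stream name */
	const char *dai_name  = "codec-hifi";		/* DAI widget name    */
	const char *dai_sname = "Playback";		/* DAI stream name    */

	printf("match against dai name : %s\n",
	       strstr(w_sname, dai_name) ? "yes" : "no");
	printf("match against dai sname: %s\n",
	       strstr(w_sname, dai_sname) ? "yes" : "no");
	return 0;
}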