commit:     3e36410423095c3ba385b2850ebad08d7a842228
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct  3 17:45:26 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct  3 17:45:26 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3e364104

Linux patch 3.18.22

 0000_README              |    4 +
 1021_linux-3.18.22.patch | 2622 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2626 insertions(+)

diff --git a/0000_README b/0000_README
index 558f14f..7e934be 100644
--- a/0000_README
+++ b/0000_README
@@ -127,6 +127,10 @@ Patch:  1020_linux-3.18.21.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.18.21
 
+Patch:  1021_linux-3.18.22.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.18.22
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1021_linux-3.18.22.patch b/1021_linux-3.18.22.patch
new file mode 100644
index 0000000..0049146
--- /dev/null
+++ b/1021_linux-3.18.22.patch
@@ -0,0 +1,2622 @@
+diff --git a/Makefile b/Makefile
+index 6be90fab361b..7adbbbeeb421 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = Diseased Newt
+ 
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 81a02a8762b0..86825f8883de 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_abt32(vcpu, false, addr);
+-
+-      inject_abt64(vcpu, false, addr);
++      else
++              inject_abt64(vcpu, false, addr);
+ }
+ 
+ /**
+@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_abt32(vcpu, true, addr);
+-
+-      inject_abt64(vcpu, true, addr);
++      else
++              inject_abt64(vcpu, true, addr);
+ }
+ 
+ /**
+@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ {
+       if (!(vcpu->arch.hcr_el2 & HCR_RW))
+               inject_undef32(vcpu);
+-
+-      inject_undef64(vcpu);
++      else
++              inject_undef64(vcpu);
+ }
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index 5251565e344b..a6576cf1e6d9 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -80,7 +80,7 @@ syscall_trace_entry:
+       SAVE_STATIC
+       move    s0, t2
+       move    a0, sp
+-      daddiu  a1, v0, __NR_64_Linux
++      move    a1, v0
+       jal     syscall_trace_enter
+ 
+       bltz    v0, 2f                  # seccomp failed? Skip syscall
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 77e74398b828..a8eb6575edc0 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
+       SAVE_STATIC
+       move    s0, t2
+       move    a0, sp
+-      daddiu  a1, v0, __NR_N32_Linux
++      move    a1, v0
+       jal     syscall_trace_enter
+ 
+       bltz    v0, 2f                  # seccomp failed? Skip syscall
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index a94b82e8f156..69126184c609 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
+       set_ldt(NULL, 0);
+ }
+ 
+-/*
+- * load one particular LDT into the current CPU
+- */
+-static inline void load_LDT_nolock(mm_context_t *pc)
+-{
+-      set_ldt(pc->ldt, pc->size);
+-}
+-
+-static inline void load_LDT(mm_context_t *pc)
+-{
+-      preempt_disable();
+-      load_LDT_nolock(pc);
+-      preempt_enable();
+-}
+-
+ static inline unsigned long get_desc_base(const struct desc_struct *desc)
+ {
+       return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 876e74e8eec7..b6b7bc3f5d26 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,8 +9,7 @@
+  * we put the segment information here.
+  */
+ typedef struct {
+-      void *ldt;
+-      int size;
++      struct ldt_struct *ldt;
+ 
+ #ifdef CONFIG_X86_64
+       /* True if mm supports a task running in 32 bit compatibility mode. */
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 166af2a8e865..23e0625a6183 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -20,6 +20,50 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
+ #endif        /* !CONFIG_PARAVIRT */
+ 
+ /*
++ * ldt_structs can be allocated, used, and freed, but they are never
++ * modified while live.
++ */
++struct ldt_struct {
++      /*
++       * Xen requires page-aligned LDTs with special permissions.  This is
++       * needed to prevent us from installing evil descriptors such as
++       * call gates.  On native, we could merge the ldt_struct and LDT
++       * allocations, but it's not worth trying to optimize.
++       */
++      struct desc_struct *entries;
++      int size;
++};
++
++static inline void load_mm_ldt(struct mm_struct *mm)
++{
++      struct ldt_struct *ldt;
++
++      /* lockless_dereference synchronizes with smp_store_release */
++      ldt = lockless_dereference(mm->context.ldt);
++
++      /*
++       * Any change to mm->context.ldt is followed by an IPI to all
++       * CPUs with the mm active.  The LDT will not be freed until
++       * after the IPI is handled by all such CPUs.  This means that,
++       * if the ldt_struct changes before we return, the values we see
++       * will be safe, and the new values will be loaded before we run
++       * any user code.
++       *
++       * NB: don't try to convert this to use RCU without extreme care.
++       * We would still need IRQs off, because we don't want to change
++       * the local LDT after an IPI loaded a newer value than the one
++       * that we can see.
++       */
++
++      if (unlikely(ldt))
++              set_ldt(ldt->entries, ldt->size);
++      else
++              clear_LDT();
++
++      DEBUG_LOCKS_WARN_ON(preemptible());
++}
++
++/*
+  * Used for LDT copy/destruction.
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+@@ -55,7 +99,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 
+               /* Load the LDT, if the LDT is different: */
+               if (unlikely(prev->context.ldt != next->context.ldt))
+-                      load_LDT_nolock(&next->context);
++                      load_mm_ldt(next);
+       }
+ #ifdef CONFIG_SMP
+         else {
+@@ -77,7 +121,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                        */
+                       load_cr3(next->pgd);
+                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+-                      load_LDT_nolock(&next->context);
++                      load_mm_ldt(next);
+               }
+       }
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 7bc49c3b9684..e757fcbe90db 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1383,7 +1383,7 @@ void cpu_init(void)
+       load_sp0(t, &current->thread);
+       set_tss_desc(cpu, t);
+       load_TR_desc();
+-      load_LDT(&init_mm.context);
++      load_mm_ldt(&init_mm);
+ 
+       clear_all_debug_regs();
+       dbg_restore_debug_regs();
+@@ -1426,7 +1426,7 @@ void cpu_init(void)
+       load_sp0(t, thread);
+       set_tss_desc(cpu, t);
+       load_TR_desc();
+-      load_LDT(&init_mm.context);
++      load_mm_ldt(&init_mm);
+ 
+       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 6b5acd5f4a34..c832e9f54cd6 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -32,6 +32,7 @@
+ #include <asm/smp.h>
+ #include <asm/alternative.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+ #include <asm/timer.h>
+ #include <asm/desc.h>
+ #include <asm/ldt.h>
+@@ -1987,21 +1988,25 @@ static unsigned long get_segment_base(unsigned int segment)
+       int idx = segment >> 3;
+ 
+       if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
++              struct ldt_struct *ldt;
++
+               if (idx > LDT_ENTRIES)
+                       return 0;
+ 
+-              if (idx > current->active_mm->context.size)
++              /* IRQs are off, so this synchronizes with smp_store_release */
++              ldt = lockless_dereference(current->active_mm->context.ldt);
++              if (!ldt || idx > ldt->size)
+                       return 0;
+ 
+-              desc = current->active_mm->context.ldt;
++              desc = &ldt->entries[idx];
+       } else {
+               if (idx > GDT_ENTRIES)
+                       return 0;
+ 
+-              desc = raw_cpu_ptr(gdt_page.gdt);
++              desc = raw_cpu_ptr(gdt_page.gdt) + idx;
+       }
+ 
+-      return get_desc_base(desc + idx);
++      return get_desc_base(desc);
+ }
+ 
+ #ifdef CONFIG_COMPAT
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index e36d9815ef56..fad5cd9d7c4b 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1459,20 +1459,77 @@ ENTRY(nmi)
+        * a nested NMI that updated the copy interrupt stack frame, a
+        * jump will be made to the repeat_nmi code that will handle the second
+        * NMI.
++       *
++       * However, espfix prevents us from directly returning to userspace
++       * with a single IRET instruction.  Similarly, IRET to user mode
++       * can fault.  We therefore handle NMIs from user space like
++       * other IST entries.
+        */
+ 
+       /* Use %rdx as out temp variable throughout */
+       pushq_cfi %rdx
+       CFI_REL_OFFSET rdx, 0
+ 
++      testb   $3, CS-RIP+8(%rsp)
++      jz      .Lnmi_from_kernel
++
++      /*
++       * NMI from user mode.  We need to run on the thread stack, but we
++       * can't go through the normal entry paths: NMIs are masked, and
++       * we don't want to enable interrupts, because then we'll end
++       * up in an awkward situation in which IRQs are on but NMIs
++       * are off.
++       */
++
++      SWAPGS
++      cld
++      movq    %rsp, %rdx
++      movq    PER_CPU_VAR(kernel_stack), %rsp
++      addq    $KERNEL_STACK_OFFSET, %rsp
++      pushq   5*8(%rdx)       /* pt_regs->ss */
++      pushq   4*8(%rdx)       /* pt_regs->rsp */
++      pushq   3*8(%rdx)       /* pt_regs->flags */
++      pushq   2*8(%rdx)       /* pt_regs->cs */
++      pushq   1*8(%rdx)       /* pt_regs->rip */
++      pushq   $-1             /* pt_regs->orig_ax */
++      pushq   %rdi            /* pt_regs->di */
++      pushq   %rsi            /* pt_regs->si */
++      pushq   (%rdx)          /* pt_regs->dx */
++      pushq   %rcx            /* pt_regs->cx */
++      pushq   %rax            /* pt_regs->ax */
++      pushq   %r8             /* pt_regs->r8 */
++      pushq   %r9             /* pt_regs->r9 */
++      pushq   %r10            /* pt_regs->r10 */
++      pushq   %r11            /* pt_regs->r11 */
++      pushq   %rbx            /* pt_regs->rbx */
++      pushq   %rbp            /* pt_regs->rbp */
++      pushq   %r12            /* pt_regs->r12 */
++      pushq   %r13            /* pt_regs->r13 */
++      pushq   %r14            /* pt_regs->r14 */
++      pushq   %r15            /* pt_regs->r15 */
++
+       /*
+-       * If %cs was not the kernel segment, then the NMI triggered in user
+-       * space, which means it is definitely not nested.
++       * At this point we no longer need to worry about stack damage
++       * due to nesting -- we're on the normal thread stack and we're
++       * done with the NMI stack.
+        */
+-      cmpl $__KERNEL_CS, 16(%rsp)
+-      jne first_nmi
++
++      movq    %rsp, %rdi
++      movq    $-1, %rsi
++      call    do_nmi
+ 
+       /*
++       * Return back to user mode.  We must *not* do the normal exit
++       * work, because we don't want to enable interrupts.  Fortunately,
++       * do_nmi doesn't modify pt_regs.
++       */
++      SWAPGS
++
++      addq    $6*8, %rsp      /* skip bx, bp, and r12-r15 */
++      jmp     restore_args
++
++.Lnmi_from_kernel:
++      /*
+        * Check the special variable on the stack to see if NMIs are
+        * executing.
+        */
+@@ -1629,29 +1686,11 @@ end_repeat_nmi:
+       call save_paranoid
+       DEFAULT_FRAME 0
+ 
+-      /*
+-       * Save off the CR2 register. If we take a page fault in the NMI then
+-       * it could corrupt the CR2 value. If the NMI preempts a page fault
+-       * handler before it was able to read the CR2 register, and then the
+-       * NMI itself takes a page fault, the page fault that was preempted
+-       * will read the information from the NMI page fault and not the
+-       * origin fault. Save it off and restore it if it changes.
+-       * Use the r12 callee-saved register.
+-       */
+-      movq %cr2, %r12
+-
+       /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+       movq %rsp,%rdi
+       movq $-1,%rsi
+       call do_nmi
+ 
+-      /* Did the NMI take a page fault? Restore cr2 if it did */
+-      movq %cr2, %rcx
+-      cmpq %rcx, %r12
+-      je 1f
+-      movq %r12, %cr2
+-1:
+-      
+       testl %ebx,%ebx                         /* swapgs needed? */
+       jnz nmi_restore
+ nmi_swapgs:
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index c37886d759cc..2bcc0525f1c1 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -12,6 +12,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
++#include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+ 
+@@ -20,82 +21,82 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+ 
+-#ifdef CONFIG_SMP
++/* context.lock is held for us, so we don't need any locking. */
+ static void flush_ldt(void *current_mm)
+ {
+-      if (current->active_mm == current_mm)
+-              load_LDT(&current->active_mm->context);
++      mm_context_t *pc;
++
++      if (current->active_mm != current_mm)
++              return;
++
++      pc = &current->active_mm->context;
++      set_ldt(pc->ldt->entries, pc->ldt->size);
+ }
+-#endif
+ 
+-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
++static struct ldt_struct *alloc_ldt_struct(int size)
+ {
+-      void *oldldt, *newldt;
+-      int oldsize;
+-
+-      if (mincount <= pc->size)
+-              return 0;
+-      oldsize = pc->size;
+-      mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
+-                      (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
+-      if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
+-              newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
++      struct ldt_struct *new_ldt;
++      int alloc_size;
++
++      if (size > LDT_ENTRIES)
++              return NULL;
++
++      new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
++      if (!new_ldt)
++              return NULL;
++
++      BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
++      alloc_size = size * LDT_ENTRY_SIZE;
++
++      /*
++       * Xen is very picky: it requires a page-aligned LDT that has no
++       * trailing nonzero bytes in any page that contains LDT descriptors.
++       * Keep it simple: zero the whole allocation and never allocate less
++       * than PAGE_SIZE.
++       */
++      if (alloc_size > PAGE_SIZE)
++              new_ldt->entries = vzalloc(alloc_size);
+       else
+-              newldt = (void *)__get_free_page(GFP_KERNEL);
+-
+-      if (!newldt)
+-              return -ENOMEM;
++              new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ 
+-      if (oldsize)
+-              memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
+-      oldldt = pc->ldt;
+-      memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
+-             (mincount - oldsize) * LDT_ENTRY_SIZE);
++      if (!new_ldt->entries) {
++              kfree(new_ldt);
++              return NULL;
++      }
+ 
+-      paravirt_alloc_ldt(newldt, mincount);
++      new_ldt->size = size;
++      return new_ldt;
++}
+ 
+-#ifdef CONFIG_X86_64
+-      /* CHECKME: Do we really need this ? */
+-      wmb();
+-#endif
+-      pc->ldt = newldt;
+-      wmb();
+-      pc->size = mincount;
+-      wmb();
+-
+-      if (reload) {
+-#ifdef CONFIG_SMP
+-              preempt_disable();
+-              load_LDT(pc);
+-              if (!cpumask_equal(mm_cpumask(current->mm),
+-                                 cpumask_of(smp_processor_id())))
+-                      smp_call_function(flush_ldt, current->mm, 1);
+-              preempt_enable();
+-#else
+-              load_LDT(pc);
+-#endif
+-      }
+-      if (oldsize) {
+-              paravirt_free_ldt(oldldt, oldsize);
+-              if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
+-                      vfree(oldldt);
+-              else
+-                      put_page(virt_to_page(oldldt));
+-      }
+-      return 0;
++/* After calling this, the LDT is immutable. */
++static void finalize_ldt_struct(struct ldt_struct *ldt)
++{
++      paravirt_alloc_ldt(ldt->entries, ldt->size);
+ }
+ 
+-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++/* context.lock is held */
++static void install_ldt(struct mm_struct *current_mm,
++                      struct ldt_struct *ldt)
+ {
+-      int err = alloc_ldt(new, old->size, 0);
+-      int i;
++      /* Synchronizes with lockless_dereference in load_mm_ldt. */
++      smp_store_release(&current_mm->context.ldt, ldt);
++
++      /* Activate the LDT for all CPUs using current_mm. */
++      on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
++}
+ 
+-      if (err < 0)
+-              return err;
++static void free_ldt_struct(struct ldt_struct *ldt)
++{
++      if (likely(!ldt))
++              return;
+ 
+-      for (i = 0; i < old->size; i++)
+-              write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
+-      return 0;
++      paravirt_free_ldt(ldt->entries, ldt->size);
++      if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
++              vfree(ldt->entries);
++      else
++              kfree(ldt->entries);
++      kfree(ldt);
+ }
+ 
+ /*
+@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
++      struct ldt_struct *new_ldt;
+       struct mm_struct *old_mm;
+       int retval = 0;
+ 
+       mutex_init(&mm->context.lock);
+-      mm->context.size = 0;
+       old_mm = current->mm;
+-      if (old_mm && old_mm->context.size > 0) {
+-              mutex_lock(&old_mm->context.lock);
+-              retval = copy_ldt(&mm->context, &old_mm->context);
+-              mutex_unlock(&old_mm->context.lock);
++      if (!old_mm) {
++              mm->context.ldt = NULL;
++              return 0;
+       }
++
++      mutex_lock(&old_mm->context.lock);
++      if (!old_mm->context.ldt) {
++              mm->context.ldt = NULL;
++              goto out_unlock;
++      }
++
++      new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
++      if (!new_ldt) {
++              retval = -ENOMEM;
++              goto out_unlock;
++      }
++
++      memcpy(new_ldt->entries, old_mm->context.ldt->entries,
++             new_ldt->size * LDT_ENTRY_SIZE);
++      finalize_ldt_struct(new_ldt);
++
++      mm->context.ldt = new_ldt;
++
++out_unlock:
++      mutex_unlock(&old_mm->context.lock);
+       return retval;
+ }
+ 
+@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+  */
+ void destroy_context(struct mm_struct *mm)
+ {
+-      if (mm->context.size) {
+-#ifdef CONFIG_X86_32
+-              /* CHECKME: Can this ever happen ? */
+-              if (mm == current->active_mm)
+-                      clear_LDT();
+-#endif
+-              paravirt_free_ldt(mm->context.ldt, mm->context.size);
+-              if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
+-                      vfree(mm->context.ldt);
+-              else
+-                      put_page(virt_to_page(mm->context.ldt));
+-              mm->context.size = 0;
+-      }
++      free_ldt_struct(mm->context.ldt);
++      mm->context.ldt = NULL;
+ }
+ 
+ static int read_ldt(void __user *ptr, unsigned long bytecount)
+ {
+-      int err;
++      int retval;
+       unsigned long size;
+       struct mm_struct *mm = current->mm;
+ 
+-      if (!mm->context.size)
+-              return 0;
++      mutex_lock(&mm->context.lock);
++
++      if (!mm->context.ldt) {
++              retval = 0;
++              goto out_unlock;
++      }
++
+       if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
+               bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
+ 
+-      mutex_lock(&mm->context.lock);
+-      size = mm->context.size * LDT_ENTRY_SIZE;
++      size = mm->context.ldt->size * LDT_ENTRY_SIZE;
+       if (size > bytecount)
+               size = bytecount;
+ 
+-      err = 0;
+-      if (copy_to_user(ptr, mm->context.ldt, size))
+-              err = -EFAULT;
+-      mutex_unlock(&mm->context.lock);
+-      if (err < 0)
+-              goto error_return;
++      if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
++              retval = -EFAULT;
++              goto out_unlock;
++      }
++
+       if (size != bytecount) {
+-              /* zero-fill the rest */
+-              if (clear_user(ptr + size, bytecount - size) != 0) {
+-                      err = -EFAULT;
+-                      goto error_return;
++              /* Zero-fill the rest and pretend we read bytecount bytes. */
++              if (clear_user(ptr + size, bytecount - size)) {
++                      retval = -EFAULT;
++                      goto out_unlock;
+               }
+       }
+-      return bytecount;
+-error_return:
+-      return err;
++      retval = bytecount;
++
++out_unlock:
++      mutex_unlock(&mm->context.lock);
++      return retval;
+ }
+ 
+ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
+@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+       struct desc_struct ldt;
+       int error;
+       struct user_desc ldt_info;
++      int oldsize, newsize;
++      struct ldt_struct *new_ldt, *old_ldt;
+ 
+       error = -EINVAL;
+       if (bytecount != sizeof(ldt_info))
+@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+                       goto out;
+       }
+ 
+-      mutex_lock(&mm->context.lock);
+-      if (ldt_info.entry_number >= mm->context.size) {
+-              error = alloc_ldt(&current->mm->context,
+-                                ldt_info.entry_number + 1, 1);
+-              if (error < 0)
+-                      goto out_unlock;
+-      }
+-
+-      /* Allow LDTs to be cleared by the user. */
+-      if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+-              if (oldmode || LDT_empty(&ldt_info)) {
+-                      memset(&ldt, 0, sizeof(ldt));
+-                      goto install;
++      if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
++          LDT_empty(&ldt_info)) {
++              /* The user wants to clear the entry. */
++              memset(&ldt, 0, sizeof(ldt));
++      } else {
++              if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
++                      error = -EINVAL;
++                      goto out;
+               }
++
++              fill_ldt(&ldt, &ldt_info);
++              if (oldmode)
++                      ldt.avl = 0;
+       }
+ 
+-      if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+-              error = -EINVAL;
++      mutex_lock(&mm->context.lock);
++
++      old_ldt = mm->context.ldt;
++      oldsize = old_ldt ? old_ldt->size : 0;
++      newsize = max((int)(ldt_info.entry_number + 1), oldsize);
++
++      error = -ENOMEM;
++      new_ldt = alloc_ldt_struct(newsize);
++      if (!new_ldt)
+               goto out_unlock;
+-      }
+ 
+-      fill_ldt(&ldt, &ldt_info);
+-      if (oldmode)
+-              ldt.avl = 0;
++      if (old_ldt)
++              memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
++      new_ldt->entries[ldt_info.entry_number] = ldt;
++      finalize_ldt_struct(new_ldt);
+ 
+-      /* Install the new entry ...  */
+-install:
+-      write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
++      install_ldt(mm, new_ldt);
++      free_ldt_struct(old_ldt);
+       error = 0;
+ 
+ out_unlock:
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index c3e985d1751c..5c5ec7d28d9b 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
+ NOKPROBE_SYMBOL(default_do_nmi);
+ 
+ /*
+- * NMIs can hit breakpoints which will cause it to lose its
+- * NMI context with the CPU when the breakpoint does an iret.
+- */
+-#ifdef CONFIG_X86_32
+-/*
+- * For i386, NMIs use the same stack as the kernel, and we can
+- * add a workaround to the iret problem in C (preventing nested
+- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
+- * can be in:
++ * NMIs can hit breakpoints which will cause it to lose its NMI context
++ * with the CPU when the breakpoint or page fault does an IRET.
++ *
++ * As a result, NMIs can nest if NMIs get unmasked due an IRET during
++ * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
++ * if the outer NMI came from kernel mode, but we can still nest if the
++ * outer NMI came from user mode.
++ *
++ * To handle these nested NMIs, we have three states:
+  *
+  *  1) not running
+  *  2) executing
+@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
+  * (Note, the latch is binary, thus multiple NMIs triggering,
+  *  when one is running, are ignored. Only one NMI is restarted.)
+  *
+- * If an NMI hits a breakpoint that executes an iret, another
+- * NMI can preempt it. We do not want to allow this new NMI
+- * to run, but we want to execute it when the first one finishes.
+- * We set the state to "latched", and the exit of the first NMI will
+- * perform a dec_return, if the result is zero (NOT_RUNNING), then
+- * it will simply exit the NMI handler. If not, the dec_return
+- * would have set the state to NMI_EXECUTING (what we want it to
+- * be when we are running). In this case, we simply jump back
+- * to rerun the NMI handler again, and restart the 'latched' NMI.
++ * If an NMI executes an iret, another NMI can preempt it. We do not
++ * want to allow this new NMI to run, but we want to execute it when the
++ * first one finishes.  We set the state to "latched", and the exit of
++ * the first NMI will perform a dec_return, if the result is zero
++ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
++ * dec_return would have set the state to NMI_EXECUTING (what we want it
++ * to be when we are running). In this case, we simply jump back to
++ * rerun the NMI handler again, and restart the 'latched' NMI.
+  *
+  * No trap (breakpoint or page fault) should be hit before nmi_restart,
+  * thus there is no race between the first check of state for NOT_RUNNING
+@@ -461,49 +460,36 @@ enum nmi_states {
+ static DEFINE_PER_CPU(enum nmi_states, nmi_state);
+ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
+ 
+-#define nmi_nesting_preprocess(regs)                                  \
+-      do {                                                            \
+-              if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {      \
+-                      this_cpu_write(nmi_state, NMI_LATCHED);         \
+-                      return;                                         \
+-              }                                                       \
+-              this_cpu_write(nmi_state, NMI_EXECUTING);               \
+-              this_cpu_write(nmi_cr2, read_cr2());                    \
+-      } while (0);                                                    \
+-      nmi_restart:
+-
+-#define nmi_nesting_postprocess()                                     \
+-      do {                                                            \
+-              if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))     \
+-                      write_cr2(this_cpu_read(nmi_cr2));              \
+-              if (this_cpu_dec_return(nmi_state))                     \
+-                      goto nmi_restart;                               \
+-      } while (0)
+-#else /* x86_64 */
++#ifdef CONFIG_X86_64
+ /*
+- * In x86_64 things are a bit more difficult. This has the same problem
+- * where an NMI hitting a breakpoint that calls iret will remove the
+- * NMI context, allowing a nested NMI to enter. What makes this more
+- * difficult is that both NMIs and breakpoints have their own stack.
+- * When a new NMI or breakpoint is executed, the stack is set to a fixed
+- * point. If an NMI is nested, it will have its stack set at that same
+- * fixed address that the first NMI had, and will start corrupting the
+- * stack. This is handled in entry_64.S, but the same problem exists with
+- * the breakpoint stack.
++ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
++ * some care, the inner breakpoint will clobber the outer breakpoint's
++ * stack.
+  *
+- * If a breakpoint is being processed, and the debug stack is being used,
+- * if an NMI comes in and also hits a breakpoint, the stack pointer
+- * will be set to the same fixed address as the breakpoint that was
+- * interrupted, causing that stack to be corrupted. To handle this case,
+- * check if the stack that was interrupted is the debug stack, and if
+- * so, change the IDT so that new breakpoints will use the current stack
+- * and not switch to the fixed address. On return of the NMI, switch back
+- * to the original IDT.
++ * If a breakpoint is being processed, and the debug stack is being
++ * used, if an NMI comes in and also hits a breakpoint, the stack
++ * pointer will be set to the same fixed address as the breakpoint that
++ * was interrupted, causing that stack to be corrupted. To handle this
++ * case, check if the stack that was interrupted is the debug stack, and
++ * if so, change the IDT so that new breakpoints will use the current
++ * stack and not switch to the fixed address. On return of the NMI,
++ * switch back to the original IDT.
+  */
+ static DEFINE_PER_CPU(int, update_debug_stack);
++#endif
+ 
+-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
++dotraplinkage notrace __kprobes void
++do_nmi(struct pt_regs *regs, long error_code)
+ {
++      if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
++              this_cpu_write(nmi_state, NMI_LATCHED);
++              return;
++      }
++      this_cpu_write(nmi_state, NMI_EXECUTING);
++      this_cpu_write(nmi_cr2, read_cr2());
++nmi_restart:
++
++#ifdef CONFIG_X86_64
+       /*
+        * If we interrupted a breakpoint, it is possible that
+        * the nmi handler will have breakpoints too. We need to
+@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+               debug_stack_set_zero();
+               this_cpu_write(update_debug_stack, 1);
+       }
+-}
+-
+-static inline void nmi_nesting_postprocess(void)
+-{
+-      if (unlikely(this_cpu_read(update_debug_stack))) {
+-              debug_stack_reset();
+-              this_cpu_write(update_debug_stack, 0);
+-      }
+-}
+ #endif
+ 
+-dotraplinkage notrace void
+-do_nmi(struct pt_regs *regs, long error_code)
+-{
+-      nmi_nesting_preprocess(regs);
+-
+       nmi_enter();
+ 
+       inc_irq_stat(__nmi_count);
+@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
+ 
+       nmi_exit();
+ 
+-      /* On i386, may loop back to preprocess */
+-      nmi_nesting_postprocess();
++#ifdef CONFIG_X86_64
++      if (unlikely(this_cpu_read(update_debug_stack))) {
++              debug_stack_reset();
++              this_cpu_write(update_debug_stack, 0);
++      }
++#endif
++
++      if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
++              write_cr2(this_cpu_read(nmi_cr2));
++      if (this_cpu_dec_return(nmi_state))
++              goto nmi_restart;
+ }
+ NOKPROBE_SYMBOL(do_nmi);
+ 
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 67fcc43577d2..63a4b5092203 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all)
+ void release_thread(struct task_struct *dead_task)
+ {
+       if (dead_task->mm) {
+-              if (dead_task->mm->context.size) {
++              if (dead_task->mm->context.ldt) {
+                       pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
+                               dead_task->comm,
+                               dead_task->mm->context.ldt,
+-                              dead_task->mm->context.size);
++                              dead_task->mm->context.ldt->size);
+                       BUG();
+               }
+       }
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index 9b4d51d0c0d0..0ccb53a9fcd9 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -5,6 +5,7 @@
+ #include <linux/mm.h>
+ #include <linux/ptrace.h>
+ #include <asm/desc.h>
++#include <asm/mmu_context.h>
+ 
+ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
+ {
+@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+               struct desc_struct *desc;
+               unsigned long base;
+ 
+-              seg &= ~7UL;
++              seg >>= 3;
+ 
+               mutex_lock(&child->mm->context.lock);
+-              if (unlikely((seg >> 3) >= child->mm->context.size))
++              if (unlikely(!child->mm->context.ldt ||
++                           seg >= child->mm->context.ldt->size))
+                       addr = -1L; /* bogus selector, access would fault */
+               else {
+-                      desc = child->mm->context.ldt + seg;
++                      desc = &child->mm->context.ldt->entries[seg];
+                       base = get_desc_base(desc);
+ 
+                       /* 16-bit code segment? */
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 3e32ed5648a0..a13a38830e76 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -23,6 +23,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/fpu-internal.h> /* pcntxt_mask */
+ #include <asm/cpu.h>
++#include <asm/mmu_context.h>
+ 
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -154,7 +155,7 @@ static void fix_processor_context(void)
+       syscall_init();                         /* This sets MSR_*STAR and related */
+ #endif
+       load_TR_desc();                         /* This does ltr */
+-      load_LDT(&current->active_mm->context); /* This does lldt */
++      load_mm_ldt(current->active_mm);        /* This does lldt */
+ }
+ 
+ /**
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index e88fda867a33..484145368a24 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -8,7 +8,7 @@ config XEN
+       select PARAVIRT_CLOCK
+       select XEN_HAVE_PVMMU
+       depends on X86_64 || (X86_32 && X86_PAE)
+-      depends on X86_TSC
++      depends on X86_LOCAL_APIC && X86_TSC
+       help
+         This is the Linux Xen port.  Enabling this will allow the
+         kernel to boot in a paravirtualized environment under the
+@@ -17,7 +17,7 @@ config XEN
+ config XEN_DOM0
+       def_bool y
+       depends on XEN && PCI_XEN && SWIOTLB_XEN
+-      depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
++      depends on X86_IO_APIC && ACPI && PCI
+ 
+ config XEN_PVHVM
+       def_bool y
+diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
+index 7322755f337a..4b6e29ac0968 100644
+--- a/arch/x86/xen/Makefile
++++ b/arch/x86/xen/Makefile
+@@ -13,13 +13,13 @@ CFLAGS_mmu.o                       := $(nostackp)
+ obj-y         := enlighten.o setup.o multicalls.o mmu.o irq.o \
+                       time.o xen-asm.o xen-asm_$(BITS).o \
+                       grant-table.o suspend.o platform-pci-unplug.o \
+-                      p2m.o
++                      p2m.o apic.o
+ 
+ obj-$(CONFIG_EVENT_TRACING) += trace.o
+ 
+ obj-$(CONFIG_SMP)             += smp.o
+ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+ obj-$(CONFIG_XEN_DEBUG_FS)    += debugfs.o
+-obj-$(CONFIG_XEN_DOM0)                += apic.o vga.o
++obj-$(CONFIG_XEN_DOM0)                += vga.o
+ obj-$(CONFIG_SWIOTLB_XEN)     += pci-swiotlb-xen.o
+ obj-$(CONFIG_XEN_EFI)         += efi.o
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index e180e097a53a..d8d81d1aa1d5 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1772,6 +1772,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ #ifdef CONFIG_X86_32
+       i386_start_kernel();
+ #else
++      cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
+       x86_64_start_reservations((char *)__pa_symbol(&boot_params));
+ #endif
+ }
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 28c7e0be56e4..566004cc8a5b 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -94,17 +94,15 @@ struct dom0_vga_console_info;
+ 
+ #ifdef CONFIG_XEN_DOM0
+ void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+-void __init xen_init_apic(void);
+ #else
+ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+                                      size_t size)
+ {
+ }
+-static inline void __init xen_init_apic(void)
+-{
+-}
+ #endif
+ 
++void __init xen_init_apic(void);
++
+ #ifdef CONFIG_XEN_EFI
+ extern void xen_efi_init(void);
+ #else
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index aa02247d227e..d15c34a31633 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
+  * Description:
+  *    Enables a low level driver to set a hard upper limit,
+  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
+- *    the device driver based upon the combined capabilities of I/O
+- *    controller and storage device.
++ *    the device driver based upon the capabilities of the I/O
++ *    controller.
+  *
+  *    max_sectors is a soft limit imposed by the block layer for
+  *    filesystem type requests.  This value can be overridden on a
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 74e18e94bef2..7f15707b4850 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4238,6 +4238,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung SSD 8*",             NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
++      { "FCCT*M500*",                 NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
++                                              ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 
+       /* devices that don't properly handle TRIM commands */
+       { "SuperSSpeed S238*",          NULL,   ATA_HORKAGE_NOTRIM, },
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index 9d09c5bb5874..bb39181e4c33 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+       if (!blk)
+               return -ENOMEM;
+ 
+-      present = krealloc(rbnode->cache_present,
+-                  BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+-      if (!present) {
+-              kfree(blk);
+-              return -ENOMEM;
++      if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
++              present = krealloc(rbnode->cache_present,
++                                 BITS_TO_LONGS(blklen) * sizeof(*present),
++                                 GFP_KERNEL);
++              if (!present) {
++                      kfree(blk);
++                      return -ENOMEM;
++              }
++
++              memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
++                     (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
++                     * sizeof(*present));
++      } else {
++              present = rbnode->cache_present;
+       }
+ 
+       /* insert the register value in the correct place in the rbnode block */
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 63fc7f06a014..0c858a60dc40 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -350,7 +350,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
+               return;
+       }
+ 
+-      if (work_pending(&blkif->persistent_purge_work)) {
++      if (work_busy(&blkif->persistent_purge_work)) {
+               pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+               return;
+       }
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 5ac312f6e0be..218c4858f494 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1099,8 +1099,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+                                * Add the used indirect page back to the list of
+                                * available pages for indirect grefs.
+                                */
+-                              indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+-                              list_add(&indirect_page->lru, &info->indirect_pages);
++                              if (!info->feature_persistent) {
++                                      indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
++                                      list_add(&indirect_page->lru, &info->indirect_pages);
++                              }
+                               s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+                               list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+                       }
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 08b0da23c4ab..5408450204b0 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
+                         state->buflen_1;
+       u32 *sh_desc = ctx->sh_desc_fin, *desc;
+       dma_addr_t ptr = ctx->sh_desc_fin_dma;
+-      int sec4_sg_bytes;
++      int sec4_sg_bytes, sec4_sg_src_index;
+       int digestsize = crypto_ahash_digestsize(ahash);
+       struct ahash_edesc *edesc;
+       int ret = 0;
+       int sh_len;
+ 
+-      sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
++      sec4_sg_src_index = 1 + (buflen ? 1 : 0);
++      sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ 
+       /* allocate space for base edesc and hw desc commands, link tables */
+       edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
+       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+                                               buf, state->buf_dma, buflen,
+                                               last_buflen);
+-      (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
++      (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+ 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
+index 0f04d5ead521..4c1991d4ce8b 100644
+--- a/drivers/edac/ppc4xx_edac.c
++++ b/drivers/edac/ppc4xx_edac.c
+@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+        */
+ 
+       for (row = 0; row < mci->nr_csrows; row++) {
+-              struct csrow_info *csi = &mci->csrows[row];
++              struct csrow_info *csi = mci->csrows[row];
+ 
+               /*
+                * Get the configuration settings for this
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index bbcd754dc7d0..ccedb17580f7 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+ 
++      /* we can race here at startup, some boards seem to trigger
++       * hotplug irqs when they shouldn't. */
++      if (!rdev->mode_info.mode_config_initialized)
++              return;
++
+       mutex_lock(&mode_config->mutex);
+       if (mode_config->num_connector) {
+               list_for_each_entry(connector, &mode_config->connector_list, head)
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 50b52802f470..8ad66bbd4f28 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2489,7 +2489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 
+       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+       if (unlikely(ret != 0))
+-              goto out_err;
++              goto out_err_nores;
+ 
+       ret = vmw_validate_buffers(dev_priv, sw_context);
+       if (unlikely(ret != 0))
+@@ -2533,6 +2533,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+       vmw_resource_relocations_free(&sw_context->res_relocations);
+ 
+       vmw_fifo_commit(dev_priv, command_size);
++      mutex_unlock(&dev_priv->binding_mutex);
+ 
+       vmw_query_bo_switch_commit(dev_priv, sw_context);
+       ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+@@ -2548,7 +2549,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+               DRM_ERROR("Fence submission error. Syncing.\n");
+ 
+       vmw_resource_list_unreserve(&sw_context->resource_list, false);
+-      mutex_unlock(&dev_priv->binding_mutex);
+ 
+       ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
+                                   (void *) fence);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 3603d0cb25d9..ef984eba8396 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -222,6 +222,7 @@
+ #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD    0x0418
+ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH     0xb19d
+ #define USB_DEVICE_ID_CHICONY_WIRELESS        0x0618
++#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE        0x1053
+ #define USB_DEVICE_ID_CHICONY_WIRELESS2       0x1123
+ #define USB_DEVICE_ID_CHICONY_AK1D    0x1125
+ 
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 509dee2e9b72..a4d1fe64c925 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -69,6 +69,7 @@ static const struct hid_blacklist {
+       { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
++      { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
+index 8c91fd5eb6fd..3ac9c4194814 100644
+--- a/drivers/isdn/gigaset/ser-gigaset.c
++++ b/drivers/isdn/gigaset/ser-gigaset.c
+@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
+       cs->hw.ser->tty = tty;
+       atomic_set(&cs->hw.ser->refcnt, 1);
+       init_completion(&cs->hw.ser->dead_cmp);
+-
+       tty->disc_data = cs;
+ 
++      /* Set the amount of data we're willing to receive per call
++       * from the hardware driver to half of the input buffer size
++       * to leave some reserve.
++       * Note: We don't do flow control towards the hardware driver.
++       * If more data is received than will fit into the input buffer,
++       * it will be dropped and an error will be logged. This should
++       * never happen as the device is slow and the buffer size ample.
++       */
++      tty->receive_room = RBUFSIZE/2;
++
+       /* OK.. Initialization of the datastructures and the HW is done.. Now
+        * startup system and notify the LL that we are ready to run
+        */
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index e9d33ad59df5..3412b86e79fd 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
+               return r;
+ 
+       disk_super = dm_block_data(copy);
+-      dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+-      dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
++      dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
++      dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
+       dm_sm_dec_block(pmd->metadata_sm, held_root);
+ 
+       return dm_tm_unlock(pmd->tm, copy);
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index bce7c0784b6b..633c63e7c32f 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -892,10 +892,6 @@ int arizona_dev_init(struct arizona *arizona)
+                            arizona->pdata.gpio_defaults[i]);
+       }
+ 
+-      pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+-      pm_runtime_use_autosuspend(arizona->dev);
+-      pm_runtime_enable(arizona->dev);
+-
+       /* Chip default */
+       if (!arizona->pdata.clk32k_src)
+               arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
+@@ -992,11 +988,17 @@ int arizona_dev_init(struct arizona *arizona)
+                                          arizona->pdata.spk_fmt[i]);
+       }
+ 
++      pm_runtime_set_active(arizona->dev);
++      pm_runtime_enable(arizona->dev);
++
+       /* Set up for interrupts */
+       ret = arizona_irq_init(arizona);
+       if (ret != 0)
+               goto err_reset;
+ 
++      pm_runtime_set_autosuspend_delay(arizona->dev, 100);
++      pm_runtime_use_autosuspend(arizona->dev);
++
+       arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
+                           arizona_clkgen_err, arizona);
+       arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
+@@ -1024,10 +1026,6 @@ int arizona_dev_init(struct arizona *arizona)
+               goto err_irq;
+       }
+ 
+-#ifdef CONFIG_PM_RUNTIME
+-      regulator_disable(arizona->dcvdd);
+-#endif
+-
+       return 0;
+ 
+ err_irq:
+diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
+index f35d4280b2f7..c58fc62545a3 100644
+--- a/drivers/mfd/lpc_ich.c
++++ b/drivers/mfd/lpc_ich.c
+@@ -934,8 +934,8 @@ gpe0_done:
+       lpc_ich_enable_gpio_space(dev);
+ 
+       lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+-      ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+-                            1, NULL, 0, NULL);
++      ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++                            &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
+ 
+ gpio_done:
+       if (acpi_conflict)
+@@ -1008,8 +1008,8 @@ static int lpc_ich_init_wdt(struct pci_dev *dev)
+       }
+ 
+       lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+-      ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+-                            1, NULL, 0, NULL);
++      ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
++                            &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
+ 
+ wdt_done:
+       return ret;
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1cc06c0e3e92..081dd70813c8 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -622,6 +622,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
+       call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
+ }
+ 
++static struct slave *bond_get_old_active(struct bonding *bond,
++                                       struct slave *new_active)
++{
++      struct slave *slave;
++      struct list_head *iter;
++
++      bond_for_each_slave(bond, slave, iter) {
++              if (slave == new_active)
++                      continue;
++
++              if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
++                      return slave;
++      }
++
++      return NULL;
++}
++
+ /* bond_do_fail_over_mac
+  *
+  * Perform special MAC address swapping for fail_over_mac settings
+@@ -649,6 +666,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
+               if (!new_active)
+                       return;
+ 
++              if (!old_active)
++                      old_active = bond_get_old_active(bond, new_active);
++
+               if (old_active) {
+                       ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+                       ether_addr_copy(saddr.sa_data,
+@@ -1805,6 +1825,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
+               bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+               netdev_info(bond_dev, "Destroying bond %s\n",
+                           bond_dev->name);
++              bond_remove_proc_entry(bond);
+               unregister_netdevice(bond_dev);
+       }
+       return ret;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 49290a405903..af67e7d410eb 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -568,7 +568,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+                                                       continue;
+                                               mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
+                                                        __func__, i, port);
+-                                              s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++                                              s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+                                                       eqe->event.port_change.port =
+                                                               cpu_to_be32(
+@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+                                                       continue;
+                                               if (i == mlx4_master_func_num(dev))
+                                                       continue;
+-                                              s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++                                              s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+                                                       eqe->event.port_change.port =
+                                                               cpu_to_be32(
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+index 223eb42992bd..775e7bc292f2 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+ module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
+ module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
++module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
+                  bool, 0444);
+ MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n");
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 1b3a09473452..30f9ef0c0d4f 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+       if (resp) {
+               resp(sp, fp, arg);
+               res = true;
+-      } else if (!IS_ERR(fp)) {
+-              fc_frame_free(fp);
+       }
+ 
+       spin_lock_bh(&ep->ex_lock);
+@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+        * If new exch resp handler is valid then call that
+        * first.
+        */
+-      fc_invoke_resp(ep, sp, fp);
++      if (!fc_invoke_resp(ep, sp, fp))
++              fc_frame_free(fp);
+ 
+       fc_exch_release(ep);
+       return;
+@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+       fc_exch_hold(ep);
+       if (!rc)
+               fc_exch_delete(ep);
+-      fc_invoke_resp(ep, sp, fp);
++      if (!fc_invoke_resp(ep, sp, fp))
++              fc_frame_free(fp);
+       if (has_rec)
+               fc_exch_timer_set(ep, ep->r_a_tov);
+       fc_exch_release(ep);
+diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
+index 1d7e76e8b447..ae6fc1a94568 100644
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -1039,11 +1039,26 @@ restart:
+               fc_fcp_pkt_hold(fsp);
+               spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ 
+-              if (!fc_fcp_lock_pkt(fsp)) {
++              spin_lock_bh(&fsp->scsi_pkt_lock);
++              if (!(fsp->state & FC_SRB_COMPL)) {
++                      fsp->state |= FC_SRB_COMPL;
++                      /*
++                       * TODO: dropping scsi_pkt_lock and then reacquiring
++                       * again around fc_fcp_cleanup_cmd() is required,
++                       * since fc_fcp_cleanup_cmd() calls into
++                       * fc_seq_set_resp() and that func preempts cpu using
++                       * schedule. May be schedule and related code should be
++                       * removed instead of unlocking here to avoid scheduling
++                       * while atomic bug.
++                       */
++                      spin_unlock_bh(&fsp->scsi_pkt_lock);
++
+                       fc_fcp_cleanup_cmd(fsp, error);
++
++                      spin_lock_bh(&fsp->scsi_pkt_lock);
+                       fc_io_compl(fsp);
+-                      fc_fcp_unlock_pkt(fsp);
+               }
++              spin_unlock_bh(&fsp->scsi_pkt_lock);
+ 
+               fc_fcp_pkt_release(fsp);
+               spin_lock_irqsave(&si->scsi_queue_lock, flags);
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 0d8bc6c66650..7854584ebd59 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -2960,10 +2960,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ {
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+-      unsigned long flags;
+ 
+       del_timer_sync(&conn->transport_timer);
+ 
++      mutex_lock(&session->eh_mutex);
+       spin_lock_bh(&session->frwd_lock);
+       conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
+       if (session->leadconn == conn) {
+@@ -2975,28 +2975,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+       }
+       spin_unlock_bh(&session->frwd_lock);
+ 
+-      /*
+-       * Block until all in-progress commands for this connection
+-       * time out or fail.
+-       */
+-      for (;;) {
+-              spin_lock_irqsave(session->host->host_lock, flags);
+-              if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
+-                      spin_unlock_irqrestore(session->host->host_lock, flags);
+-                      break;
+-              }
+-              spin_unlock_irqrestore(session->host->host_lock, flags);
+-              msleep_interruptible(500);
+-              iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+-                                "host_busy %d host_failed %d\n",
+-                                atomic_read(&session->host->host_busy),
+-                                session->host->host_failed);
+-              /*
+-               * force eh_abort() to unblock
+-               */
+-              wake_up(&conn->ehwait);
+-      }
+-
+       /* flush queued up work because we free the connection below */
+       iscsi_suspend_tx(conn);
+ 
+@@ -3013,6 +2991,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+       if (session->leadconn == conn)
+               session->leadconn = NULL;
+       spin_unlock_bh(&session->frwd_lock);
++      mutex_unlock(&session->eh_mutex);
+ 
+       iscsi_destroy_conn(cls_conn);
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index b99399fe2548..15acc808658f 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3380,7 +3380,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+                */
+ 
+               nseg = scsi_dma_map(scsi_cmnd);
+-              if (unlikely(!nseg))
++              if (unlikely(nseg <= 0))
+                       return 1;
+               sgl += 1;
+               /* clear the last flag in the fcp_rsp map entry */
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
+index 7454498c4091..a2dcf6a54ec6 100644
+--- a/drivers/scsi/scsi_pm.c
++++ b/drivers/scsi/scsi_pm.c
+@@ -219,15 +219,15 @@ static int sdev_runtime_suspend(struct device *dev)
+ {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       struct scsi_device *sdev = to_scsi_device(dev);
+-      int err;
++      int err = 0;
+ 
+-      err = blk_pre_runtime_suspend(sdev->request_queue);
+-      if (err)
+-              return err;
+-      if (pm && pm->runtime_suspend)
++      if (pm && pm->runtime_suspend) {
++              err = blk_pre_runtime_suspend(sdev->request_queue);
++              if (err)
++                      return err;
+               err = pm->runtime_suspend(dev);
+-      blk_post_runtime_suspend(sdev->request_queue, err);
+-
++              blk_post_runtime_suspend(sdev->request_queue, err);
++      }
+       return err;
+ }
+ 
+@@ -250,11 +250,11 @@ static int sdev_runtime_resume(struct device *dev)
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int err = 0;
+ 
+-      blk_pre_runtime_resume(sdev->request_queue);
+-      if (pm && pm->runtime_resume)
++      if (pm && pm->runtime_resume) {
++              blk_pre_runtime_resume(sdev->request_queue);
+               err = pm->runtime_resume(dev);
+-      blk_post_runtime_resume(sdev->request_queue, err);
+-
++              blk_post_runtime_resume(sdev->request_queue, err);
++      }
+       return err;
+ }
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index ce382e858452..6d931d598d80 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2812,9 +2812,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
+       max_xfer = sdkp->max_xfer_blocks;
+       max_xfer <<= ilog2(sdp->sector_size) - 9;
+ 
+-      max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+-                              max_xfer);
+-      blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
++      sdkp->disk->queue->limits.max_sectors =
++              min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
++
+       set_capacity(disk, sdkp->capacity);
+       sd_config_write_same(sdkp);
+       kfree(buffer);
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 45c39a37f924..8bc073d297db 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -172,6 +172,7 @@
+       {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
++      {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 000c5f90f08c..2bd394ed35f6 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -454,6 +454,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
+  */
+ #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+ 
++/**
++ * lockless_dereference() - safely load a pointer for later dereference
++ * @p: The pointer to load
++ *
++ * Similar to rcu_dereference(), but for situations where the pointed-to
++ * object's lifetime is managed by something other than RCU.  That
++ * "something other" might be reference counting or simple immortality.
++ */
++#define lockless_dereference(p) \
++({ \
++      typeof(p) _________p1 = ACCESS_ONCE(p); \
++      smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
++      (_________p1); \
++})
++
+ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+ #ifdef CONFIG_KPROBES
+ # define __kprobes    __attribute__((__section__(".kprobes.text")))
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 53ff1a752d7e..a4a819ffb2d1 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -617,21 +617,6 @@ static inline void rcu_preempt_sleep_check(void)
+ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+ 
+ /**
+- * lockless_dereference() - safely load a pointer for later dereference
+- * @p: The pointer to load
+- *
+- * Similar to rcu_dereference(), but for situations where the pointed-to
+- * object's lifetime is managed by something other than RCU.  That
+- * "something other" might be reference counting or simple immortality.
+- */
+-#define lockless_dereference(p) \
+-({ \
+-      typeof(p) _________p1 = ACCESS_ONCE(p); \
+-      smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+-      (_________p1); \
+-})
+-
+-/**
+  * rcu_assign_pointer() - assign to RCU-protected pointer
+  * @p: pointer to assign to
+  * @v: value to assign (publish)
+diff --git a/include/net/ip.h b/include/net/ip.h
+index c0c26c3deeb5..d00ebdf14ca4 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -160,6 +160,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+ }
+ 
+ /* datagram.c */
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+ 
+ void ip4_datagram_release_cb(struct sock *sk);
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 53c3310f41c6..85ad28aaf548 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
+ }
+ 
+ /*
++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
++ * are only control barriers.
++ * The code must pair with spin_unlock(&sem->lock) or
++ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
++ *
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
++ */
++#define ipc_smp_acquire__after_spin_is_unlocked()     smp_rmb()
++
++/*
+  * Wait until all currently ongoing simple ops have completed.
+  * Caller must own sem_perm.lock.
+  * New simple ops cannot start, because simple ops first check
+@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
+               sem = sma->sem_base + i;
+               spin_unlock_wait(&sem->lock);
+       }
++      ipc_smp_acquire__after_spin_is_unlocked();
+ }
+ 
+ /*
+@@ -326,8 +337,13 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ 
+               /* Then check that the global lock is free */
+               if (!spin_is_locked(&sma->sem_perm.lock)) {
+-                      /* spin_is_locked() is not a memory barrier */
+-                      smp_mb();
++                      /*
++                       * We need a memory barrier with acquire semantics,
++                       * otherwise we can race with another thread that does:
++                       *      complex_count++;
++                       *      spin_unlock(sem_perm.lock);
++                       */
++                      ipc_smp_acquire__after_spin_is_unlocked();
+ 
+                       /* Now repeat the test of complex_count:
+                        * It can't change anymore until we drop sem->lock.
+@@ -2067,17 +2083,28 @@ void exit_sem(struct task_struct *tsk)
+               rcu_read_lock();
+               un = list_entry_rcu(ulp->list_proc.next,
+                                   struct sem_undo, list_proc);
+-              if (&un->list_proc == &ulp->list_proc)
+-                      semid = -1;
+-               else
+-                      semid = un->semid;
++              if (&un->list_proc == &ulp->list_proc) {
++                      /*
++                       * We must wait for freeary() before freeing this ulp,
++                       * in case we raced with last sem_undo. There is a small
++                       * possibility where we exit while freeary() didn't
++                       * finish unlocking sem_undo_list.
++                       */
++                      spin_unlock_wait(&ulp->lock);
++                      rcu_read_unlock();
++                      break;
++              }
++              spin_lock(&ulp->lock);
++              semid = un->semid;
++              spin_unlock(&ulp->lock);
+ 
++              /* exit_sem raced with IPC_RMID, nothing to do */
+               if (semid == -1) {
+                       rcu_read_unlock();
+-                      break;
++                      continue;
+               }
+ 
+-              sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
++              sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
+               /* exit_sem raced with IPC_RMID, nothing to do */
+               if (IS_ERR(sma)) {
+                       rcu_read_unlock();
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 672310e1597e..71b52dd957de 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1204,7 +1204,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+       mutex_unlock(&callback_mutex);
+ 
+       /* use trialcs->mems_allowed as a temp variable */
+-      update_nodemasks_hier(cs, &cs->mems_allowed);
++      update_nodemasks_hier(cs, &trialcs->mems_allowed);
+ done:
+       return retval;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index cb86038cad47..ff181a5a5562 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3729,28 +3729,21 @@ static void perf_event_for_each(struct perf_event *event,
+       mutex_unlock(&ctx->mutex);
+ }
+ 
+-static int perf_event_period(struct perf_event *event, u64 __user *arg)
+-{
+-      struct perf_event_context *ctx = event->ctx;
+-      int ret = 0, active;
++struct period_event {
++      struct perf_event *event;
+       u64 value;
++};
+ 
+-      if (!is_sampling_event(event))
+-              return -EINVAL;
+-
+-      if (copy_from_user(&value, arg, sizeof(value)))
+-              return -EFAULT;
+-
+-      if (!value)
+-              return -EINVAL;
++static int __perf_event_period(void *info)
++{
++      struct period_event *pe = info;
++      struct perf_event *event = pe->event;
++      struct perf_event_context *ctx = event->ctx;
++      u64 value = pe->value;
++      bool active;
+ 
+-      raw_spin_lock_irq(&ctx->lock);
++      raw_spin_lock(&ctx->lock);
+       if (event->attr.freq) {
+-              if (value > sysctl_perf_event_sample_rate) {
+-                      ret = -EINVAL;
+-                      goto unlock;
+-              }
+-
+               event->attr.sample_freq = value;
+       } else {
+               event->attr.sample_period = value;
+@@ -3769,11 +3762,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
+               event->pmu->start(event, PERF_EF_RELOAD);
+               perf_pmu_enable(ctx->pmu);
+       }
++      raw_spin_unlock(&ctx->lock);
+ 
+-unlock:
++      return 0;
++}
++
++static int perf_event_period(struct perf_event *event, u64 __user *arg)
++{
++      struct period_event pe = { .event = event, };
++      struct perf_event_context *ctx = event->ctx;
++      struct task_struct *task;
++      u64 value;
++
++      if (!is_sampling_event(event))
++              return -EINVAL;
++
++      if (copy_from_user(&value, arg, sizeof(value)))
++              return -EFAULT;
++
++      if (!value)
++              return -EINVAL;
++
++      if (event->attr.freq && value > sysctl_perf_event_sample_rate)
++              return -EINVAL;
++
++      task = ctx->task;
++      pe.value = value;
++
++      if (!task) {
++              cpu_function_call(event->cpu, __perf_event_period, &pe);
++              return 0;
++      }
++
++retry:
++      if (!task_function_call(task, __perf_event_period, &pe))
++              return 0;
++
++      raw_spin_lock_irq(&ctx->lock);
++      if (ctx->is_active) {
++              raw_spin_unlock_irq(&ctx->lock);
++              task = ctx->task;
++              goto retry;
++      }
++
++      __perf_event_period(&pe);
+       raw_spin_unlock_irq(&ctx->lock);
+ 
+-      return ret;
++      return 0;
+ }
+ 
+ static const struct file_operations perf_fops;
+@@ -4398,12 +4433,20 @@ static const struct file_operations perf_fops = {
+  * to user-space before waking everybody up.
+  */
+ 
++static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
++{
++      /* only the parent has fasync state */
++      if (event->parent)
++              event = event->parent;
++      return &event->fasync;
++}
++
+ void perf_event_wakeup(struct perf_event *event)
+ {
+       ring_buffer_wakeup(event);
+ 
+       if (event->pending_kill) {
+-              kill_fasync(&event->fasync, SIGIO, event->pending_kill);
++              kill_fasync(perf_event_fasync(event), SIGIO, 
event->pending_kill);
+               event->pending_kill = 0;
+       }
+ }
+@@ -5638,7 +5681,7 @@ static int __perf_event_overflow(struct perf_event *event,
+       else
+               perf_event_output(event, data, regs);
+ 
+-      if (event->fasync && event->pending_kill) {
++      if (*perf_event_fasync(event) && event->pending_kill) {
+               event->pending_wakeup = 1;
+               irq_work_queue(&event->pending);
+       }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index da8fa4e4237c..a1d4dfa62023 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -855,6 +855,31 @@ struct hstate *size_to_hstate(unsigned long size)
+       return NULL;
+ }
+ 
++/*
++ * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
++ * to hstate->hugepage_activelist.)
++ *
++ * This function can be called for tail pages, but never returns true for them.
++ */
++bool page_huge_active(struct page *page)
++{
++      VM_BUG_ON_PAGE(!PageHuge(page), page);
++      return PageHead(page) && PagePrivate(&page[1]);
++}
++
++/* never called for tail page */
++static void set_page_huge_active(struct page *page)
++{
++      VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
++      SetPagePrivate(&page[1]);
++}
++
++static void clear_page_huge_active(struct page *page)
++{
++      VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
++      ClearPagePrivate(&page[1]);
++}
++
+ void free_huge_page(struct page *page)
+ {
+       /*
+@@ -875,6 +900,7 @@ void free_huge_page(struct page *page)
+       ClearPagePrivate(page);
+ 
+       spin_lock(&hugetlb_lock);
++      clear_page_huge_active(page);
+       hugetlb_cgroup_uncharge_page(hstate_index(h),
+                                    pages_per_huge_page(h), page);
+       if (restore_reserve)
+@@ -2884,6 +2910,7 @@ retry_avoidcopy:
+       copy_user_huge_page(new_page, old_page, address, vma,
+                           pages_per_huge_page(h));
+       __SetPageUptodate(new_page);
++      set_page_huge_active(new_page);
+ 
+       mmun_start = address & huge_page_mask(h);
+       mmun_end = mmun_start + huge_page_size(h);
+@@ -2995,6 +3022,7 @@ retry:
+               }
+               clear_huge_page(page, address, pages_per_huge_page(h));
+               __SetPageUptodate(page);
++              set_page_huge_active(page);
+ 
+               if (vma->vm_flags & VM_MAYSHARE) {
+                       int err;
+@@ -3799,19 +3827,26 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
+ 
+ bool isolate_huge_page(struct page *page, struct list_head *list)
+ {
++      bool ret = true;
++
+       VM_BUG_ON_PAGE(!PageHead(page), page);
+-      if (!get_page_unless_zero(page))
+-              return false;
+       spin_lock(&hugetlb_lock);
++      if (!page_huge_active(page) || !get_page_unless_zero(page)) {
++              ret = false;
++              goto unlock;
++      }
++      clear_page_huge_active(page);
+       list_move_tail(&page->lru, list);
++unlock:
+       spin_unlock(&hugetlb_lock);
+-      return true;
++      return ret;
+ }
+ 
+ void putback_active_hugepage(struct page *page)
+ {
+       VM_BUG_ON_PAGE(!PageHead(page), page);
+       spin_lock(&hugetlb_lock);
++      set_page_huge_active(page);
+       list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
+       spin_unlock(&hugetlb_lock);
+       put_page(page);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 22f047fbaa33..715bc57385b9 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1524,6 +1524,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
+                */
+               ret = __get_any_page(page, pfn, 0);
+               if (!PageLRU(page)) {
++                      /* Drop page reference which is from __get_any_page() */
++                      put_page(page);
+                       pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
+                               pfn, page->flags);
+                       return -EIO;
+@@ -1552,8 +1554,17 @@ static int soft_offline_huge_page(struct page *page, int flags)
+       }
+       unlock_page(hpage);
+ 
+-      /* Keep page count to indicate a given hugepage is isolated. */
+-      list_move(&hpage->lru, &pagelist);
++      ret = isolate_huge_page(hpage, &pagelist);
++      /*
++       * get_any_page() and isolate_huge_page() takes a refcount each,
++       * so need to drop one here.
++       */
++      put_page(hpage);
++      if (!ret) {
++              pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
++              return -EBUSY;
++      }
++
+       ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
+                               MIGRATE_SYNC, MR_MEMORY_FAILURE);
+       if (ret) {
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index 5df05269d17a..cc641541d38f 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -347,7 +347,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
+               return -ENOMEM;
+       rcu_assign_pointer(*pp, p);
+ 
+-      br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+       return 0;
+ }
+ 
+@@ -370,6 +369,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
+       if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+               return -EINVAL;
+ 
++      memset(&ip, 0, sizeof(ip));
+       ip.proto = entry->addr.proto;
+       if (ip.proto == htons(ETH_P_IP))
+               ip.u.ip4 = entry->addr.u.ip4;
+@@ -416,6 +416,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
+       if (!netif_running(br->dev) || br->multicast_disabled)
+               return -EINVAL;
+ 
++      memset(&ip, 0, sizeof(ip));
+       ip.proto = entry->addr.proto;
+       if (ip.proto == htons(ETH_P_IP)) {
+               if (timer_pending(&br->ip4_other_query.timer))
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index fdbc9a81d4c2..3a402a7b20e9 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -744,7 +744,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+                   !skb->csum_complete_sw)
+                       netdev_rx_csum_fault(skb->dev);
+       }
+-      skb->csum_valid = !sum;
++      if (!skb_shared(skb))
++              skb->csum_valid = !sum;
+       return sum;
+ }
+ EXPORT_SYMBOL(__skb_checksum_complete_head);
+@@ -764,11 +765,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
+                       netdev_rx_csum_fault(skb->dev);
+       }
+ 
+-      /* Save full packet checksum */
+-      skb->csum = csum;
+-      skb->ip_summed = CHECKSUM_COMPLETE;
+-      skb->csum_complete_sw = 1;
+-      skb->csum_valid = !sum;
++      if (!skb_shared(skb)) {
++              /* Save full packet checksum */
++              skb->csum = csum;
++              skb->ip_summed = CHECKSUM_COMPLETE;
++              skb->csum_complete_sw = 1;
++              skb->csum_valid = !sum;
++      }
+ 
+       return sum;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index fb9625874b3c..93612b2e3bbf 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3309,6 +3309,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+       local_irq_save(flags);
+ 
+       rps_lock(sd);
++      if (!netif_running(skb->dev))
++              goto drop;
+       qlen = skb_queue_len(&sd->input_pkt_queue);
+       if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+               if (skb_queue_len(&sd->input_pkt_queue)) {
+@@ -3330,6 +3332,7 @@ enqueue:
+               goto enqueue;
+       }
+ 
++drop:
+       sd->dropped++;
+       rps_unlock(sd);
+ 
+@@ -3638,8 +3641,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+ 
+       pt_prev = NULL;
+ 
+-      rcu_read_lock();
+-
+ another_round:
+       skb->skb_iif = skb->dev->ifindex;
+ 
+@@ -3649,7 +3650,7 @@ another_round:
+           skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+               skb = skb_vlan_untag(skb);
+               if (unlikely(!skb))
+-                      goto unlock;
++                      goto out;
+       }
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+@@ -3674,7 +3675,7 @@ skip_taps:
+ #ifdef CONFIG_NET_CLS_ACT
+       skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+       if (!skb)
+-              goto unlock;
++              goto out;
+ ncls:
+ #endif
+ 
+@@ -3689,7 +3690,7 @@ ncls:
+               if (vlan_do_receive(&skb))
+                       goto another_round;
+               else if (unlikely(!skb))
+-                      goto unlock;
++                      goto out;
+       }
+ 
+       rx_handler = rcu_dereference(skb->dev->rx_handler);
+@@ -3701,7 +3702,7 @@ ncls:
+               switch (rx_handler(&skb)) {
+               case RX_HANDLER_CONSUMED:
+                       ret = NET_RX_SUCCESS;
+-                      goto unlock;
++                      goto out;
+               case RX_HANDLER_ANOTHER:
+                       goto another_round;
+               case RX_HANDLER_EXACT:
+@@ -3753,8 +3754,7 @@ drop:
+               ret = NET_RX_DROP;
+       }
+ 
+-unlock:
+-      rcu_read_unlock();
++out:
+       return ret;
+ }
+ 
+@@ -3785,29 +3785,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
+ 
+ static int netif_receive_skb_internal(struct sk_buff *skb)
+ {
++      int ret;
++
+       net_timestamp_check(netdev_tstamp_prequeue, skb);
+ 
+       if (skb_defer_rx_timestamp(skb))
+               return NET_RX_SUCCESS;
+ 
++      rcu_read_lock();
++
+ #ifdef CONFIG_RPS
+       if (static_key_false(&rps_needed)) {
+               struct rps_dev_flow voidflow, *rflow = &voidflow;
+-              int cpu, ret;
+-
+-              rcu_read_lock();
+-
+-              cpu = get_rps_cpu(skb->dev, skb, &rflow);
++              int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+ 
+               if (cpu >= 0) {
+                       ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+                       rcu_read_unlock();
+                       return ret;
+               }
+-              rcu_read_unlock();
+       }
+ #endif
+-      return __netif_receive_skb(skb);
++      ret = __netif_receive_skb(skb);
++      rcu_read_unlock();
++      return ret;
+ }
+ 
+ /**
+@@ -4343,8 +4344,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
+               struct sk_buff *skb;
+ 
+               while ((skb = __skb_dequeue(&sd->process_queue))) {
++                      rcu_read_lock();
+                       local_irq_enable();
+                       __netif_receive_skb(skb);
++                      rcu_read_unlock();
+                       local_irq_disable();
+                       input_queue_head_incr(sd);
+                       if (++work >= quota) {
+@@ -5867,6 +5870,7 @@ static void rollback_registered_many(struct list_head *head)
+               unlist_netdevice(dev);
+ 
+               dev->reg_state = NETREG_UNREGISTERING;
++              on_each_cpu(flush_backlog, dev, 1);
+       }
+ 
+       synchronize_net();
+@@ -6128,7 +6132,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
+       struct netdev_queue *tx;
+       size_t sz = count * sizeof(*tx);
+ 
+-      BUG_ON(count < 1 || count > 0xffff);
++      if (count < 1 || count > 0xffff)
++              return -EINVAL;
+ 
+       tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!tx) {
+@@ -6486,8 +6491,6 @@ void netdev_run_todo(void)
+ 
+               dev->reg_state = NETREG_UNREGISTERED;
+ 
+-              on_each_cpu(flush_backlog, dev, 1);
+-
+               netdev_wait_allrefs(dev);
+ 
+               /* paranoia */
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 0b320d93fb56..4ff3eacc99f5 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg)
+       pktgen_rem_thread(t);
+ 
+       /* Wait for kthread_stop */
+-      while (!kthread_should_stop()) {
++      for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
++              if (kthread_should_stop())
++                      break;
+               schedule();
+       }
+       __set_current_state(TASK_RUNNING);
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 90c0e8386116..574fad9cca05 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -20,7 +20,7 @@
+ #include <net/route.h>
+ #include <net/tcp_states.h>
+ 
+-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+       struct inet_sock *inet = inet_sk(sk);
+       struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
+@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 
+       sk_dst_reset(sk);
+ 
+-      lock_sock(sk);
+-
+       oif = sk->sk_bound_dev_if;
+       saddr = inet->inet_saddr;
+       if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
+@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       sk_dst_set(sk, &rt->dst);
+       err = 0;
+ out:
+-      release_sock(sk);
+       return err;
+ }
++EXPORT_SYMBOL(__ip4_datagram_connect);
++
++int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++      int res;
++
++      lock_sock(sk);
++      res = __ip4_datagram_connect(sk, uaddr, addr_len);
++      release_sock(sk);
++      return res;
++}
+ EXPORT_SYMBOL(ip4_datagram_connect);
+ 
+ /* Because UDP xmit path can manipulate sk_dst_cache without holding
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index b48e03cd6656..9516031847f1 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+       ihl = ip_hdrlen(skb);
+ 
+       /* Determine the position of this fragment. */
+-      end = offset + skb->len - ihl;
++      end = offset + skb->len - skb_network_offset(skb) - ihl;
+       err = -EINVAL;
+ 
+       /* Is this the final fragment? */
+@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+               goto err;
+ 
+       err = -ENOMEM;
+-      if (pskb_pull(skb, ihl) == NULL)
++      if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
+               goto err;
+ 
+       err = pskb_trim_rcsum(skb, end - offset);
+@@ -612,6 +612,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+       iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
+       iph->tot_len = htons(len);
+       iph->tos |= ecn;
++
++      ip_send_check(iph);
++
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+       qp->q.fragments = NULL;
+       qp->q.fragments_tail = NULL;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 0bb8e141eacc..682257242971 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -587,7 +587,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ EXPORT_SYMBOL(ip_tunnel_encap);
+ 
+ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+-                          struct rtable *rt, __be16 df)
++                          struct rtable *rt, __be16 df,
++                          const struct iphdr *inner_iph)
+ {
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+@@ -604,7 +605,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 
+       if (skb->protocol == htons(ETH_P_IP)) {
+               if (!skb_is_gso(skb) &&
+-                  (df & htons(IP_DF)) && mtu < pkt_size) {
++                  (inner_iph->frag_off & htons(IP_DF)) &&
++                  mtu < pkt_size) {
+                       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+                       return -E2BIG;
+@@ -738,7 +740,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+               goto tx_error;
+       }
+ 
+-      if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
++      if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
+               ip_rt_put(rt);
+               goto tx_error;
+       }
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 11e3945eeac7..e069aeb2cf72 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
+       return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
+ }
+ 
+-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+       struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
+       struct inet_sock        *inet = inet_sk(sk);
+@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+       if (usin->sin6_family == AF_INET) {
+               if (__ipv6_only_sock(sk))
+                       return -EAFNOSUPPORT;
+-              err = ip4_datagram_connect(sk, uaddr, addr_len);
++              err = __ip4_datagram_connect(sk, uaddr, addr_len);
+               goto ipv4_connected;
+       }
+ 
+@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+               sin.sin_addr.s_addr = daddr->s6_addr32[3];
+               sin.sin_port = usin->sin6_port;
+ 
+-              err = ip4_datagram_connect(sk,
+-                                         (struct sockaddr *) &sin,
+-                                         sizeof(sin));
++              err = __ip4_datagram_connect(sk,
++                                           (struct sockaddr *) &sin,
++                                           sizeof(sin));
+ 
+ ipv4_connected:
+               if (err)
+@@ -204,6 +204,16 @@ out:
+       fl6_sock_release(flowlabel);
+       return err;
+ }
++
++int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++      int res;
++
++      lock_sock(sk);
++      res = __ip6_datagram_connect(sk, uaddr, addr_len);
++      release_sock(sk);
++      return res;
++}
+ EXPORT_SYMBOL_GPL(ip6_datagram_connect);
+ 
+ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index a3084ab5df6c..ac5e973e9eb5 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -329,10 +329,10 @@ int ip6_mc_input(struct sk_buff *skb)
+                               if (offset < 0)
+                                       goto out;
+ 
+-                              if (!ipv6_is_mld(skb, nexthdr, offset))
+-                                      goto out;
++                              if (ipv6_is_mld(skb, nexthdr, offset))
++                                      deliver = true;
+ 
+-                              deliver = true;
++                              goto out;
+                       }
+                       /* unknown RA - process it normally */
+               }
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index c82b2e37e652..6ffd1ebaba93 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -366,25 +366,52 @@ err1:
+       return NULL;
+ }
+ 
++
++static void
++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
++                 unsigned int order)
++{
++      struct netlink_sock *nlk = nlk_sk(sk);
++      struct sk_buff_head *queue;
++      struct netlink_ring *ring;
++
++      queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
++      ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
++
++      spin_lock_bh(&queue->lock);
++
++      ring->frame_max         = req->nm_frame_nr - 1;
++      ring->head              = 0;
++      ring->frame_size        = req->nm_frame_size;
++      ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;
++
++      swap(ring->pg_vec_len, req->nm_block_nr);
++      swap(ring->pg_vec_order, order);
++      swap(ring->pg_vec, pg_vec);
++
++      __skb_queue_purge(queue);
++      spin_unlock_bh(&queue->lock);
++
++      WARN_ON(atomic_read(&nlk->mapped));
++
++      if (pg_vec)
++              free_pg_vec(pg_vec, order, req->nm_block_nr);
++}
++
+ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+-                          bool closing, bool tx_ring)
++                          bool tx_ring)
+ {
+       struct netlink_sock *nlk = nlk_sk(sk);
+       struct netlink_ring *ring;
+-      struct sk_buff_head *queue;
+       void **pg_vec = NULL;
+       unsigned int order = 0;
+-      int err;
+ 
+       ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+-      queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+ 
+-      if (!closing) {
+-              if (atomic_read(&nlk->mapped))
+-                      return -EBUSY;
+-              if (atomic_read(&ring->pending))
+-                      return -EBUSY;
+-      }
++      if (atomic_read(&nlk->mapped))
++              return -EBUSY;
++      if (atomic_read(&ring->pending))
++              return -EBUSY;
+ 
+       if (req->nm_block_nr) {
+               if (ring->pg_vec != NULL)
+@@ -416,31 +443,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+                       return -EINVAL;
+       }
+ 
+-      err = -EBUSY;
+       mutex_lock(&nlk->pg_vec_lock);
+-      if (closing || atomic_read(&nlk->mapped) == 0) {
+-              err = 0;
+-              spin_lock_bh(&queue->lock);
+-
+-              ring->frame_max         = req->nm_frame_nr - 1;
+-              ring->head              = 0;
+-              ring->frame_size        = req->nm_frame_size;
+-              ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;
+-
+-              swap(ring->pg_vec_len, req->nm_block_nr);
+-              swap(ring->pg_vec_order, order);
+-              swap(ring->pg_vec, pg_vec);
+-
+-              __skb_queue_purge(queue);
+-              spin_unlock_bh(&queue->lock);
+-
+-              WARN_ON(atomic_read(&nlk->mapped));
++      if (atomic_read(&nlk->mapped) == 0) {
++              __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
++              mutex_unlock(&nlk->pg_vec_lock);
++              return 0;
+       }
++
+       mutex_unlock(&nlk->pg_vec_lock);
+ 
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->nm_block_nr);
+-      return err;
++
++      return -EBUSY;
+ }
+ 
+ static void netlink_mm_open(struct vm_area_struct *vma)
+@@ -909,10 +924,10 @@ static void netlink_sock_destruct(struct sock *sk)
+ 
+               memset(&req, 0, sizeof(req));
+               if (nlk->rx_ring.pg_vec)
+-                      netlink_set_ring(sk, &req, true, false);
++                      __netlink_set_ring(sk, &req, false, NULL, 0);
+               memset(&req, 0, sizeof(req));
+               if (nlk->tx_ring.pg_vec)
+-                      netlink_set_ring(sk, &req, true, true);
++                      __netlink_set_ring(sk, &req, true, NULL, 0);
+       }
+ #endif /* CONFIG_NETLINK_MMAP */
+ 
+@@ -2163,7 +2178,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+                       return -EINVAL;
+               if (copy_from_user(&req, optval, sizeof(req)))
+                       return -EFAULT;
+-              err = netlink_set_ring(sk, &req, false,
++              err = netlink_set_ring(sk, &req,
+                                      optname == NETLINK_TX_RING);
+               break;
+       }
+diff --git a/net/rds/info.c b/net/rds/info.c
+index 9a6b4f66187c..140a44a5f7b7 100644
+--- a/net/rds/info.c
++++ b/net/rds/info.c
+@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
+ 
+       /* check for all kinds of wrapping and the like */
+       start = (unsigned long)optval;
+-      if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
++      if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
+               ret = -EINVAL;
+               goto out;
+       }
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 51bddc236a15..8224016ebd70 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1996,6 +1996,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+       res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
+       if (res)
+               goto exit;
++      security_sk_clone(sock->sk, new_sock->sk);
+ 
+       new_sk = new_sock->sk;
+       new_tsock = tipc_sk(new_sk);
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index 9cb8522d8d22..f3d3fb42b873 100755
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
+ my $kconfig = $ARGV[1];
+ my $lsmod_file = $ENV{'LSMOD'};
+ 
+-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
++my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
+ chomp @makefiles;
+ 
+ my %depends;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 9ff5050d513a..2c10c9ee36a2 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1258,6 +1258,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+                       return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+               break;
+ 
++      case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
+       case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+       case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
+               if (fp->altsetting == 3)
